Merge "Remove unused ICU4C headers in libartbase-art-gtest and libart-runtime-gtest"
diff --git a/build/Android.bp b/build/Android.bp
index 09d3a18..46fb0c5 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -7,6 +7,7 @@
         "blueprint-proptools",
         "soong",
         "soong-android",
+        "soong-apex",
         "soong-cc",
     ],
     srcs: [
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 775443b..1880726 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -73,8 +73,21 @@
 HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
 TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
 
-# Jar files for core.art.
-TEST_CORE_JARS := core-oj core-libart core-simple
+# Modules to compile for core.art.
+CORE_IMG_JARS := core-oj core-libart core-simple okhttp bouncycastle
+HOST_CORE_IMG_JARS   := $(addsuffix -hostdex,$(CORE_IMG_JARS))
+TARGET_CORE_IMG_JARS := $(addsuffix -testdex,$(CORE_IMG_JARS))
+HOST_CORE_IMG_DEX_LOCATIONS   := $(foreach jar,$(HOST_CORE_IMG_JARS),  $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ifeq ($(ART_TEST_ANDROID_ROOT),)
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+else
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(ART_TEST_ANDROID_ROOT)/$(jar).jar)
+endif
+HOST_CORE_IMG_DEX_FILES   := $(foreach jar,$(HOST_CORE_IMG_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
+TARGET_CORE_IMG_DEX_FILES := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
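+# For illustration: with the list above, HOST_CORE_IMG_JARS expands to
+# "core-oj-hostdex core-libart-hostdex core-simple-hostdex okhttp-hostdex
+# bouncycastle-hostdex"; the foreach/addsuffix calls above then map each
+# entry to its installed .jar location and intermediate javalib.jar.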
+
+# Jar files for the boot class path for testing. Must start with CORE_IMG_JARS.
+TEST_CORE_JARS := $(CORE_IMG_JARS) conscrypt
 HOST_TEST_CORE_JARS   := $(addsuffix -hostdex,$(TEST_CORE_JARS))
 TARGET_TEST_CORE_JARS := $(addsuffix -testdex,$(TEST_CORE_JARS))
 HOST_CORE_DEX_LOCATIONS   := $(foreach jar,$(HOST_TEST_CORE_JARS),  $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
@@ -83,7 +96,6 @@
 else
 TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
 endif
-
 HOST_CORE_DEX_FILES   := $(foreach jar,$(HOST_TEST_CORE_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
 TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
 
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index e610bb1..468eec6 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -422,6 +422,8 @@
   ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
 endif
 
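+# Exported as ANDROID_RUNTIME_ROOT when running target gtests (see below) so
+# the runtime under test can locate files packaged in the Runtime APEX, e.g.
+# the time zone data bundled by build/apex/Android.bp.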
+ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT := '/apex/com.android.runtime'
+
 # Define a make rule for a target device gtest.
 # $(1): gtest name - the name of the test we're building such as leb128_test.
 # $(2): path relative to $OUT to the test binary
@@ -452,7 +454,10 @@
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
     $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
     $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
+    $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
 
   ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
 
@@ -472,7 +477,9 @@
 	$(hide) $(ADB) shell $$(PRIVATE_MAYBE_CHROOT_COMMAND) chmod 755 $$(PRIVATE_TARGET_EXE)
 	$(hide) $$(call ART_TEST_SKIP,$$@) && \
 	  ($(ADB) shell "$$(PRIVATE_MAYBE_CHROOT_COMMAND) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
-	       ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) $$(PRIVATE_TARGET_EXE) \
+	       ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
+	       ANDROID_RUNTIME_ROOT=$(ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT) \
+	       $$(PRIVATE_TARGET_EXE) \
 	     && touch $$(PRIVATE_GTEST_WITNESS)" \
 	   && ($(ADB) pull $$(PRIVATE_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
 	   || $$(call ART_TEST_FAILED,$$@))
@@ -508,7 +515,8 @@
     $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \
     $$(gtest_exe) \
     $$(ART_GTEST_$(1)_HOST_DEPS) \
-    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
+    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) \
+    $(HOST_OUT_EXECUTABLES)/timeout_dumper
 
   ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
 
@@ -521,7 +529,9 @@
 $$(gtest_output): NAME := $$(gtest_rule)
 ifeq (,$(SANITIZE_HOST))
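+# Run host gtests through timeout_dumper: after 2400s, timeout(1) delivers
+# SIGRTMIN+2 so timeout_dumper can dump the test's thread stacks, and -k
+# sends SIGKILL 120s later if the test still has not exited.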
 $$(gtest_output): $$(gtest_exe) $$(gtest_deps)
-	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && $$< --gtest_output=xml:$$@ && \
+	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && \
+		timeout -k 120s -s SIGRTMIN+2 2400s $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+			$$< --gtest_output=xml:$$@ && \
 		$$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME))
 else
 # Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some
@@ -533,7 +543,9 @@
 # under ASAN.
 $$(gtest_output): $$(gtest_exe) $$(gtest_deps)
 	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \
-		ASAN_OPTIONS=detect_leaks=1 $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
+		ASAN_OPTIONS=detect_leaks=1 timeout -k 120s -s SIGRTMIN+2 3600s \
+			$(HOST_OUT_EXECUTABLES)/timeout_dumper \
+				$$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
 		{ $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \
 		( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \
 			{ echo "ABI: 'x86_64'" | cat - $$<.tmp.out | development/scripts/stack | tail -n 3000 ; } ; \
@@ -717,6 +729,7 @@
 ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST_RULES :=
 ART_GTEST_TARGET_ANDROID_ROOT :=
+ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT :=
 ART_GTEST_class_linker_test_DEX_DEPS :=
 ART_GTEST_class_table_test_DEX_DEPS :=
 ART_GTEST_compiler_driver_test_DEX_DEPS :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index e2adac1..2ad1143 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -39,8 +39,6 @@
 # Use dex2oat debug version for better error reporting
 # $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
 # $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds.
-# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
-# run-test --no-image
 define create-core-oat-host-rules
   core_compile_options :=
   core_image_name :=
@@ -80,13 +78,15 @@
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(HOST_CORE_IMG_DEX_LOCATIONS) $$(core_dex2oat_dependency)
 	@echo "host dex2oat: $$@"
 	@mkdir -p $$(dir $$@)
 	$$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
-	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
-	  $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+	  --image-classes=$$(PRELOADED_CLASSES) \
+	  $$(addprefix --dex-file=,$$(HOST_CORE_IMG_DEX_FILES)) \
+	  $$(addprefix --dex-location=,$$(HOST_CORE_IMG_DEX_LOCATIONS)) \
+	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
 	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
 	  --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
 	  $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
@@ -169,13 +169,15 @@
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(TARGET_CORE_IMG_DEX_FILES) $$(core_dex2oat_dependency)
 	@echo "target dex2oat: $$@"
 	@mkdir -p $$(dir $$@)
 	$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
-	  --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
-	  $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+	  --image-classes=$$(PRELOADED_CLASSES) \
+	  $$(addprefix --dex-file=,$$(TARGET_CORE_IMG_DEX_FILES)) \
+	  $$(addprefix --dex-location=,$$(TARGET_CORE_IMG_DEX_LOCATIONS)) \
+	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
 	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
 	  --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \
 	  --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index 159e5c1..f2e12f6 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -48,11 +48,21 @@
     "libadbconnectiond",
 ]
 
+// Files associated with bionic / managed core library time zone APIs.
+art_runtime_time_zone_prebuilts = [
+    "apex_tz_version",
+    "apex_tzdata",
+    "apex_tzlookup.xml",
+]
+
 // Modules listed in LOCAL_REQUIRED_MODULES for module art-tools in art/Android.mk.
-art_tools_binaries = [
+art_tools_common_binaries = [
     "dexdiag",
     "dexdump",
     "dexlist",
+]
+
+art_tools_device_binaries = [
     "oatdump",
 ]
 
@@ -66,12 +76,21 @@
     // ...
 ]
 
+art_tools_binaries = art_tools_common_binaries + art_tools_device_binaries
+
 apex_key {
     name: "com.android.runtime.key",
     public_key: "com.android.runtime.avbpubkey",
     private_key: "com.android.runtime.pem",
 }
 
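+// Linker config for binaries in the Runtime APEX; `installable: false`
+// keeps it off the system image, and the `prebuilts` properties below
+// package it into the APEX's etc/ directory as ld.config.txt.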
+prebuilt_etc {
+    name: "com.android.runtime.ld.config.txt",
+    src: "ld.config.txt",
+    filename: "ld.config.txt",
+    installable: false,
+}
+
 // TODO: Introduce `apex_defaults` to factor common parts of `apex`
 // module definitions below?
 
@@ -97,9 +116,9 @@
             binaries: [],
         }
     },
+    prebuilts: art_runtime_time_zone_prebuilts
+        + ["com.android.runtime.ld.config.txt"],
     key: "com.android.runtime.key",
-    // TODO: Also package a `ld.config.txt` config file (to be placed in `etc/`).
-    // ...
 }
 
 // "Debug" version of the Runtime APEX module (containing both release and
@@ -126,7 +145,43 @@
             binaries: art_tools_binaries,
         }
     },
+    prebuilts: art_runtime_time_zone_prebuilts
+        + ["com.android.runtime.ld.config.txt"],
     key: "com.android.runtime.key",
-    // TODO: Also package a `ld.config.txt` config file (to be placed in `etc/`).
-    // ...
+}
+
+// TODO: Do this better. art_apex will disable host builds when
+// HOST_PREFER_32_BIT is set. We cannot simply use com.android.runtime.debug
+// because binaries have different multilib classes and 'multilib: {}' isn't
+// supported by target: { ... }.
+// See b/120617876 for more information.
+art_apex {
+    name: "com.android.runtime.host",
+    compile_multilib: "both",
+    payload_type: "zip",
+    host_supported: true,
+    device_supported: false,
+    manifest: "manifest.json",
+    native_shared_libs: art_runtime_base_native_shared_libs
+        + art_runtime_fake_native_shared_libs
+        + art_runtime_debug_native_shared_libs,
+    multilib: {
+        both: {
+            // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
+            // (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
+            binaries: art_runtime_base_binaries_both,
+        },
+        first: {
+            // TODO: oatdump cannot link with host linux_bionic due to not using clang ld
+            binaries: art_tools_common_binaries
+                + art_runtime_base_binaries_prefer32
+                + art_runtime_debug_binaries_prefer32,
+        }
+    },
+    key: "com.android.runtime.key",
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
 }
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
new file mode 100644
index 0000000..fddb17e
--- /dev/null
+++ b/build/apex/ld.config.txt
@@ -0,0 +1,24 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Bionic loader config file for the Runtime APEX.
+#
+# There are no versioned APEX paths here - this APEX module does not support
+# having several versions mounted.
+
+dir.runtime = /apex/com.android.runtime/bin/
+
+[runtime]
+additional.namespaces = platform
+
+# Keep in sync with runtime namespace in /system/etc/ld.config.txt.
+namespace.default.isolated = true
+namespace.default.search.paths = /apex/com.android.runtime/${LIB}
+namespace.default.links = platform
+# TODO(b/119867084): Restrict fallback to platform namespace to PALette library.
+namespace.default.link.platform.allow_all_shared_libs = true
+
+# Keep in sync with default namespace in /system/etc/ld.config.txt.
+namespace.platform.isolated = true
+namespace.platform.search.paths = /system/${LIB}
+namespace.platform.links = default
+namespace.platform.link.default.shared_libs = libc.so:libdl.so:libm.so
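+#
+# Example resolution (illustrative): a binary launched from
+# /apex/com.android.runtime/bin/ starts in the default namespace above,
+# searches /apex/com.android.runtime/${LIB} first, and may fall back to
+# /system/${LIB} through the link to the "platform" namespace.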
diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh
index 86cd8cb..924c44b 100755
--- a/build/apex/runtests.sh
+++ b/build/apex/runtests.sh
@@ -33,6 +33,11 @@
 
    sudo apt-get install libguestfs-tools
 "
+which tree > /dev/null || die "This script requires the 'tree' tool.
+On Debian-based systems, this can be installed with:
+
+   sudo apt-get install tree
+"
 [[ -n "$ANDROID_PRODUCT_OUT" ]] \
   || die "You need to source and lunch before you can use this script."
 
@@ -66,54 +71,17 @@
   shift
 done
 
-work_dir=$(mktemp -d)
-mount_point="$work_dir/image"
 
-# Garbage collection.
-function finish {
-  # Don't fail early during cleanup.
-  set +e
-  guestunmount "$mount_point"
-  rm -rf "$work_dir"
+# build_apex APEX_MODULE
+# ----------------------
+# Build APEX package APEX_MODULE.
+function build_apex {
+  if $build_apex_p; then
+    local apex_module=$1
+    say "Building package $apex_module" && make "$apex_module" || die "Cannot build $apex_module"
+  fi
 }
 
-trap finish EXIT
-
-# TODO: Also exercise the Release Runtime APEX (`com.android.runtime.release`).
-apex_module="com.android.runtime.debug"
-
-# Build the Android Runtime APEX package (optional).
-$build_apex_p && say "Building package" && make "$apex_module"
-
-system_apexdir="$ANDROID_PRODUCT_OUT/system/apex"
-apex_package="$system_apexdir/$apex_module.apex"
-
-say "Extracting and mounting image"
-
-# Extract the image from the Android Runtime APEX.
-image_filename="image.img"
-unzip -q "$apex_package" "$image_filename" -d "$work_dir"
-mkdir "$mount_point"
-image_file="$work_dir/$image_filename"
-
-# Check filesystems in the image.
-image_filesystems="$work_dir/image_filesystems"
-virt-filesystems -a "$image_file" >"$image_filesystems"
-# We expect a single partition (/dev/sda) in the image.
-partition="/dev/sda"
-echo "$partition" | cmp "$image_filesystems" -
-
-# Mount the image from the Android Runtime APEX.
-guestmount -a "$image_file" -m "$partition" "$mount_point"
-
-# List the contents of the mounted image (optional).
-$list_image_files_p && say "Listing image files" && ls -ld "$mount_point" && tree -ap "$mount_point"
-
-say "Running tests"
-
-# Check that the mounted image contains a manifest.
-[[ -f "$mount_point/manifest.json" ]]
-
 function check_binary {
   [[ -x "$mount_point/bin/$1" ]] || die "Cannot find binary '$1' in mounted image"
 }
@@ -136,65 +104,242 @@
     || die "Cannot find library '$1' in mounted image"
 }
 
-# Check that the mounted image contains ART base binaries.
-check_multilib_binary dalvikvm
-# TODO: Does not work yet.
-: check_binary_symlink dalvikvm
-check_binary dex2oat
-check_binary dexoptanalyzer
-check_binary profman
+# Check contents of APEX payload located in `$mount_point`.
+function check_release_contents {
+  # Check that the mounted image contains a manifest.
+  [[ -f "$mount_point/apex_manifest.json" ]] || die "no manifest"
 
-# Check that the mounted image contains ART tools binaries.
-check_binary dexdiag
-check_binary dexdump
-check_binary dexlist
+  # Check that the mounted image contains ART base binaries.
+  check_multilib_binary dalvikvm
+  # TODO: Does not work yet (b/119942078).
+  : check_binary_symlink dalvikvm
+  check_binary dex2oat
+  check_binary dexoptanalyzer
+  check_binary profman
+
+  # oatdump is only in device APEXes due to the build rules.
+  # TODO: Check for it when it is also built for host.
+  : check_binary oatdump
+
+  # Check that the mounted image contains ART libraries.
+  check_library libart-compiler.so
+  check_library libart.so
+  check_library libopenjdkjvm.so
+  check_library libopenjdkjvmti.so
+  check_library libadbconnection.so
+  # TODO: Should we check for these libraries too, even if they are not explicitly
+  # listed as dependencies in the Android Runtime APEX module rule?
+  check_library libartbase.so
+  check_library libart-dexlayout.so
+  check_library libdexfile.so
+  check_library libprofile.so
+
+  # TODO: Should we check for other libraries, such as:
+  #
+  #   libbacktrace.so
+  #   libbase.so
+  #   liblog.so
+  #   libsigchain.so
+  #   libtombstoned_client.so
+  #   libunwindstack.so
+  #   libvixl.so
+  #   libvixld.so
+  #   ...
+  #
+  # ?
+}
+
+# Check debug contents of APEX payload located in `$mount_point`.
+function check_debug_contents {
+  # Check that the mounted image contains ART tools binaries.
+  check_binary dexdiag
+  check_binary dexdump
+  check_binary dexlist
+
+  # Check that the mounted image contains ART debug binaries.
+  check_binary dex2oatd
+  check_binary dexoptanalyzerd
+  check_binary profmand
+
+  # Check that the mounted image contains ART debug libraries.
+  check_library libartd-compiler.so
+  check_library libartd.so
+  check_library libopenjdkd.so
+  check_library libopenjdkjvmd.so
+  check_library libopenjdkjvmtid.so
+  check_library libadbconnectiond.so
+  # TODO: Should we check for these libraries too, even if they are not explicitly
+  # listed as dependencies in the Android Runtime APEX module rule?
+  check_library libdexfiled.so
+  check_library libartbased.so
+  check_library libartd-dexlayout.so
+  check_library libprofiled.so
+}
+
+# Testing target (device) APEX packages.
+# ======================================
+
+# Clean-up.
+function cleanup_target {
+  guestunmount "$mount_point"
+  rm -rf "$work_dir"
+}
+
+# Garbage collection.
+function finish_target {
+  # Don't fail early during cleanup.
+  set +e
+  cleanup_target
+}
+
+# setup_target_apex APEX_MODULE MOUNT_POINT
+# -----------------------------------------
+# Extract image from target APEX_MODULE and mount it in MOUNT_POINT.
+function setup_target_apex {
+  local apex_module=$1
+  local mount_point=$2
+  local system_apexdir="$ANDROID_PRODUCT_OUT/system/apex"
+  local apex_package="$system_apexdir/$apex_module.apex"
+
+  say "Extracting and mounting image"
+
+  # Extract the payload from the Android Runtime APEX.
+  local image_filename="apex_payload.img"
+  unzip -q "$apex_package" "$image_filename" -d "$work_dir"
+  mkdir "$mount_point"
+  local image_file="$work_dir/$image_filename"
+
+  # Check filesystems in the image.
+  local image_filesystems="$work_dir/image_filesystems"
+  virt-filesystems -a "$image_file" >"$image_filesystems"
+  # We expect a single partition (/dev/sda) in the image.
+  local partition="/dev/sda"
+  echo "$partition" | cmp "$image_filesystems" -
+
+  # Mount the image from the Android Runtime APEX.
+  guestmount -a "$image_file" -m "$partition" "$mount_point"
+
+  # List the contents of the mounted image (optional).
+  $list_image_files_p \
+    && say "Listing image files" && ls -ld "$mount_point" && tree -ap "$mount_point"
+}
+
+# Testing release APEX package (com.android.runtime.release).
+# -----------------------------------------------------------
+
+apex_module="com.android.runtime.release"
+
+work_dir=$(mktemp -d)
+mount_point="$work_dir/image"
+
+trap finish_target EXIT
+
+# Build the APEX package (optional).
+build_apex "$apex_module"
+
+# Set up APEX package.
+setup_target_apex "$apex_module" "$mount_point"
+
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
+
+# Clean up.
+trap - EXIT
+cleanup_target
+
+say "$apex_module tests passed"
+
+# Testing debug APEX package (com.android.runtime.debug).
+# -------------------------------------------------------
+
+apex_module="com.android.runtime.debug"
+
+work_dir=$(mktemp -d)
+mount_point="$work_dir/image"
+
+trap finish_target EXIT
+
+# Build the APEX package (optional).
+build_apex "$apex_module"
+
+# Set up APEX package.
+setup_target_apex "$apex_module" "$mount_point"
+
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
+check_debug_contents
+# Check for files pulled in by the debug, target-only oatdump.
 check_binary oatdump
-
-# Check that the mounted image contains ART debug binaries.
-check_binary dex2oatd
-check_binary dexoptanalyzerd
-check_binary profmand
-
-# Check that the mounted image contains ART libraries.
-check_library libart-compiler.so
-check_library libart.so
-check_library libopenjdkjvm.so
-check_library libopenjdkjvmti.so
-check_library libadbconnection.so
-# TODO: Should we check for these libraries too, even if they are not explicitly
-# listed as dependencies in the Android Runtime APEX module rule?
-check_library libartbase.so
-check_library libart-dexlayout.so
 check_library libart-disassembler.so
-check_library libdexfile.so
-check_library libprofile.so
 
-# Check that the mounted image contains ART debug libraries.
-check_library libartd-compiler.so
-check_library libartd.so
-check_library libdexfiled.so
-check_library libopenjdkd.so
-check_library libopenjdkjvmd.so
-check_library libopenjdkjvmtid.so
-check_library libadbconnectiond.so
-# TODO: Should we check for these libraries too, even if they are not explicitly
-# listed as dependencies in the Android Runtime APEX module rule?
-check_library libartbased.so
-check_library libartd-dexlayout.so
-check_library libprofiled.so
+# Clean up.
+trap - EXIT
+cleanup_target
 
-# TODO: Should we check for other libraries, such as:
-#
-#   libbacktrace.so
-#   libbase.so
-#   liblog.so
-#   libsigchain.so
-#   libtombstoned_client.so
-#   libunwindstack.so
-#   libvixl.so
-#   libvixld.so
-#   ...
-#
-# ?
+say "$apex_module tests passed"
 
-say "Tests passed"
+
+# Testing host APEX package (com.android.runtime.host).
+# =====================================================
+
+# Clean-up.
+function cleanup_host {
+  rm -rf "$work_dir"
+}
+
+# Garbage collection.
+function finish_host {
+  # Don't fail early during cleanup.
+  set +e
+  cleanup_host
+}
+
+# setup_host_apex APEX_MODULE MOUNT_POINT
+# ---------------------------------------
+# Extract the zip payload from host APEX_MODULE and unzip it into MOUNT_POINT.
+function setup_host_apex {
+  local apex_module=$1
+  local mount_point=$2
+  local system_apexdir="$ANDROID_HOST_OUT/apex"
+  local apex_package="$system_apexdir/$apex_module.zipapex"
+
+  say "Extracting payload"
+
+  # Extract the payload from the Android Runtime APEX.
+  local image_filename="apex_payload.zip"
+  unzip -q "$apex_package" "$image_filename" -d "$work_dir"
+  mkdir "$mount_point"
+  local image_file="$work_dir/$image_filename"
+
+  # Unzip the payload.
+  unzip -q "$image_file" -d "$mount_point"
+}
+
+apex_module="com.android.runtime.host"
+
+work_dir=$(mktemp -d)
+mount_point="$work_dir/zip"
+
+trap finish_host EXIT
+
+# Build the APEX package (optional).
+build_apex "$apex_module"
+
+# Set up APEX package.
+setup_host_apex "$apex_module" "$mount_point"
+
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
+check_debug_contents
+
+# Clean up.
+trap - EXIT
+cleanup_host
+
+say "$apex_module tests passed"
+
+
+say "All Android Runtime APEX tests passed"
diff --git a/build/art.go b/build/art.go
index 1c8be0f..01848c8 100644
--- a/build/art.go
+++ b/build/art.go
@@ -16,8 +16,10 @@
 
 import (
 	"android/soong/android"
+	"android/soong/apex"
 	"android/soong/cc"
 	"fmt"
+	"log"
 	"sync"
 
 	"github.com/google/blueprint/proptools"
@@ -289,6 +291,36 @@
 	android.RegisterModuleType("libart_static_cc_defaults", libartStaticDefaultsFactory)
 	android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
 	android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
+
+	// TODO: This makes the module disable itself for host if HOST_PREFER_32_BIT is
+	// set. We need this because the multilib types of binaries listed in the apex
+	// rule must match the declared type. This is normally not difficult but HOST_PREFER_32_BIT
+	// changes this to 'prefer32' on all host binaries. Since HOST_PREFER_32_BIT is
+	// only used for testing we can just disable the module.
+	// See b/120617876 for more information.
+	android.RegisterModuleType("art_apex", artApexBundleFactory)
+}
+
+func artApexBundleFactory() android.Module {
+	module := apex.ApexBundleFactory()
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) {
+		if envTrue(ctx, "HOST_PREFER_32_BIT") {
+			type props struct {
+				Target struct {
+					Host struct {
+						Enabled *bool
+					}
+				}
+			}
+
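+			// The anonymous struct mirrors the Blueprint property tree
+			// `target: { host: { enabled: } }`; AppendProperties merges it into
+			// the module as if it had been written in an Android.bp file.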
+			p := &props{}
+			p.Target.Host.Enabled = proptools.BoolPtr(false)
+			ctx.AppendProperties(p)
+			log.Print("Disabling host build of " + ctx.ModuleName() + " for HOST_PREFER_32_BIT=true")
+		}
+	})
+
+	return module
 }
 
 func artGlobalDefaultsFactory() android.Module {
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 97daafa..101e5c4 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -63,6 +63,12 @@
     return expected == actual;
   }
 
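+  // Overload for ParseStringList-valued options such as -Xbootclasspath:
+  // compare by converting the parsed list back to a plain string vector.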
+  template <char Separator>
+  bool UsuallyEquals(const std::vector<std::string>& expected,
+                     const ParseStringList<Separator>& actual) {
+    return expected == static_cast<std::vector<std::string>>(actual);
+  }
+
   // Try to use memcmp to compare simple plain-old-data structs.
   //
   // This should *not* generate false positives, but it can generate false negatives.
@@ -218,8 +224,13 @@
   }
 
   EXPECT_SINGLE_PARSE_EXISTS("-Xzygote", M::Zygote);
-  EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
-  EXPECT_SINGLE_PARSE_VALUE("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE(std::vector<std::string>({"/hello/world"}),
+                            "-Xbootclasspath:/hello/world",
+                            M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE(std::vector<std::string>({"/hello", "/world"}),
+                            "-Xbootclasspath:/hello:/world",
+                            M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-classpath /hello/world", M::ClassPath);
   EXPECT_SINGLE_PARSE_VALUE(Memory<1>(234), "-Xss234", M::StackSize);
   EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(1234*MB), "-Xms1234m", M::MemoryInitialSize);
   EXPECT_SINGLE_PARSE_VALUE(true, "-XX:EnableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM);
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 66421e2..be6da71 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -22,6 +22,7 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/callee_save_type.h"
+#include "base/casts.h"
 #include "base/enums.h"
 #include "base/utils.h"
 #include "class_linker.h"
@@ -152,6 +153,10 @@
 
     CreateCompilerDriver();
   }
+  // Note: We cannot use MemMap because some tests tear down the Runtime and destroy
+  // the gMaps, so destroying the MemMap afterwards would crash the test.
+  inaccessible_page_ = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  CHECK(inaccessible_page_ != MAP_FAILED) << strerror(errno);
 }
 
 void CommonCompilerTest::ApplyInstructionSet() {
@@ -190,9 +195,7 @@
   compiler_options_->image_classes_.swap(*GetImageClasses());
   compiler_options_->profile_compilation_info_ = GetProfileCompilationInfo();
   compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
-                                            verification_results_.get(),
                                             compiler_kind_,
-                                            &compiler_options_->image_classes_,
                                             number_of_threads_,
                                             /* swap_fd */ -1));
 }
@@ -222,6 +225,10 @@
   verification_results_.reset();
   compiler_options_.reset();
   image_reservation_.Reset();
+  if (inaccessible_page_ != nullptr) {
+    munmap(inaccessible_page_, kPageSize);
+    inaccessible_page_ = nullptr;
+  }
 
   CommonRuntimeTest::TearDown();
 }
@@ -267,8 +274,16 @@
 
     compiler_driver_->InitializeThreadPools();
 
-    compiler_driver_->PreCompile(class_loader, dex_files, &timings);
+    compiler_driver_->PreCompile(class_loader,
+                                 dex_files,
+                                 &timings,
+                                 &compiler_options_->image_classes_,
+                                 verification_results_.get());
 
+    // Verification results in the `callback_` should not be used during compilation.
+    down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+        reinterpret_cast<VerificationResults*>(inaccessible_page_));
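+    // Any stray use of the callbacks' results during compilation now faults
+    // immediately, because `inaccessible_page_` is mapped PROT_NONE.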
+    compiler_options_->verification_results_ = verification_results_.get();
     compiler_driver_->CompileOne(self,
                                  class_loader,
                                  *dex_file,
@@ -279,6 +294,9 @@
                                  code_item,
                                  dex_cache,
                                  h_class_loader);
+    compiler_options_->verification_results_ = nullptr;
+    down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+        verification_results_.get());
 
     compiler_driver_->FreeThreadPools();
 
@@ -334,6 +352,32 @@
   CHECK(image_reservation_.IsValid()) << error_msg;
 }
 
+void CommonCompilerTest::CompileAll(jobject class_loader,
+                                    const std::vector<const DexFile*>& dex_files,
+                                    TimingLogger* timings) {
+  TimingLogger::ScopedTiming t(__FUNCTION__, timings);
+  SetDexFilesForOatFile(dex_files);
+
+  compiler_driver_->InitializeThreadPools();
+
+  compiler_driver_->PreCompile(class_loader,
+                               dex_files,
+                               timings,
+                               &compiler_options_->image_classes_,
+                               verification_results_.get());
+
+  // Verification results in the `callback_` should not be used during compilation.
+  down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+      reinterpret_cast<VerificationResults*>(inaccessible_page_));
+  compiler_options_->verification_results_ = verification_results_.get();
+  compiler_driver_->CompileAll(class_loader, dex_files, timings);
+  compiler_options_->verification_results_ = nullptr;
+  down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+      verification_results_.get());
+
+  compiler_driver_->FreeThreadPools();
+}
+
 void CommonCompilerTest::UnreserveImageSpace() {
   image_reservation_.Reset();
 }
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index e6d1564..a71908e 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -20,6 +20,8 @@
 #include <list>
 #include <vector>
 
+#include <jni.h>
+
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
 #include "base/hash_set.h"
@@ -37,6 +39,7 @@
 class CumulativeLogger;
 class DexFile;
 class ProfileCompilationInfo;
+class TimingLogger;
 class VerificationResults;
 
 template<class T> class Handle;
@@ -88,6 +91,10 @@
                             const char* method_name, const char* signature)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void CompileAll(jobject class_loader,
+                  const std::vector<const DexFile*>& dex_files,
+                  TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
+
   void ApplyInstructionSet();
   void OverrideInstructionSetFeatures(InstructionSet instruction_set, const std::string& variant);
 
@@ -116,6 +123,7 @@
 
  private:
   MemMap image_reservation_;
+  void* inaccessible_page_;
 
   // Chunks must not move their storage after being created - use the node-based std::list.
   std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 71422d4..1ecb1d8e 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -16,8 +16,9 @@
 
 #include "elf_debug_writer.h"
 
-#include <vector>
+#include <type_traits>
 #include <unordered_map>
+#include <vector>
 
 #include "base/array_ref.h"
 #include "debug/dwarf/dwarf_constants.h"
@@ -29,6 +30,7 @@
 #include "debug/elf_symtab_writer.h"
 #include "debug/method_debug_info.h"
 #include "debug/xz_utils.h"
+#include "elf.h"
 #include "linker/elf_builder.h"
 #include "linker/vector_output_stream.h"
 #include "oat.h"
@@ -36,6 +38,8 @@
 namespace art {
 namespace debug {
 
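+// The JIT only generates debug ELF files for the architecture it runs on,
+// so the ElfTypes template parameter can be fixed from the process pointer
+// size instead of being dispatched on the instruction set at runtime.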
+using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
+
 template <typename ElfTypes>
 void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder,
                     const DebugInfo& debug_info,
@@ -134,7 +138,7 @@
   CHECK(builder->Good());
   std::vector<uint8_t> compressed_buffer;
   compressed_buffer.reserve(buffer.size() / 4);
-  XzCompress(ArrayRef<uint8_t>(buffer), &compressed_buffer);
+  XzCompress(ArrayRef<const uint8_t>(buffer), &compressed_buffer);
   return compressed_buffer;
 }
 
@@ -165,22 +169,16 @@
   }
 }
 
-template <typename ElfTypes>
-static std::vector<uint8_t> MakeElfFileForJITInternal(
+std::vector<uint8_t> MakeElfFileForJIT(
     InstructionSet isa,
     const InstructionSetFeatures* features,
     bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos) {
-  CHECK_GT(method_infos.size(), 0u);
-  uint64_t min_address = std::numeric_limits<uint64_t>::max();
-  uint64_t max_address = 0;
-  for (const MethodDebugInfo& mi : method_infos) {
-    CHECK_EQ(mi.is_code_address_text_relative, false);
-    min_address = std::min(min_address, mi.code_address);
-    max_address = std::max(max_address, mi.code_address + mi.code_size);
-  }
+    const MethodDebugInfo& method_info) {
+  using ElfTypes = ElfRuntimeTypes;
+  CHECK_EQ(sizeof(ElfTypes::Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+  CHECK_EQ(method_info.is_code_address_text_relative, false);
   DebugInfo debug_info{};
-  debug_info.compiled_methods = method_infos;
+  debug_info.compiled_methods = ArrayRef<const MethodDebugInfo>(&method_info, 1);
   std::vector<uint8_t> buffer;
   buffer.reserve(KB);
   linker::VectorOutputStream out("Debug ELF file", &buffer);
@@ -188,28 +186,16 @@
       new linker::ElfBuilder<ElfTypes>(isa, features, &out));
   // No program headers since the ELF file is not linked and has no allocated sections.
   builder->Start(false /* write_program_headers */);
+  builder->GetText()->AllocateVirtualMemory(method_info.code_address, method_info.code_size);
   if (mini_debug_info) {
-    if (method_infos.size() > 1) {
-      std::vector<uint8_t> mdi = MakeMiniDebugInfo(isa,
-                                                   features,
-                                                   min_address,
-                                                   max_address - min_address,
-                                                   /* dex_section_address */ 0,
-                                                   /* dex_section_size */ 0,
-                                                   debug_info);
-      builder->WriteSection(".gnu_debugdata", &mdi);
-    } else {
-      // The compression is great help for multiple methods but it is not worth it for a
-      // single method due to the overheads so skip the compression here for performance.
-      builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
-      WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
-      WriteCFISection(builder.get(),
-                      debug_info.compiled_methods,
-                      dwarf::DW_DEBUG_FRAME_FORMAT,
-                      false /* write_oat_paches */);
-    }
+    // The compression is great help for multiple methods but it is not worth it for a
+    // single method due to the overheads so skip the compression here for performance.
+    WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
+    WriteCFISection(builder.get(),
+                    debug_info.compiled_methods,
+                    dwarf::DW_DEBUG_FRAME_FORMAT,
+                    false /* write_oat_patches */);
   } else {
-    builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
     WriteDebugInfo(builder.get(),
                    debug_info,
                    dwarf::DW_DEBUG_FRAME_FORMAT,
@@ -220,24 +206,13 @@
   return buffer;
 }
 
-std::vector<uint8_t> MakeElfFileForJIT(
-    InstructionSet isa,
-    const InstructionSetFeatures* features,
-    bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos) {
-  if (Is64BitInstructionSet(isa)) {
-    return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_infos);
-  } else {
-    return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_infos);
-  }
-}
-
-template <typename ElfTypes>
-static std::vector<uint8_t> WriteDebugElfFileForClassesInternal(
+std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
     const InstructionSetFeatures* features,
     const ArrayRef<mirror::Class*>& types)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  using ElfTypes = ElfRuntimeTypes;
+  CHECK_EQ(sizeof(ElfTypes::Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
   std::vector<uint8_t> buffer;
   buffer.reserve(KB);
   linker::VectorOutputStream out("Debug ELF file", &buffer);
@@ -256,16 +231,6 @@
   return buffer;
 }
 
-std::vector<uint8_t> WriteDebugElfFileForClasses(InstructionSet isa,
-                                                 const InstructionSetFeatures* features,
-                                                 const ArrayRef<mirror::Class*>& types) {
-  if (Is64BitInstructionSet(isa)) {
-    return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, features, types);
-  } else {
-    return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, features, types);
-  }
-}
-
 // Explicit instantiations
 template void WriteDebugInfo<ElfTypes32>(
     linker::ElfBuilder<ElfTypes32>* builder,
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index e442e00..8ad0c42 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -54,7 +54,7 @@
     InstructionSet isa,
     const InstructionSetFeatures* features,
     bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos);
+    const MethodDebugInfo& method_info);
 
 std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
diff --git a/compiler/debug/xz_utils.cc b/compiler/debug/xz_utils.cc
index a9e30a6..a8f60ac 100644
--- a/compiler/debug/xz_utils.cc
+++ b/compiler/debug/xz_utils.cc
@@ -17,13 +17,16 @@
 #include "xz_utils.h"
 
+#include <mutex>
 #include <vector>
 
 #include "base/array_ref.h"
-#include "dwarf/writer.h"
+#include "base/bit_utils.h"
 #include "base/leb128.h"
+#include "dwarf/writer.h"
 
 // liblzma.
 #include "7zCrc.h"
+#include "Xz.h"
 #include "XzCrc64.h"
 #include "XzEnc.h"
 
@@ -32,10 +35,17 @@
 
 constexpr size_t kChunkSize = kPageSize;
 
-static void XzCompressChunk(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
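+// liblzma's CRC tables are global and their generation is not thread-safe,
+// so build them exactly once before any (possibly concurrent) use.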
+static void XzInitCrc() {
+  static std::once_flag crc_initialized;
+  std::call_once(crc_initialized, []() {
+    CrcGenerateTable();
+    Crc64GenerateTable();
+  });
+}
+
+static void XzCompressChunk(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
   // Configure the compression library.
-  CrcGenerateTable();
-  Crc64GenerateTable();
+  XzInitCrc();
   CLzma2EncProps lzma2Props;
   Lzma2EncProps_Init(&lzma2Props);
   lzma2Props.lzmaProps.level = 1;  // Fast compression.
@@ -62,7 +72,7 @@
       return SZ_OK;
     }
     size_t src_pos_;
-    ArrayRef<uint8_t> src_;
+    ArrayRef<const uint8_t> src_;
     std::vector<uint8_t>* dst_;
   };
   XzCallbacks callbacks;
@@ -85,7 +95,7 @@
 // In short, the file format is: [header] [compressed_block]* [index] [footer]
 // Where [index] is: [num_records] ([compressed_size] [uncompressed_size])* [crc32]
 //
-void XzCompress(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
+void XzCompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
   uint8_t header[] = { 0xFD, '7', 'z', 'X', 'Z', 0, 0, 1, 0x69, 0x22, 0xDE, 0x36 };
   uint8_t footer[] = { 0, 1, 'Y', 'Z' };
   dst->insert(dst->end(), header, header + sizeof(header));
@@ -138,6 +148,47 @@
     writer.UpdateUint32(0, CrcCalc(tmp.data() + 4, 6));
     dst->insert(dst->end(), tmp.begin(), tmp.end());
   }
+
+  // Decompress the data back and check that we get the original.
+  if (kIsDebugBuild) {
+    std::vector<uint8_t> decompressed;
+    XzDecompress(ArrayRef<const uint8_t>(*dst), &decompressed);
+    DCHECK_EQ(decompressed.size(), src.size());
+    DCHECK_EQ(memcmp(decompressed.data(), src.data(), src.size()), 0);
+  }
+}
+
+void XzDecompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
+  XzInitCrc();
+  std::unique_ptr<CXzUnpacker> state(new CXzUnpacker());
+  ISzAlloc alloc;
+  alloc.Alloc = [](ISzAllocPtr, size_t size) { return malloc(size); };
+  alloc.Free = [](ISzAllocPtr, void* ptr) { return free(ptr); };
+  XzUnpacker_Construct(state.get(), &alloc);
+
+  size_t src_offset = 0;
+  size_t dst_offset = 0;
+  ECoderStatus status;
+  do {
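+    // Grow the output at least a quarter page at a time, rounded to whole
+    // pages; XzUnpacker_Code updates src_remaining/dst_remaining to the
+    // number of bytes actually consumed and produced.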
+    dst->resize(RoundUp(dst_offset + kPageSize / 4, kPageSize));
+    size_t src_remaining = src.size() - src_offset;
+    size_t dst_remaining = dst->size() - dst_offset;
+    int return_val = XzUnpacker_Code(state.get(),
+                                     dst->data() + dst_offset,
+                                     &dst_remaining,
+                                     src.data() + src_offset,
+                                     &src_remaining,
+                                     true,
+                                     CODER_FINISH_ANY,
+                                     &status);
+    CHECK_EQ(return_val, SZ_OK);
+    src_offset += src_remaining;
+    dst_offset += dst_remaining;
+  } while (status == CODER_STATUS_NOT_FINISHED);
+  CHECK_EQ(src_offset, src.size());
+  CHECK(XzUnpacker_IsStreamWasFinished(state.get()));
+  XzUnpacker_Free(state.get());
+  dst->resize(dst_offset);
 }
 
 }  // namespace debug
diff --git a/compiler/debug/xz_utils.h b/compiler/debug/xz_utils.h
index c4076c6..731b03c 100644
--- a/compiler/debug/xz_utils.h
+++ b/compiler/debug/xz_utils.h
@@ -24,7 +24,8 @@
 namespace art {
 namespace debug {
 
-void XzCompress(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst);
+void XzCompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst);
+void XzDecompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst);
 
 }  // namespace debug
 }  // namespace art
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 04ad10c..c124ef5 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -528,7 +528,7 @@
       class_def_idx,
       method_idx,
       access_flags,
-      driver_->GetVerifiedMethod(&dex_file, method_idx),
+      driver_->GetCompilerOptions().GetVerifiedMethod(&dex_file, method_idx),
       hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
 
   std::vector<uint8_t> quicken_data;
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 7536c31..7253488 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -22,6 +22,7 @@
 #include <unordered_set>
 
 #include "base/bit_vector.h"
+#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "dex/invoke_type.h"
 #include "dex/method_reference.h"
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index f61e6c4..b055416 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -39,16 +39,15 @@
 class DexToDexDecompilerTest : public CommonCompilerTest {
  public:
   void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
-    TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
-    TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
+    TimingLogger timings("DexToDexDecompilerTest::CompileAll", false, false);
     compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
     compiler_options_->SetCompilerFilter(CompilerFilter::kQuicken);
     // Create the main VerifierDeps, here instead of in the compiler since we want to aggregate
     // the results for all the dex files, not just the results for the current dex file.
     down_cast<QuickCompilerCallbacks*>(Runtime::Current()->GetCompilerCallbacks())->SetVerifierDeps(
         new verifier::VerifierDeps(GetDexFiles(class_loader)));
-    SetDexFilesForOatFile(GetDexFiles(class_loader));
-    compiler_driver_->CompileAll(class_loader, GetDexFiles(class_loader), &timings);
+    std::vector<const DexFile*> dex_files = GetDexFiles(class_loader);
+    CommonCompilerTest::CompileAll(class_loader, dex_files, &timings);
   }
 
   void RunTest(const char* dex_name) {
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index dd947d9..5a34efb 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -97,7 +97,7 @@
   }
 }
 
-const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) {
+const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) const {
   const VerifiedMethod* ret = nullptr;
   if (atomic_verified_methods_.Get(ref, &ret)) {
     return ret;
@@ -129,13 +129,13 @@
   DCHECK(IsClassRejected(ref));
 }
 
-bool VerificationResults::IsClassRejected(ClassReference ref) {
+bool VerificationResults::IsClassRejected(ClassReference ref) const {
   ReaderMutexLock mu(Thread::Current(), rejected_classes_lock_);
   return (rejected_classes_.find(ref) != rejected_classes_.end());
 }
 
 bool VerificationResults::IsCandidateForCompilation(MethodReference&,
-                                                    const uint32_t access_flags) {
+                                                    const uint32_t access_flags) const {
   if (!compiler_options_->IsAotCompilationEnabled()) {
     return false;
   }
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 56f0030..04c4fa6 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -51,13 +51,13 @@
   void CreateVerifiedMethodFor(MethodReference ref)
       REQUIRES(!verified_methods_lock_);
 
-  const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
+  const VerifiedMethod* GetVerifiedMethod(MethodReference ref) const
       REQUIRES(!verified_methods_lock_);
 
   void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
-  bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+  bool IsClassRejected(ClassReference ref) const REQUIRES(!rejected_classes_lock_);
 
-  bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags);
+  bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags) const;
 
   // Add a dex file to enable using the atomic map.
   void AddDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
@@ -74,10 +74,12 @@
   // GetVerifiedMethod.
   AtomicMap atomic_verified_methods_;
 
-  ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // TODO: External locking during CompilerDriver::PreCompile(), no locking during compilation.
+  mutable ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
   // Rejected classes.
-  ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // TODO: External locking during CompilerDriver::PreCompile(), no locking during compilation.
+  mutable ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::set<ClassReference> rejected_classes_ GUARDED_BY(rejected_classes_lock_);
 
   friend class verifier::VerifierDepsTest;
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 792f508..63dcb46 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -99,11 +99,6 @@
   return std::make_pair(fast_get, fast_put);
 }
 
-inline VerificationResults* CompilerDriver::GetVerificationResults() const {
-  DCHECK(Runtime::Current()->IsAotCompiler());
-  return verification_results_;
-}
-
 }  // namespace art
 
 #endif  // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 67cabef..18f7105 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -245,16 +245,12 @@
 
 CompilerDriver::CompilerDriver(
     const CompilerOptions* compiler_options,
-    VerificationResults* verification_results,
     Compiler::Kind compiler_kind,
-    HashSet<std::string>* image_classes,
     size_t thread_count,
     int swap_fd)
     : compiler_options_(compiler_options),
-      verification_results_(verification_results),
       compiler_(Compiler::Create(this, compiler_kind)),
       compiler_kind_(compiler_kind),
-      image_classes_(std::move(image_classes)),
       number_of_soft_verifier_failures_(0),
       had_hard_verifier_failure_(false),
       parallel_thread_count_(thread_count),
@@ -266,10 +262,6 @@
 
   compiler_->Init();
 
-  if (GetCompilerOptions().IsBootImage()) {
-    CHECK(image_classes_ != nullptr) << "Expected image classes for boot image";
-  }
-
   compiled_method_storage_.SetDedupeEnabled(compiler_options_->DeduplicateCode());
 }
 
@@ -325,9 +317,8 @@
                                 TimingLogger* timings) {
   DCHECK(!Runtime::Current()->IsStarted());
 
-  InitializeThreadPools();
+  CheckThreadPools();
 
-  PreCompile(class_loader, dex_files, timings);
   if (GetCompilerOptions().IsBootImage()) {
     // We don't need to setup the intrinsics for non boot image compilation, as
     // those compilations will pick up a boot image that have the ArtMethod already
@@ -343,8 +334,6 @@
   if (GetCompilerOptions().GetDumpStats()) {
     stats_->Dump();
   }
-
-  FreeThreadPools();
 }
 
 static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel(
@@ -496,7 +485,7 @@
     optimizer::DexToDexCompiler* const compiler = &driver->GetDexToDexCompiler();
 
     if (compiler->ShouldCompileMethod(method_ref)) {
-      VerificationResults* results = driver->GetVerificationResults();
+      const VerificationResults* results = driver->GetCompilerOptions().GetVerificationResults();
       DCHECK(results != nullptr);
       const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
       // Do not optimize if a VerifiedMethod is missing. SafeCast elision,
@@ -574,7 +563,7 @@
     } else if ((access_flags & kAccAbstract) != 0) {
       // Abstract methods don't have code.
     } else {
-      VerificationResults* results = driver->GetVerificationResults();
+      const VerificationResults* results = driver->GetCompilerOptions().GetVerificationResults();
       DCHECK(results != nullptr);
       const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
       bool compile =
@@ -881,7 +870,9 @@
 
 void CompilerDriver::PreCompile(jobject class_loader,
                                 const std::vector<const DexFile*>& dex_files,
-                                TimingLogger* timings) {
+                                TimingLogger* timings,
+                                /*inout*/ HashSet<std::string>* image_classes,
+                                /*out*/ VerificationResults* verification_results) {
   CheckThreadPools();
 
   VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
@@ -898,7 +889,7 @@
   // 6) Update the set of image classes.
   // 7) For deterministic boot image, initialize bitstrings for type checking.
 
-  LoadImageClasses(timings);
+  LoadImageClasses(timings, image_classes);
   VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
 
   if (compiler_options_->IsAnyCompilationEnabled()) {
@@ -932,7 +923,7 @@
     ResolveConstStrings(dex_files, /*only_startup_strings=*/ true, timings);
   }
 
-  Verify(class_loader, dex_files, timings);
+  Verify(class_loader, dex_files, timings, verification_results);
   VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
 
   if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
@@ -957,7 +948,7 @@
     VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
   }
 
-  UpdateImageClasses(timings);
+  UpdateImageClasses(timings, image_classes);
   VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString(false);
 
   if (kBitstringSubtypeCheckEnabled &&
@@ -1071,7 +1062,8 @@
 };
 
 // Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
+void CompilerDriver::LoadImageClasses(TimingLogger* timings,
+                                      /*inout*/ HashSet<std::string>* image_classes) {
   CHECK(timings != nullptr);
   if (!GetCompilerOptions().IsBootImage()) {
     return;
@@ -1082,15 +1074,15 @@
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  CHECK(image_classes_ != nullptr);
-  for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
+  CHECK(image_classes != nullptr);
+  for (auto it = image_classes->begin(), end = image_classes->end(); it != end;) {
     const std::string& descriptor(*it);
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> klass(
         hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
     if (klass == nullptr) {
       VLOG(compiler) << "Failed to find class " << descriptor;
-      it = image_classes_->erase(it);
+      it = image_classes->erase(it);
       self->ClearException();
     } else {
       ++it;
@@ -1140,10 +1132,10 @@
   // We walk the roots looking for classes so that we'll pick up the
   // above classes plus any classes they depend on, such as super
   // classes, interfaces, and the required ClassLinker roots.
-  RecordImageClassesVisitor visitor(image_classes_);
+  RecordImageClassesVisitor visitor(image_classes);
   class_linker->VisitClasses(&visitor);
 
-  CHECK(!image_classes_->empty());
+  CHECK(!image_classes->empty());
 }
 
 static void MaybeAddToImageClasses(Thread* self,
@@ -1312,7 +1304,8 @@
   DISALLOW_COPY_AND_ASSIGN(ClinitImageUpdate);
 };
 
-void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
+void CompilerDriver::UpdateImageClasses(TimingLogger* timings,
+                                        /*inout*/ HashSet<std::string>* image_classes) {
   if (GetCompilerOptions().IsBootImage()) {
     TimingLogger::ScopedTiming t("UpdateImageClasses", timings);
 
@@ -1324,7 +1317,7 @@
     VariableSizedHandleScope hs(Thread::Current());
     std::string error_msg;
     std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(hs,
-                                                                        image_classes_,
+                                                                        image_classes,
                                                                         Thread::Current(),
                                                                         runtime->GetClassLinker()));
 
@@ -1393,12 +1386,6 @@
   }
 }
 
-const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file,
-                                                        uint32_t method_idx) const {
-  MethodReference ref(dex_file, method_idx);
-  return verification_results_->GetVerifiedMethod(ref);
-}
-
 bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc) {
   if (!compiler_options_->IsVerificationEnabled()) {
     // If we didn't verify, every cast has to be treated as non-safe.
@@ -1764,7 +1751,8 @@
 
 bool CompilerDriver::FastVerify(jobject jclass_loader,
                                 const std::vector<const DexFile*>& dex_files,
-                                TimingLogger* timings) {
+                                TimingLogger* timings,
+                                /*out*/ VerificationResults* verification_results) {
   verifier::VerifierDeps* verifier_deps =
       Runtime::Current()->GetCompilerCallbacks()->GetVerifierDeps();
   // If VerifierDeps exist that aren't the ones we just created for output, use them to verify.
@@ -1812,7 +1800,7 @@
           // - Quickening will not do checkcast elision.
           // TODO(ngeoffray): Reconsider this once we refactor compiler filters.
           for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-            verification_results_->CreateVerifiedMethodFor(method.GetReference());
+            verification_results->CreateVerifiedMethodFor(method.GetReference());
           }
         }
       } else if (!compiler_only_verifies) {
@@ -1830,8 +1818,9 @@
 
 void CompilerDriver::Verify(jobject jclass_loader,
                             const std::vector<const DexFile*>& dex_files,
-                            TimingLogger* timings) {
-  if (FastVerify(jclass_loader, dex_files, timings)) {
+                            TimingLogger* timings,
+                            /*out*/ VerificationResults* verification_results) {
+  if (FastVerify(jclass_loader, dex_files, timings, verification_results)) {
     return;
   }
 
@@ -2530,7 +2519,7 @@
     // SetVerificationAttempted so that the access flags are set. If we do not do this, they get
     // changed at runtime, resulting in more dirty image pages.
     // Also create conflict tables.
-    // Only useful if we are compiling an image (image_classes_ is not null).
+    // Only useful if we are compiling an image.
     ScopedObjectAccess soa(Thread::Current());
     VariableSizedHandleScope hs(soa.Self());
     InitializeArrayClassesAndCreateConflictTablesVisitor visitor(hs);
@@ -2569,8 +2558,9 @@
     ClassReference ref(&dex_file, class_def_index);
     const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
     ClassAccessor accessor(dex_file, class_def_index);
+    CompilerDriver* const driver = context.GetCompiler();
     // Skip compiling classes with generic verifier failures since they will still fail at runtime
-    if (context.GetCompiler()->GetVerificationResults()->IsClassRejected(ref)) {
+    if (driver->GetCompilerOptions().GetVerificationResults()->IsClassRejected(ref)) {
       return;
     }
     // Use a scoped object access to perform to the quick SkipClass check.
@@ -2602,8 +2592,6 @@
     // Go to native so that we don't block GC during compilation.
     ScopedThreadSuspension sts(soa.Self(), kNative);
 
-    CompilerDriver* const driver = context.GetCompiler();
-
     // Can we run the DEX-to-DEX compiler on this class?
     optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
         GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def);
@@ -2775,31 +2763,6 @@
   return compiled_method;
 }
 
-bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
-                                                     uint16_t class_def_idx,
-                                                     const DexFile& dex_file) const {
-  const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx);
-  if (verified_method != nullptr) {
-    return !verified_method->HasVerificationFailures();
-  }
-
-  // If we can't find verification metadata, check if this is a system class (we trust that system
-  // classes have their methods verified). If it's not, be conservative and assume the method
-  // has not been verified successfully.
-
-  // TODO: When compiling the boot image it should be safe to assume that everything is verified,
-  // even if methods are not found in the verification cache.
-  const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx));
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  Thread* self = Thread::Current();
-  ScopedObjectAccess soa(self);
-  bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr;
-  if (!is_system_class) {
-    self->ClearException();
-  }
-  return is_system_class;
-}
-
 std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
   std::ostringstream oss;
   const gc::Heap* const heap = Runtime::Current()->GetHeap();
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 714b2d1..7c0fc64 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -76,7 +76,6 @@
 class TimingLogger;
 class VdexFile;
 class VerificationResults;
-class VerifiedMethod;
 
 enum EntryPointCallingConvention {
   // ABI of invocations to a method's interpreter entry point.
@@ -95,9 +94,7 @@
   // can assume will be in the image, with null implying all available
   // classes.
   CompilerDriver(const CompilerOptions* compiler_options,
-                 VerificationResults* verification_results,
                  Compiler::Kind compiler_kind,
-                 HashSet<std::string>* image_classes,
                  size_t thread_count,
                  int swap_fd);
 
@@ -106,6 +103,17 @@
   // Set dex files classpath.
   void SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files);
 
+  // Initialize and destroy thread pools. These are exposed so that a caller
+  // using both PreCompile() and CompileAll() does not create the pools twice.
+  void InitializeThreadPools();
+  void FreeThreadPools();
+
+  void PreCompile(jobject class_loader,
+                  const std::vector<const DexFile*>& dex_files,
+                  TimingLogger* timings,
+                  /*inout*/ HashSet<std::string>* image_classes,
+                  /*out*/ VerificationResults* verification_results)
+      REQUIRES(!Locks::mutator_lock_);
   void CompileAll(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
                   TimingLogger* timings)
@@ -124,8 +132,6 @@
                   Handle<mirror::ClassLoader> h_class_loader)
       REQUIRES(!Locks::mutator_lock_);
 
-  VerificationResults* GetVerificationResults() const;
-
   const CompilerOptions& GetCompilerOptions() const {
     return *compiler_options_;
   }
@@ -194,7 +200,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
 
-  const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
   bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc);
 
   size_t GetThreadCount() const {
@@ -219,12 +224,6 @@
 
   void RecordClassStatus(const ClassReference& ref, ClassStatus status);
 
-  // Checks if the specified method has been verified without failures. Returns
-  // false if the method is not in the verification results (GetVerificationResults).
-  bool IsMethodVerifiedWithoutFailures(uint32_t method_idx,
-                                       uint16_t class_def_idx,
-                                       const DexFile& dex_file) const;
-
   // Get memory usage during compilation.
   std::string GetMemoryUsageString(bool extended) const;
 
@@ -265,13 +264,9 @@
   }
 
  private:
-  void PreCompile(jobject class_loader,
-                  const std::vector<const DexFile*>& dex_files,
-                  TimingLogger* timings)
+  void LoadImageClasses(TimingLogger* timings, /*inout*/ HashSet<std::string>* image_classes)
       REQUIRES(!Locks::mutator_lock_);
 
-  void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
-
   // Attempt to resolve all type, methods, fields, and strings
   // referenced from code in the dex file following PathClassLoader
   // ordering semantics.
@@ -291,11 +286,13 @@
   // verification was successful.
   bool FastVerify(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
-                  TimingLogger* timings);
+                  TimingLogger* timings,
+                  /*out*/ VerificationResults* verification_results);
 
   void Verify(jobject class_loader,
               const std::vector<const DexFile*>& dex_files,
-              TimingLogger* timings);
+              TimingLogger* timings,
+              /*out*/ VerificationResults* verification_results);
 
   void VerifyDexFile(jobject class_loader,
                      const DexFile& dex_file,
@@ -326,14 +323,13 @@
                          TimingLogger* timings)
       REQUIRES(!Locks::mutator_lock_);
 
-  void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
+  void UpdateImageClasses(TimingLogger* timings, /*inout*/ HashSet<std::string>* image_classes)
+      REQUIRES(!Locks::mutator_lock_);
 
   void Compile(jobject class_loader,
                const std::vector<const DexFile*>& dex_files,
                TimingLogger* timings);
 
-  void InitializeThreadPools();
-  void FreeThreadPools();
   void CheckThreadPools();
 
   // Resolve const string literals that are loaded from dex code. If only_startup_strings is
@@ -343,7 +339,6 @@
                            /*inout*/ TimingLogger* timings);
 
   const CompilerOptions* const compiler_options_;
-  VerificationResults* const verification_results_;
 
   std::unique_ptr<Compiler> compiler_;
   Compiler::Kind compiler_kind_;
@@ -359,12 +354,6 @@
   // All method references that this compiler has compiled.
   MethodTable compiled_methods_;
 
-  // Image classes to be updated by PreCompile().
-  // TODO: Remove this member which is a non-const pointer to the CompilerOptions' data.
-  //       Pass this explicitly to the PreCompile() which should be called directly from
-  //       Dex2Oat rather than implicitly by CompileAll().
-  HashSet<std::string>* image_classes_;
-
   std::atomic<uint32_t> number_of_soft_verifier_failures_;
 
   bool had_hard_verifier_failure_;
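
With image_classes_ and verification_results_ no longer stored on CompilerDriver, the caller owns both and threads them through explicitly. A minimal sketch of the call order this change establishes (local names are illustrative; the real sequence is in Dex2Oat::Compile() later in this diff):

    driver->InitializeThreadPools();
    driver->PreCompile(class_loader,
                       dex_files,
                       timings,
                       /*inout*/ &compiler_options->image_classes_,
                       /*out*/ verification_results.get());
    // After PreCompile(), CompilerOptions keeps a read-only view of the results.
    compiler_options->verification_results_ = verification_results.get();
    driver->CompileAll(class_loader, dex_files, timings);
    driver->FreeThreadPools();
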
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index fe1568d..b924129 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -42,20 +42,18 @@
 
 class CompilerDriverTest : public CommonCompilerTest {
  protected:
-  void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
-    TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
-    TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
+  void CompileAllAndMakeExecutable(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
+    TimingLogger timings("CompilerDriverTest::CompileAllAndMakeExecutable", false, false);
     dex_files_ = GetDexFiles(class_loader);
-    SetDexFilesForOatFile(dex_files_);
-    compiler_driver_->CompileAll(class_loader, dex_files_, &timings);
-    t.NewTiming("MakeAllExecutable");
+    CompileAll(class_loader, dex_files_, &timings);
+    TimingLogger::ScopedTiming t("MakeAllExecutable", &timings);
     MakeAllExecutable(class_loader);
   }
 
   void EnsureCompiled(jobject class_loader, const char* class_name, const char* method,
                       const char* signature, bool is_virtual)
       REQUIRES(!Locks::mutator_lock_) {
-    CompileAll(class_loader);
+    CompileAllAndMakeExecutable(class_loader);
     Thread::Current()->TransitionFromSuspendedToRunnable();
     bool started = runtime_->Start();
     CHECK(started);
@@ -106,7 +104,7 @@
 // Disabled due to 10 second runtime on host
 // TODO: Update the test for hash-based dex cache arrays. Bug: 30627598
 TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
-  CompileAll(nullptr);
+  CompileAllAndMakeExecutable(nullptr);
 
   // All libcore references should resolve
   ScopedObjectAccess soa(Thread::Current());
@@ -266,7 +264,7 @@
     ASSERT_TRUE(dex_file->EnableWrite());
   }
 
-  CompileAll(class_loader);
+  CompileAllAndMakeExecutable(class_loader);
 
   std::unordered_set<std::string> m = GetExpectedMethodsForClass("Main");
   std::unordered_set<std::string> s = GetExpectedMethodsForClass("Second");
@@ -310,7 +308,7 @@
   }
   ASSERT_NE(class_loader, nullptr);
 
-  CompileAll(class_loader);
+  CompileAllAndMakeExecutable(class_loader);
 
   CheckVerifiedClass(class_loader, "LMain;");
   CheckVerifiedClass(class_loader, "LSecond;");
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index b28c7e0..8d1ae3d 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -24,9 +24,14 @@
 #include "arch/instruction_set_features.h"
 #include "base/runtime_debug.h"
 #include "base/variant_map.h"
+#include "class_linker.h"
 #include "cmdline_parser.h"
 #include "compiler_options_map-inl.h"
+#include "dex/dex_file-inl.h"
+#include "dex/verification_results.h"
+#include "dex/verified_method.h"
 #include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
 #include "simple_compiler_options_map.h"
 
 namespace art {
@@ -44,6 +49,7 @@
       no_inline_from_(),
       dex_files_for_oat_file_(),
       image_classes_(),
+      verification_results_(nullptr),
       image_type_(ImageType::kNone),
       compiling_with_core_image_(false),
       baseline_(false),
@@ -141,4 +147,40 @@
   return image_classes_.find(StringPiece(descriptor)) != image_classes_.end();
 }
 
+const VerificationResults* CompilerOptions::GetVerificationResults() const {
+  DCHECK(Runtime::Current()->IsAotCompiler());
+  return verification_results_;
+}
+
+const VerifiedMethod* CompilerOptions::GetVerifiedMethod(const DexFile* dex_file,
+                                                         uint32_t method_idx) const {
+  MethodReference ref(dex_file, method_idx);
+  return verification_results_->GetVerifiedMethod(ref);
+}
+
+bool CompilerOptions::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
+                                                      uint16_t class_def_idx,
+                                                      const DexFile& dex_file) const {
+  const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx);
+  if (verified_method != nullptr) {
+    return !verified_method->HasVerificationFailures();
+  }
+
+  // If we can't find verification metadata, check if this is a system class (we trust that system
+  // classes have their methods verified). If it's not, be conservative and assume the method
+  // has not been verified successfully.
+
+  // TODO: When compiling the boot image it should be safe to assume that everything is verified,
+  // even if methods are not found in the verification cache.
+  const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx));
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+  bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr;
+  if (!is_system_class) {
+    self->ClearException();
+  }
+  return is_system_class;
+}
+
 }  // namespace art
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 17a779c..bd12bf7 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -39,10 +39,16 @@
 class VerifierDepsTest;
 }  // namespace verifier
 
+namespace linker {
+class Arm64RelativePatcherTest;
+}  // namespace linker
+
 class DexFile;
 enum class InstructionSet;
 class InstructionSetFeatures;
 class ProfileCompilationInfo;
+class VerificationResults;
+class VerifiedMethod;
 
 // Enum for CheckProfileMethodsCompiled. Outside CompilerOptions so it can be forward-declared.
 enum class ProfileMethodsCheck : uint8_t {
@@ -279,6 +285,16 @@
 
   bool IsImageClass(const char* descriptor) const;
 
+  const VerificationResults* GetVerificationResults() const;
+
+  const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
+
+  // Checks if the specified method has been verified without failures. Returns
+  // false if the method is not in the verification results (GetVerificationResults).
+  bool IsMethodVerifiedWithoutFailures(uint32_t method_idx,
+                                       uint16_t class_def_idx,
+                                       const DexFile& dex_file) const;
+
   bool ParseCompilerOptions(const std::vector<std::string>& options,
                             bool ignore_unrecognized,
                             std::string* error_msg);
@@ -377,6 +393,9 @@
   // Must not be empty for a real boot image; only tests pretending to compile a boot image may leave it empty.
   HashSet<std::string> image_classes_;
 
+  // Results of AOT verification.
+  const VerificationResults* verification_results_;
+
   ImageType image_type_;
   bool compiling_with_core_image_;
   bool baseline_;
@@ -450,6 +469,7 @@
   friend class CommonCompilerTest;
   friend class jit::JitCompiler;
   friend class verifier::VerifierDepsTest;
+  friend class linker::Arm64RelativePatcherTest;
 
   template <class Base>
   friend bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg);
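
With the verification queries now on CompilerOptions, optimization passes reach them through the code generator instead of holding a CompilerDriver pointer. A hedged sketch of the new caller pattern (mirroring the inliner change later in this diff):

    const CompilerOptions& opts = codegen->GetCompilerOptions();
    if (!opts.IsMethodVerifiedWithoutFailures(method->GetDexMethodIndex(),
                                              class_def_idx,
                                              *method->GetDexFile())) {
      return false;  // Soft or hard verification failures: do not analyze.
    }
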
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 0eab835..e57bbfa 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -126,11 +126,11 @@
 }
 
 extern "C" bool jit_compile_method(
-    void* handle, ArtMethod* method, Thread* self, bool osr)
+    void* handle, ArtMethod* method, Thread* self, bool baseline, bool osr)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
   DCHECK(jit_compiler != nullptr);
-  return jit_compiler->CompileMethod(self, method, osr);
+  return jit_compiler->CompileMethod(self, method, baseline, osr);
 }
 
 extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
@@ -142,10 +142,11 @@
     const ArrayRef<mirror::Class*> types_array(types, count);
     std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
         kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
-    MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
     // We never free debug info for types, so we don't need to provide a handle
     // (which would have been otherwise used as identifier to remove it later).
-    AddNativeDebugInfoForJit(nullptr /* handle */, elf_file);
+    AddNativeDebugInfoForJit(Thread::Current(),
+                             /*code_ptr=*/ nullptr,
+                             elf_file);
   }
 }
 
@@ -167,9 +168,7 @@
 
   compiler_driver_.reset(new CompilerDriver(
       compiler_options_.get(),
-      /* verification_results */ nullptr,
       Compiler::kOptimizing,
-      /* image_classes */ nullptr,
       /* thread_count */ 1,
       /* swap_fd */ -1));
   // Disable dedupe so we can remove compiled methods.
@@ -182,7 +181,7 @@
   }
 }
 
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr) {
   SCOPED_TRACE << "JIT compiling " << method->PrettyMethod();
 
   DCHECK(!method->IsProxyMethod());
@@ -199,7 +198,7 @@
     TimingLogger::ScopedTiming t2("Compiling", &logger);
     JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
     success = compiler_driver_->GetCompiler()->JitCompile(
-        self, code_cache, method, /* baseline= */ false, osr, jit_logger_.get());
+        self, code_cache, method, baseline, osr, jit_logger_.get());
   }
 
   // Trim maps to reduce memory usage.
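
Because jit_compile_method is an exported C entrypoint, the runtime-side loader must pass the new 'baseline' flag as well. A hedged sketch of that side, assuming the usual dlsym()-based lookup in runtime/jit/jit.cc (the handles and method below are hypothetical):

    #include <dlfcn.h>

    using JitCompileMethodFn = bool (*)(void*, ArtMethod*, Thread*, bool, bool);
    auto jit_compile = reinterpret_cast<JitCompileMethodFn>(
        dlsym(jit_library_handle, "jit_compile_method"));
    bool ok = jit_compile(jit_compiler_handle, method, Thread::Current(),
                          /*baseline=*/ false, /*osr=*/ false);
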
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index d201611..29d2761 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@
   virtual ~JitCompiler();
 
   // Compilation entrypoint. Returns whether the compilation succeeded.
-  bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
+  bool CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   const CompilerOptions& GetCompilerOptions() const {
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 92b9543..bd4304c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -1300,15 +1300,15 @@
   EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
   EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
   EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
-  EXPECT_EQ(0x12345678ABCDEF88ll, val1);
-  EXPECT_EQ(0x7FEDCBA987654321ll, val2);
+  EXPECT_EQ(0x12345678ABCDEF88LL, val1);
+  EXPECT_EQ(0x7FEDCBA987654321LL, val2);
   return 42;
 }
 
 void JniCompilerTest::GetTextImpl() {
   SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
                CURRENT_JNI_WRAPPER(my_gettext));
-  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
+  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88LL, jobj_,
                                           INT64_C(0x7FEDCBA987654321), jobj_);
   EXPECT_EQ(result, 42);
 }
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 81ecc17..44f3296 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_LINKER_ELF_BUILDER_H_
 
 #include <vector>
+#include <deque>
 
 #include "arch/instruction_set.h"
 #include "arch/mips/instruction_set_features_mips.h"
@@ -357,57 +358,49 @@
     }
 
     // Buffer symbol for this section.  It will be written later.
-    // If the symbol's section is null, it will be considered absolute (SHN_ABS).
-    // (we use this in JIT to reference code which is stored outside the debug ELF file)
     void Add(Elf_Word name,
              const Section* section,
              Elf_Addr addr,
              Elf_Word size,
              uint8_t binding,
              uint8_t type) {
-      Elf_Word section_index;
-      if (section != nullptr) {
-        DCHECK_LE(section->GetAddress(), addr);
-        DCHECK_LE(addr, section->GetAddress() + section->header_.sh_size);
-        section_index = section->GetSectionIndex();
-      } else {
-        section_index = static_cast<Elf_Word>(SHN_ABS);
-      }
-      Add(name, section_index, addr, size, binding, type);
-    }
-
-    // Buffer symbol for this section.  It will be written later.
-    void Add(Elf_Word name,
-             Elf_Word section_index,
-             Elf_Addr addr,
-             Elf_Word size,
-             uint8_t binding,
-             uint8_t type) {
       Elf_Sym sym = Elf_Sym();
       sym.st_name = name;
       sym.st_value = addr;
       sym.st_size = size;
       sym.st_other = 0;
-      sym.st_shndx = section_index;
       sym.st_info = (binding << 4) + (type & 0xf);
-      syms_.push_back(sym);
+      Add(sym, section);
+    }
+
+    // Buffer symbol for this section.  It will be written later.
+    void Add(Elf_Sym sym, const Section* section) {
+      DCHECK(section != nullptr);
+      DCHECK_LE(section->GetAddress(), sym.st_value);
+      DCHECK_LE(sym.st_value, section->GetAddress() + section->header_.sh_size);
+      sym.st_shndx = section->GetSectionIndex();
 
       // The sh_info field must be set to the index one past the last local symbol.
-      if (binding == STB_LOCAL) {
-        this->header_.sh_info = syms_.size();
+      if (sym.getBinding() == STB_LOCAL) {
+        DCHECK_EQ(syms_.back().getBinding(), STB_LOCAL);
+        this->header_.sh_info = syms_.size() + 1;
       }
+
+      syms_.push_back(sym);
     }
 
     Elf_Word GetCacheSize() { return syms_.size() * sizeof(Elf_Sym); }
 
     void WriteCachedSection() {
       this->Start();
-      this->WriteFully(syms_.data(), syms_.size() * sizeof(Elf_Sym));
+      for (; !syms_.empty(); syms_.pop_front()) {
+        this->WriteFully(&syms_.front(), sizeof(Elf_Sym));
+      }
       this->End();
     }
 
    private:
-    std::vector<Elf_Sym> syms_;  // Buffered/cached content of the whole section.
+    std::deque<Elf_Sym> syms_;  // Buffered/cached content of the whole section.
   };
 
   class AbiflagsSection final : public Section {
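
The sh_info bookkeeping above implements the ELF rule that a symbol table's sh_info holds the index one past the last STB_LOCAL symbol, which is why all locals must precede globals. A minimal model of the invariant, not ART's ElfBuilder, with the deque presumably chosen so WriteCachedSection() can free entries as it flushes them:

    #include <cstddef>
    #include <cstdint>
    #include <deque>

    constexpr uint8_t kStbLocal = 0;  // STB_LOCAL
    struct Sym { uint8_t binding; };

    struct SymtabModel {
      std::deque<Sym> syms{{kStbLocal}};  // Index 0: the mandatory null symbol.
      size_t sh_info = 1;                 // One past the null symbol.

      void Add(Sym sym) {
        if (sym.binding == kStbLocal) {
          sh_info = syms.size() + 1;      // The new symbol's index, plus one.
        }
        syms.push_back(sym);
      }
    };
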
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index bbf167d..9e2fd9e 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -885,7 +885,8 @@
       location_builder_(graph, this),
       instruction_visitor_(graph, this),
       move_resolver_(graph->GetAllocator(), this),
-      assembler_(graph->GetAllocator()),
+      assembler_(graph->GetAllocator(),
+                 compiler_options.GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()),
       uint32_literals_(std::less<uint32_t>(),
                        graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       uint64_literals_(std::less<uint64_t>(),
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index f186191..b5a7c13 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -823,6 +823,33 @@
   InternalCodeAllocator code_allocator;
   codegen.Finalize(&code_allocator);
 }
+
+// Check that ART ISA features are propagated to VIXL for arm64 (using cortex-a75 as an example).
+TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA75) {
+  OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a75");
+  HGraph* graph = CreateGraph();
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
+
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kDotProduct));
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kFPHalf));
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kAtomics));
+}
+
+// Check that ART ISA features are propagated to VIXL for arm64 (using cortex-a53 as an example).
+TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA53) {
+  OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a53");
+  HGraph* graph = CreateGraph();
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
+
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
+  EXPECT_FALSE(features->Has(vixl::CPUFeatures::kDotProduct));
+  EXPECT_FALSE(features->Has(vixl::CPUFeatures::kFPHalf));
+  EXPECT_FALSE(features->Has(vixl::CPUFeatures::kAtomics));
+}
+
 #endif
 
 #ifdef ART_ENABLE_CODEGEN_mips
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index ec93222..417d794 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -28,7 +28,6 @@
 #include "dex/inline_method_analyser.h"
 #include "dex/verification_results.h"
 #include "dex/verified_method.h"
-#include "driver/compiler_driver-inl.h"
 #include "driver/compiler_options.h"
 #include "driver/dex_compilation_unit.h"
 #include "instruction_simplifier.h"
@@ -408,7 +407,7 @@
   return single_impl;
 }
 
-static bool IsMethodUnverified(CompilerDriver* const compiler_driver, ArtMethod* method)
+static bool IsMethodUnverified(const CompilerOptions& compiler_options, ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!method->GetDeclaringClass()->IsVerified()) {
     if (Runtime::Current()->UseJitCompilation()) {
@@ -417,8 +416,9 @@
       return true;
     }
     uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex();
-    if (!compiler_driver->IsMethodVerifiedWithoutFailures(
-        method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
+    if (!compiler_options.IsMethodVerifiedWithoutFailures(method->GetDexMethodIndex(),
+                                                          class_def_idx,
+                                                          *method->GetDexFile())) {
       // Method has soft or hard failures, don't analyze.
       return true;
     }
@@ -426,11 +426,11 @@
   return false;
 }
 
-static bool AlwaysThrows(CompilerDriver* const compiler_driver, ArtMethod* method)
+static bool AlwaysThrows(const CompilerOptions& compiler_options, ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(method != nullptr);
   // Skip non-compilable and unverified methods.
-  if (!method->IsCompilable() || IsMethodUnverified(compiler_driver, method)) {
+  if (!method->IsCompilable() || IsMethodUnverified(compiler_options, method)) {
     return false;
   }
   // Skip native methods, methods with try blocks, and methods that are too large.
@@ -518,7 +518,7 @@
           MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
         }
       }
-    } else if (!cha_devirtualize && AlwaysThrows(compiler_driver_, actual_method)) {
+    } else if (!cha_devirtualize && AlwaysThrows(codegen_->GetCompilerOptions(), actual_method)) {
       // Set always throws property for non-inlined method call with single target
       // (unless it was obtained through CHA, because that would imply we have
       // to add the CHA dependency, which seems not worth it).
@@ -1500,7 +1500,7 @@
     return false;
   }
 
-  if (IsMethodUnverified(compiler_driver_, method)) {
+  if (IsMethodUnverified(codegen_->GetCompilerOptions(), method)) {
     LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
         << "Method " << method->PrettyMethod()
         << " couldn't be verified, so it cannot be inlined";
@@ -2069,7 +2069,6 @@
                    codegen_,
                    outer_compilation_unit_,
                    dex_compilation_unit,
-                   compiler_driver_,
                    handles_,
                    inline_stats_,
                    total_number_of_dex_registers_ + accessor.RegistersSize(),
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 6fd0c20..8ac2163 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -38,7 +38,6 @@
            CodeGenerator* codegen,
            const DexCompilationUnit& outer_compilation_unit,
            const DexCompilationUnit& caller_compilation_unit,
-           CompilerDriver* compiler_driver,
            VariableSizedHandleScope* handles,
            OptimizingCompilerStats* stats,
            size_t total_number_of_dex_registers,
@@ -51,7 +50,6 @@
         outer_compilation_unit_(outer_compilation_unit),
         caller_compilation_unit_(caller_compilation_unit),
         codegen_(codegen),
-        compiler_driver_(compiler_driver),
         total_number_of_dex_registers_(total_number_of_dex_registers),
         total_number_of_instructions_(total_number_of_instructions),
         parent_(parent),
@@ -280,7 +278,6 @@
   const DexCompilationUnit& outer_compilation_unit_;
   const DexCompilationUnit& caller_compilation_unit_;
   CodeGenerator* const codegen_;
-  CompilerDriver* const compiler_driver_;
   const size_t total_number_of_dex_registers_;
   size_t total_number_of_instructions_;
 
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 5bd11226..50b13c8 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -243,7 +243,8 @@
 // compilation.
 #define UNREACHABLE_INTRINSIC(Arch, Name)                                \
 void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
-  if (!codegen_->GetCompilerOptions().IsBaseline()) {                    \
+  if (Runtime::Current()->IsAotCompiler() &&                             \
+      !codegen_->GetCompilerOptions().IsBaseline()) {                    \
     LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()    \
                << " should have been converted to HIR";                  \
   }                                                                      \
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 6d04b0e..1688ea7 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2950,6 +2950,151 @@
   __ Mvn(out, out);
 }
 
+// The threshold for array sizes above which the library-provided implementation
+// of CRC32.updateBytes is used instead of the intrinsic.
+static constexpr int32_t kCRC32UpdateBytesThreshold = 64 * 1024;
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+    return;
+  }
+
+  LocationSummary* locations
+      = new (allocator_) LocationSummary(invoke,
+                                         LocationSummary::kCallOnSlowPath,
+                                         kIntrinsified);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(2, Location::RegisterOrConstant(invoke->InputAt(2)));
+  locations->SetInAt(3, Location::RequiresRegister());
+  locations->AddTemp(Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateBytes(int crc, byte[] b, int off, int len)
+//
+// Note: The intrinsic is not used if len exceeds a threshold.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+  auto masm = GetVIXLAssembler();
+  auto locations = invoke->GetLocations();
+
+  auto slow_path =
+    new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
+  codegen_->AddSlowPath(slow_path);
+
+  Register length = WRegisterFrom(locations->InAt(3));
+  __ Cmp(length, kCRC32UpdateBytesThreshold);
+  __ B(slow_path->GetEntryLabel(), hi);
+
+  const uint32_t array_data_offset =
+      mirror::Array::DataOffset(Primitive::kPrimByte).Uint32Value();
+  Register ptr = XRegisterFrom(locations->GetTemp(0));
+  Register array = XRegisterFrom(locations->InAt(1));
+  auto offset = locations->InAt(2);
+  if (offset.IsConstant()) {
+    int32_t offset_value = offset.GetConstant()->AsIntConstant()->GetValue();
+    __ Add(ptr, array, array_data_offset + offset_value);
+  } else {
+    __ Add(ptr, array, array_data_offset);
+    __ Add(ptr, ptr, XRegisterFrom(offset));
+  }
+
+  // The byte-array CRC32 algorithm is:
+  //   crc = ~crc
+  //   process the first few bytes to make the array pointer 8-byte aligned
+  //   while array has 8 bytes do:
+  //     crc = crc32_of_8bytes(crc, 8_bytes(array))
+  //   if array has 4 bytes:
+  //     crc = crc32_of_4bytes(crc, 4_bytes(array))
+  //   if array has 2 bytes:
+  //     crc = crc32_of_2bytes(crc, 2_bytes(array))
+  //   if array has a byte:
+  //     crc = crc32_of_byte(crc, 1_byte(array))
+  //   crc = ~crc
+
+  vixl::aarch64::Label loop, done;
+  vixl::aarch64::Label process_4bytes, process_2bytes, process_1byte;
+  vixl::aarch64::Label aligned2, aligned4, aligned8;
+
+  // Use VIXL scratch registers, as the VIXL macro assembler won't use them
+  // in the instructions below.
+  UseScratchRegisterScope temps(masm);
+  Register len = temps.AcquireW();
+  Register array_elem = temps.AcquireW();
+
+  Register out = WRegisterFrom(locations->Out());
+  __ Mvn(out, WRegisterFrom(locations->InAt(0)));
+  __ Mov(len, length);
+
+  __ Tbz(ptr, 0, &aligned2);
+  __ Subs(len, len, 1);
+  __ B(&done, lo);
+  __ Ldrb(array_elem, MemOperand(ptr, 1, PostIndex));
+  __ Crc32b(out, out, array_elem);
+
+  __ Bind(&aligned2);
+  __ Tbz(ptr, 1, &aligned4);
+  __ Subs(len, len, 2);
+  __ B(&process_1byte, lo);
+  __ Ldrh(array_elem, MemOperand(ptr, 2, PostIndex));
+  __ Crc32h(out, out, array_elem);
+
+  __ Bind(&aligned4);
+  __ Tbz(ptr, 2, &aligned8);
+  __ Subs(len, len, 4);
+  __ B(&process_2bytes, lo);
+  __ Ldr(array_elem, MemOperand(ptr, 4, PostIndex));
+  __ Crc32w(out, out, array_elem);
+
+  __ Bind(&aligned8);
+  __ Subs(len, len, 8);
+  // If len < 8, process the remaining data by 4 bytes, 2 bytes and a byte.
+  __ B(&process_4bytes, lo);
+
+  // The main loop processing data by 8 bytes.
+  __ Bind(&loop);
+  __ Ldr(array_elem.X(), MemOperand(ptr, 8, PostIndex));
+  __ Subs(len, len, 8);
+  __ Crc32x(out, out, array_elem.X());
+  // If len >= 8, process the next 8 bytes.
+  __ B(&loop, hs);
+
+  // Process the remaining data, which is less than 8 bytes.
+  // The code generated below works with values of len
+  // in the range [-8, 0].
+  // The low three bits are used to detect whether 4 bytes or 2 bytes or
+  // a byte can still be processed.
+  // The checking order is from bit 2 to bit 0:
+  //  bit 2 is set: at least 4 bytes available
+  //  bit 1 is set: at least 2 bytes available
+  //  bit 0 is set: at least a byte available
+  __ Bind(&process_4bytes);
+  // Go to process_2bytes if fewer than four bytes are available.
+  __ Tbz(len, 2, &process_2bytes);
+  __ Ldr(array_elem, MemOperand(ptr, 4, PostIndex));
+  __ Crc32w(out, out, array_elem);
+
+  __ Bind(&process_2bytes);
+  // Go to process_1byte if fewer than two bytes are available.
+  __ Tbz(len, 1, &process_1byte);
+  __ Ldrh(array_elem, MemOperand(ptr, 2, PostIndex));
+  __ Crc32h(out, out, array_elem);
+
+  __ Bind(&process_1byte);
+  // Go to done if no bytes are available.
+  __ Tbz(len, 0, &done);
+  __ Ldrb(array_elem, MemOperand(ptr));
+  __ Crc32b(out, out, array_elem);
+
+  __ Bind(&done);
+  __ Mvn(out, out);
+
+  __ Bind(slow_path->GetExitLabel());
+}
+
 UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
 
 UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
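
For readers who want a reference model of the code generated above, here is a hedged plain-C++ rendering of the same align-then-widen processing order using the ACLE CRC32 intrinsics (requires -march=armv8-a+crc; it mirrors the algorithm comment, it is not the emitted code):

    #include <arm_acle.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    uint32_t Crc32UpdateBytes(uint32_t crc, const uint8_t* p, size_t len) {
      crc = ~crc;
      // Consume 1/2/4 bytes until p is 8-byte aligned.
      if ((reinterpret_cast<uintptr_t>(p) & 1) != 0 && len >= 1) {
        crc = __crc32b(crc, *p); p += 1; len -= 1;
      }
      if ((reinterpret_cast<uintptr_t>(p) & 2) != 0 && len >= 2) {
        uint16_t v; std::memcpy(&v, p, 2); crc = __crc32h(crc, v); p += 2; len -= 2;
      }
      if ((reinterpret_cast<uintptr_t>(p) & 4) != 0 && len >= 4) {
        uint32_t v; std::memcpy(&v, p, 4); crc = __crc32w(crc, v); p += 4; len -= 4;
      }
      // Main loop: 8 bytes at a time.
      for (; len >= 8; p += 8, len -= 8) {
        uint64_t v; std::memcpy(&v, p, 8); crc = __crc32d(crc, v);
      }
      // Tail: 4 bytes, then 2 bytes, then a byte.
      if (len & 4) { uint32_t v; std::memcpy(&v, p, 4); crc = __crc32w(crc, v); p += 4; }
      if (len & 2) { uint16_t v; std::memcpy(&v, p, 2); crc = __crc32h(crc, v); p += 2; }
      if (len & 1) { crc = __crc32b(crc, *p); }
      return ~crc;
    }
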
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 4d45a99..88f1457 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -3060,6 +3060,7 @@
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateBytes)
 
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 21fb7d7..08ba0a0 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2697,6 +2697,7 @@
 UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
 
 UNIMPLEMENTED_INTRINSIC(MIPS, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateBytes)
 
 UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 4b86f5d..59d3ba2 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2347,6 +2347,7 @@
 UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
 UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
 UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateBytes)
 
 UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index a73f4e8..1d94950 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3071,6 +3071,7 @@
 UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
 UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateBytes)
 
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 88c766f..4f0b61d 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2738,6 +2738,7 @@
 UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
 UNIMPLEMENTED_INTRINSIC(X86_64, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateBytes)
 
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6108522..13c8684 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -26,6 +26,7 @@
 #include "base/arena_object.h"
 #include "base/array_ref.h"
 #include "base/iteration_range.h"
+#include "base/mutex.h"
 #include "base/quasi_atomic.h"
 #include "base/stl_util.h"
 #include "base/transform_array_ref.h"
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 0f971e1..b75afad 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -181,7 +181,6 @@
     HGraph* graph,
     OptimizingCompilerStats* stats,
     CodeGenerator* codegen,
-    CompilerDriver* driver,
     const DexCompilationUnit& dex_compilation_unit,
     VariableSizedHandleScope* handles) {
   ArenaVector<HOptimization*> optimizations(allocator->Adapter());
@@ -258,7 +257,6 @@
                                        codegen,
                                        dex_compilation_unit,    // outer_compilation_unit
                                        dex_compilation_unit,    // outermost_compilation_unit
-                                       driver,
                                        handles,
                                        stats,
                                        accessor.RegistersSize(),
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 490007d..ce44b5f 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -147,7 +147,6 @@
     HGraph* graph,
     OptimizingCompilerStats* stats,
     CodeGenerator* codegen,
-    CompilerDriver* driver,
     const DexCompilationUnit& dex_compilation_unit,
     VariableSizedHandleScope* handles);
 
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1d3fcf3..c9b4d36 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -321,7 +321,6 @@
         graph,
         compilation_stats_.get(),
         codegen,
-        GetCompilerDriver(),
         dex_compilation_unit,
         handles);
     DCHECK_EQ(length, optimizations.size());
@@ -962,9 +961,9 @@
       arena_stack,
       dex_file,
       method_idx,
-      compiler_driver->GetCompilerOptions().GetInstructionSet(),
+      compiler_options.GetInstructionSet(),
       kInvalidInvokeType,
-      compiler_driver->GetCompilerOptions().GetDebuggable(),
+      compiler_options.GetDebuggable(),
       /* osr */ false);
 
   DCHECK(Runtime::Current()->IsAotCompiler());
@@ -1043,12 +1042,13 @@
                                             const DexFile& dex_file,
                                             Handle<mirror::DexCache> dex_cache) const {
   CompilerDriver* compiler_driver = GetCompilerDriver();
+  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
   CompiledMethod* compiled_method = nullptr;
   Runtime* runtime = Runtime::Current();
   DCHECK(runtime->IsAotCompiler());
-  const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
+  const VerifiedMethod* verified_method = compiler_options.GetVerifiedMethod(&dex_file, method_idx);
   DCHECK(!verified_method->HasRuntimeThrow());
-  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
+  if (compiler_options.IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
       verifier::CanCompilerHandleVerificationFailure(
           verified_method->GetEncounteredVerificationFailures())) {
     ArenaAllocator allocator(runtime->GetArenaPool());
@@ -1080,7 +1080,7 @@
       // Go to native so that we don't block GC during compilation.
       ScopedThreadSuspension sts(soa.Self(), kNative);
       if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
-        DCHECK(compiler_driver->GetCompilerOptions().IsBootImage());
+        DCHECK(compiler_options.IsBootImage());
         codegen.reset(
             TryCompileIntrinsic(&allocator,
                                 &arena_stack,
@@ -1099,7 +1099,7 @@
                        &code_allocator,
                        dex_compilation_unit,
                        method,
-                       compiler_driver->GetCompilerOptions().IsBaseline(),
+                       compiler_options.IsBaseline(),
                        /* osr= */ false,
                        &handles));
       }
@@ -1128,7 +1128,7 @@
     }
   } else {
     MethodCompilationStat method_stat;
-    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
+    if (compiler_options.VerifyAtRuntime()) {
       method_stat = MethodCompilationStat::kNotCompiledVerifyAtRuntime;
     } else {
       method_stat = MethodCompilationStat::kNotCompiledVerificationError;
@@ -1137,8 +1137,8 @@
   }
 
   if (kIsDebugBuild &&
-      compiler_driver->GetCompilerOptions().CompilingWithCoreImage() &&
-      IsInstructionSetSupported(compiler_driver->GetCompilerOptions().GetInstructionSet())) {
+      compiler_options.CompilingWithCoreImage() &&
+      IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
     // For testing purposes, we put a special marker on method names
     // that should be compiled with this compiler (when the
     // instruction set is supported). This makes sure we're not
@@ -1469,14 +1469,15 @@
       compiler_options.GetInstructionSet(),
       compiler_options.GetInstructionSetFeatures(),
       mini_debug_info,
-      ArrayRef<const debug::MethodDebugInfo>(&info, 1));
-  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
-  AddNativeDebugInfoForJit(reinterpret_cast<const void*>(info.code_address), elf_file);
+      info);
+  AddNativeDebugInfoForJit(Thread::Current(),
+                           reinterpret_cast<const void*>(info.code_address),
+                           elf_file);
 
   VLOG(jit)
       << "JIT mini-debug-info added for " << ArtMethod::PrettyMethod(method)
       << " size=" << PrettySize(elf_file.size())
-      << " total_size=" << PrettySize(GetJitNativeDebugInfoMemUsage());
+      << " total_size=" << PrettySize(GetJitMiniDebugInfoMemUsage());
 }
 
 }  // namespace art
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index c83fd44..d7ade05 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "arch/arm64/instruction_set_features_arm64.h"
 #include "assembler_arm64.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "heap_poisoning.h"
@@ -31,6 +32,37 @@
 #define ___   vixl_masm_.
 #endif
 
+// Sets vixl::CPUFeatures according to ART instruction set features.
+static void SetVIXLCPUFeaturesFromART(vixl::aarch64::MacroAssembler* vixl_masm_,
+                                      const Arm64InstructionSetFeatures* art_features) {
+  // Retrieve the already-initialized default VIXL features.
+  vixl::CPUFeatures* features = vixl_masm_->GetCPUFeatures();
+
+  DCHECK(features->Has(vixl::CPUFeatures::kFP));
+  DCHECK(features->Has(vixl::CPUFeatures::kNEON));
+  DCHECK(art_features != nullptr);
+  if (art_features->HasCRC()) {
+    features->Combine(vixl::CPUFeatures::kCRC32);
+  }
+  if (art_features->HasDotProd()) {
+    features->Combine(vixl::CPUFeatures::kDotProduct);
+  }
+  if (art_features->HasFP16()) {
+    features->Combine(vixl::CPUFeatures::kFPHalf);
+  }
+  if (art_features->HasLSE()) {
+    features->Combine(vixl::CPUFeatures::kAtomics);
+  }
+}
+
+Arm64Assembler::Arm64Assembler(ArenaAllocator* allocator,
+                               const Arm64InstructionSetFeatures* art_features)
+    : Assembler(allocator) {
+  if (art_features != nullptr) {
+    SetVIXLCPUFeaturesFromART(&vixl_masm_, art_features);
+  }
+}
+
 void Arm64Assembler::FinalizeCode() {
   ___ FinalizeCode();
 }
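
End to end, the plumbing is: parse a CPU variant into Arm64InstructionSetFeatures, hand it to the assembler, and VIXL then accepts feature-gated instructions such as crc32x. A hedged usage sketch (assuming ART's InstructionSetFeatures::FromVariant factory; error handling elided):

    std::string error_msg;
    std::unique_ptr<const InstructionSetFeatures> features =
        InstructionSetFeatures::FromVariant(InstructionSet::kArm64,
                                            "cortex-a75", &error_msg);
    Arm64Assembler assembler(allocator,
                             features->AsArm64InstructionSetFeatures());
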
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 74537dd..fdecab8 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -37,6 +37,9 @@
 #pragma GCC diagnostic pop
 
 namespace art {
+
+class Arm64InstructionSetFeatures;
+
 namespace arm64 {
 
 #define MEM_OP(...)      vixl::aarch64::MemOperand(__VA_ARGS__)
@@ -63,7 +66,8 @@
 
 class Arm64Assembler final : public Assembler {
  public:
-  explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
+  explicit Arm64Assembler(
+      ArenaAllocator* allocator, const Arm64InstructionSetFeatures* features = nullptr);
 
   virtual ~Arm64Assembler() {}
 
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index 778a015..5fa0b3c 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -59,12 +59,12 @@
       disassembler_cmd_name_(disasm),
       disassembler_parameters_(disasm_params) {
     // Fake a runtime test for ScratchFile
-    CommonRuntimeTest::SetUpAndroidData(android_data_);
+    CommonRuntimeTest::SetUpAndroidDataDir(android_data_);
   }
 
   virtual ~AssemblerTestInfrastructure() {
     // We leave temporaries in case this failed so we can debug issues.
-    CommonRuntimeTest::TearDownAndroidData(android_data_, false);
+    CommonRuntimeTest::TearDownAndroidDataDir(android_data_, false);
     tmpnam_ = "";
   }
 
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index eb44dd7..8c90aa7 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -93,12 +93,12 @@
       verifier_deps_.reset(deps);
     }
     callbacks_->SetVerifierDeps(deps);
-    compiler_driver_->Verify(class_loader_, dex_files_, &timings);
+    compiler_driver_->Verify(class_loader_, dex_files_, &timings, verification_results_.get());
     callbacks_->SetVerifierDeps(nullptr);
     // Clear entries in the verification results to avoid hitting a DCHECK that
     // we always succeed in inserting a new entry after verifying.
     AtomicDexRefMap<MethodReference, const VerifiedMethod*>* map =
-        &compiler_driver_->GetVerificationResults()->atomic_verified_methods_;
+        &verification_results_->atomic_verified_methods_;
     map->Visit([](const DexFileReference& ref ATTRIBUTE_UNUSED, const VerifiedMethod* method) {
       delete method;
     });
@@ -126,7 +126,7 @@
       class_linker_->RegisterDexFile(*dex_file, loader.Get());
     }
     for (const DexFile* dex_file : dex_files_) {
-      compiler_driver_->GetVerificationResults()->AddDexFile(dex_file);
+      verification_results_->AddDexFile(dex_file);
     }
     SetDexFilesForOatFile(dex_files_);
   }
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index edd6189..872fab3 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -103,6 +103,7 @@
 
 using android::base::StringAppendV;
 using android::base::StringPrintf;
+using gc::space::ImageSpace;
 
 static constexpr size_t kDefaultMinDexFilesForSwap = 2;
 static constexpr size_t kDefaultMinDexFileCumulativeSizeForSwap = 20 * MB;
@@ -623,13 +624,13 @@
   explicit Dex2Oat(TimingLogger* timings) :
       compiler_kind_(Compiler::kOptimizing),
       // Take the default set of instruction features from the build.
-      boot_image_checksum_(0),
       key_value_store_(nullptr),
       verification_results_(nullptr),
       runtime_(nullptr),
       thread_count_(sysconf(_SC_NPROCESSORS_CONF)),
       start_ns_(NanoTime()),
       start_cputime_ns_(ProcessCpuNanoTime()),
+      strip_(false),
       oat_fd_(-1),
       input_vdex_fd_(-1),
       output_vdex_fd_(-1),
@@ -963,89 +964,22 @@
   }
 
   void ExpandOatAndImageFilenames() {
-    std::string base_oat = oat_filenames_[0];
-    size_t last_oat_slash = base_oat.rfind('/');
-    if (last_oat_slash == std::string::npos) {
-      Usage("Unusable boot image oat filename %s", base_oat.c_str());
+    if (image_filenames_[0].rfind('/') == std::string::npos) {
+      Usage("Unusable boot image filename %s", image_filenames_[0].c_str());
     }
-    // We also need to honor path components that were encoded through '@'. Otherwise the loading
-    // code won't be able to find the images.
-    if (base_oat.find('@', last_oat_slash) != std::string::npos) {
-      last_oat_slash = base_oat.rfind('@');
-    }
-    base_oat = base_oat.substr(0, last_oat_slash + 1);
+    image_filenames_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, image_filenames_[0]);
 
-    std::string base_img = image_filenames_[0];
-    size_t last_img_slash = base_img.rfind('/');
-    if (last_img_slash == std::string::npos) {
-      Usage("Unusable boot image filename %s", base_img.c_str());
+    if (oat_filenames_[0].rfind('/') == std::string::npos) {
+      Usage("Unusable boot image oat filename %s", oat_filenames_[0].c_str());
     }
-    // We also need to honor path components that were encoded through '@'. Otherwise the loading
-    // code won't be able to find the images.
-    if (base_img.find('@', last_img_slash) != std::string::npos) {
-      last_img_slash = base_img.rfind('@');
-    }
+    oat_filenames_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, oat_filenames_[0]);
 
-    // Get the prefix, which is the primary image name (without path components). Strip the
-    // extension.
-    std::string prefix = base_img.substr(last_img_slash + 1);
-    if (prefix.rfind('.') != std::string::npos) {
-      prefix = prefix.substr(0, prefix.rfind('.'));
-    }
-    if (!prefix.empty()) {
-      prefix = prefix + "-";
-    }
-
-    base_img = base_img.substr(0, last_img_slash + 1);
-
-    std::string base_symbol_oat;
     if (!oat_unstripped_.empty()) {
-      base_symbol_oat = oat_unstripped_[0];
-      size_t last_symbol_oat_slash = base_symbol_oat.rfind('/');
-      if (last_symbol_oat_slash == std::string::npos) {
-        Usage("Unusable boot image symbol filename %s", base_symbol_oat.c_str());
+      if (oat_unstripped_[0].rfind('/') == std::string::npos) {
+        Usage("Unusable boot image symbol filename %s", oat_unstripped_[0].c_str());
       }
-      base_symbol_oat = base_symbol_oat.substr(0, last_symbol_oat_slash + 1);
+      oat_unstripped_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, oat_unstripped_[0]);
     }
-
-    // Now create the other names. Use a counted loop to skip the first one.
-    for (size_t i = 1; i < dex_locations_.size(); ++i) {
-      // TODO: Make everything properly std::string.
-      std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, ".art");
-      char_backing_storage_.push_front(base_img + image_name);
-      image_filenames_.push_back(char_backing_storage_.front().c_str());
-
-      std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, ".oat");
-      char_backing_storage_.push_front(base_oat + oat_name);
-      oat_filenames_.push_back(char_backing_storage_.front().c_str());
-
-      if (!base_symbol_oat.empty()) {
-        char_backing_storage_.push_front(base_symbol_oat + oat_name);
-        oat_unstripped_.push_back(char_backing_storage_.front().c_str());
-      }
-    }
-  }
-
-  // Modify the input string in the following way:
-  //   0) Assume input is /a/b/c.d
-  //   1) Strip the path  -> c.d
-  //   2) Inject prefix p -> pc.d
-  //   3) Replace suffix with s if it's "jar"  -> d == "jar" -> pc.s
-  static std::string CreateMultiImageName(std::string in,
-                                          const std::string& prefix,
-                                          const char* replace_suffix) {
-    size_t last_dex_slash = in.rfind('/');
-    if (last_dex_slash != std::string::npos) {
-      in = in.substr(last_dex_slash + 1);
-    }
-    if (!prefix.empty()) {
-      in = prefix + in;
-    }
-    if (android::base::EndsWith(in, ".jar")) {
-      in = in.substr(0, in.length() - strlen(".jar")) +
-          (replace_suffix != nullptr ? replace_suffix : "");
-    }
-    return in;
   }
 
   void InsertCompileOptions(int argc, char** argv) {
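
The per-entry naming rule the deleted helper implemented is now centralized in ImageSpace::ExpandMultiImageLocations(). For reference, a self-contained restatement of the transformation the removed comment documented (strip the path, inject the prefix, swap a ".jar" suffix), with an illustrative input:

    #include <cstddef>
    #include <string>

    std::string MultiImageName(std::string in, const std::string& prefix,
                               const char* replace_suffix) {
      size_t last_slash = in.rfind('/');
      if (last_slash != std::string::npos) {
        in = in.substr(last_slash + 1);      // 1) Strip the path.
      }
      if (!prefix.empty()) {
        in = prefix + in;                    // 2) Inject the prefix.
      }
      if (in.size() >= 4 && in.compare(in.size() - 4, 4, ".jar") == 0) {
        in = in.substr(0, in.size() - 4) +   // 3) Replace a ".jar" suffix.
             (replace_suffix != nullptr ? replace_suffix : "");
      }
      return in;
    }

    // MultiImageName("/system/framework/core-libart.jar", "boot-", ".art")
    //   yields "boot-core-libart.art".
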
@@ -1496,27 +1430,28 @@
 
     if (IsBootImage()) {
       // If we're compiling the boot image, store the boot classpath into the Key-Value store.
-      // We need this for the multi-image case.
-      key_value_store_->Put(OatHeader::kBootClassPathKey,
-                            gc::space::ImageSpace::GetMultiImageBootClassPath(dex_locations_,
-                                                                              oat_filenames_,
-                                                                              image_filenames_));
+      // We use this when loading the boot image.
+      key_value_store_->Put(OatHeader::kBootClassPathKey, android::base::Join(dex_locations_, ':'));
     }
 
     if (!IsBootImage()) {
       // When compiling an app, create the runtime early to retrieve
-      // the image location key needed for the oat header.
+      // the boot image checksums needed for the oat header.
       if (!CreateRuntime(std::move(runtime_options))) {
         return dex2oat::ReturnCode::kCreateRuntime;
       }
 
       if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) {
         TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
-        std::vector<gc::space::ImageSpace*> image_spaces =
-            Runtime::Current()->GetHeap()->GetBootImageSpaces();
-        boot_image_checksum_ = image_spaces[0]->GetImageHeader().GetImageChecksum();
-      } else {
-        boot_image_checksum_ = 0u;
+        Runtime* runtime = Runtime::Current();
+        key_value_store_->Put(OatHeader::kBootClassPathKey,
+                              android::base::Join(runtime->GetBootClassPathLocations(), ':'));
+        std::vector<ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+        const std::vector<const DexFile*>& bcp_dex_files =
+            runtime->GetClassLinker()->GetBootClassPath();
+        key_value_store_->Put(
+            OatHeader::kBootClassPathChecksumsKey,
+            gc::space::ImageSpace::GetBootClassPathChecksums(image_spaces, bcp_dex_files));
       }
 
       // Open dex files for class path.
@@ -1788,9 +1723,7 @@
     compiler_options_->profile_compilation_info_ = profile_compilation_info_.get();
 
     driver_.reset(new CompilerDriver(compiler_options_.get(),
-                                     verification_results_.get(),
                                      compiler_kind_,
-                                     &compiler_options_->image_classes_,
                                      thread_count_,
                                      swap_fd_));
     if (!IsBootImage()) {
@@ -1862,7 +1795,16 @@
                    << soa.Self()->GetException()->Dump();
       }
     }
+    driver_->InitializeThreadPools();
+    driver_->PreCompile(class_loader,
+                        dex_files,
+                        timings_,
+                        &compiler_options_->image_classes_,
+                        verification_results_.get());
+    callbacks_->SetVerificationResults(nullptr);  // Should not be needed anymore.
+    compiler_options_->verification_results_ = verification_results_.get();
     driver_->CompileAll(class_loader, dex_files, timings_);
+    driver_->FreeThreadPools();
     return class_loader;
   }
 
@@ -1945,7 +1887,7 @@
     if (IsImage()) {
       if (IsAppImage() && image_base_ == 0) {
         gc::Heap* const heap = Runtime::Current()->GetHeap();
-        for (gc::space::ImageSpace* image_space : heap->GetBootImageSpaces()) {
+        for (ImageSpace* image_space : heap->GetBootImageSpaces()) {
           image_base_ = std::max(image_base_, RoundUp(
               reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
               kPageSize));
@@ -2077,7 +2019,7 @@
           elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
         }
 
-        if (!oat_writer->WriteHeader(elf_writer->GetStream(), boot_image_checksum_)) {
+        if (!oat_writer->WriteHeader(elf_writer->GetStream())) {
           LOG(ERROR) << "Failed to write oat header to the ELF file " << oat_file->GetPath();
           return false;
         }
@@ -2708,7 +2650,6 @@
   std::unique_ptr<CompilerOptions> compiler_options_;
   Compiler::Kind compiler_kind_;
 
-  uint32_t boot_image_checksum_;
   std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
 
   std::unique_ptr<VerificationResults> verification_results_;
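
Note: with GetMultiImageBootClassPath gone, the kBootClassPathKey entry is just the boot class path locations joined with ':'; the checksums travel separately under kBootClassPathChecksumsKey, in whatever format ImageSpace::GetBootClassPathChecksums produces. A sketch equivalent to the android::base::Join call above (illustrative only):

    #include <string>
    #include <vector>

    // Produces e.g. "/a/core-oj.jar:/a/core-libart.jar" from its inputs.
    std::string JoinLocations(const std::vector<std::string>& locations) {
      std::string result;
      for (size_t i = 0u; i != locations.size(); ++i) {
        if (i != 0u) {
          result += ':';
        }
        result += locations[i];
      }
      return result;
    }
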
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
index f242ae2..678574be2 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
@@ -177,6 +177,13 @@
     OptimizingUnitTestHelper helper;
     HGraph* graph = helper.CreateGraph();
     CompilerOptions compiler_options;
+
+    // Set isa to arm64.
+    compiler_options.instruction_set_ = instruction_set_;
+    compiler_options.instruction_set_features_ =
+        InstructionSetFeatures::FromBitmap(instruction_set_, instruction_set_features_->AsBitmap());
+    CHECK(compiler_options.instruction_set_features_->Equals(instruction_set_features_.get()));
+
     arm64::CodeGeneratorARM64 codegen(graph, compiler_options);
     ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter());
     codegen.EmitThunkCode(patch, &code, debug_name);
@@ -568,11 +575,6 @@
   Arm64RelativePatcherTestDefault() : Arm64RelativePatcherTest("default") { }
 };
 
-class Arm64RelativePatcherTestDenver64 : public Arm64RelativePatcherTest {
- public:
-  Arm64RelativePatcherTestDenver64() : Arm64RelativePatcherTest("denver64") { }
-};
-
 TEST_F(Arm64RelativePatcherTestDefault, CallSelf) {
   const LinkerPatch patches[] = {
       LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
@@ -827,18 +829,6 @@
       { 0x1234u, 0x1238u });
 }
 
-TEST_F(Arm64RelativePatcherTestDenver64, StringBssEntryLdur) {
-  TestForAdrpOffsets(
-      [&](uint32_t adrp_offset, uint32_t string_entry_offset) {
-        Reset();
-        TestAdrpLdurLdr(adrp_offset,
-                        /*has_thunk=*/ false,
-                        /*bss_begin=*/ 0x12345678u,
-                        string_entry_offset);
-      },
-      { 0x1234u, 0x1238u });
-}
-
 // LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
 TEST_F(Arm64RelativePatcherTestDefault, StringBssEntryWPcRel) {
   TestForAdrpOffsets(
@@ -910,15 +900,6 @@
       { 0x12345678u, 0xffffc840u });
 }
 
-TEST_F(Arm64RelativePatcherTestDenver64, StringReferenceLdur) {
-  TestForAdrpOffsets(
-      [&](uint32_t adrp_offset, uint32_t string_offset) {
-        Reset();
-        TestAdrpLdurAdd(adrp_offset, /*has_thunk=*/ false, string_offset);
-      },
-      { 0x12345678u, 0xffffc840U });
-}
-
 TEST_F(Arm64RelativePatcherTestDefault, StringReferenceSubX3X2) {
   TestForAdrpOffsets(
       [&](uint32_t adrp_offset, uint32_t string_offset) {
diff --git a/dex2oat/linker/image_test.cc b/dex2oat/linker/image_test.cc
index ebd829b..1a5701d 100644
--- a/dex2oat/linker/image_test.cc
+++ b/dex2oat/linker/image_test.cc
@@ -73,7 +73,10 @@
     uint32_t oat_data_end = ART_BASE_ADDRESS + (9 * KB);
     uint32_t oat_file_end = ART_BASE_ADDRESS + (10 * KB);
     ImageSection sections[ImageHeader::kSectionCount];
-    ImageHeader image_header(image_begin,
+    uint32_t image_reservation_size = RoundUp(oat_file_end - image_begin, kPageSize);
+    ImageHeader image_header(image_reservation_size,
+                             /*component_count=*/ 1u,
+                             image_begin,
                              image_size_,
                              sections,
                              image_roots,
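
Note: the two new leading ImageHeader fields describe the mapping as a whole: image_reservation_size spans from the image base to the end of the last oat file, page aligned, and component_count records how many images share that reservation (stored on the primary image, zero on the others). A sketch of the computation used by this test, assuming 4 KiB pages:

    #include <cstdint>

    constexpr uint32_t kPageSize = 4096u;

    constexpr uint32_t RoundUp(uint32_t x, uint32_t n) {  // n must be a power of two.
      return (x + n - 1u) & ~(n - 1u);
    }

    // For a single test image the reservation covers image_begin..oat_file_end.
    constexpr uint32_t ReservationSize(uint32_t image_begin, uint32_t oat_file_end) {
      return RoundUp(oat_file_end - image_begin, kPageSize);
    }
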
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 6ffcef1..fa0a3d4 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -169,10 +169,11 @@
   {
     // Create a generic tmp file, to be the base of the .art and .oat temporary files.
     ScratchFile location;
-    for (int i = 0; i < static_cast<int>(class_path.size()); ++i) {
-      std::string cur_location =
-          android::base::StringPrintf("%s-%d.art", location.GetFilename().c_str(), i);
-      out_helper.image_locations.push_back(ScratchFile(cur_location));
+    std::vector<std::string> image_locations =
+        gc::space::ImageSpace::ExpandMultiImageLocations(out_helper.dex_file_locations,
+                                                         location.GetFilename() + ".art");
+    for (size_t i = 0u; i != class_path.size(); ++i) {
+      out_helper.image_locations.push_back(ScratchFile(image_locations[i]));
     }
   }
   std::vector<std::string> image_filenames;
@@ -218,17 +219,12 @@
     {
       jobject class_loader = nullptr;
       TimingLogger timings("ImageTest::WriteRead", false, false);
-      TimingLogger::ScopedTiming t("CompileAll", &timings);
-      SetDexFilesForOatFile(class_path);
-      driver->CompileAll(class_loader, class_path, &timings);
+      CompileAll(class_loader, class_path, &timings);
 
-      t.NewTiming("WriteElf");
+      TimingLogger::ScopedTiming t("WriteElf", &timings);
       SafeMap<std::string, std::string> key_value_store;
       key_value_store.Put(OatHeader::kBootClassPathKey,
-                          gc::space::ImageSpace::GetMultiImageBootClassPath(
-                              out_helper.dex_file_locations,
-                              oat_filenames,
-                              image_filenames));
+                          android::base::Join(out_helper.dex_file_locations, ':'));
 
       std::vector<std::unique_ptr<ElfWriter>> elf_writers;
       std::vector<std::unique_ptr<OatWriter>> oat_writers;
@@ -330,8 +326,7 @@
           elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
         }
 
-        bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(),
-                                                 /*boot_image_checksum=*/ 0u);
+        bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream());
         ASSERT_TRUE(header_ok);
 
         writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 231ac04..e4e4b13 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -32,6 +32,7 @@
 #include "base/enums.h"
 #include "base/globals.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
@@ -153,6 +154,26 @@
       : nullptr;
 }
 
+bool ImageWriter::IsImageObject(ObjPtr<mirror::Object> obj) const {
+  // For boot image, we keep all objects remaining after the GC in PrepareImageAddressSpace().
+  if (compiler_options_.IsBootImage()) {
+    return true;
+  }
+  // Objects already in the boot image do not belong to the image being written.
+  if (IsInBootImage(obj.Ptr())) {
+    return false;
+  }
+  // DexCaches for boot class path components that are not part of the boot image
+  // cannot be garbage collected in PrepareImageAddressSpace(), but we do not want
+  // them in the app image either, so make sure we include only the app DexCaches.
+  if (obj->IsDexCache() &&
+      !ContainsElement(compiler_options_.GetDexFilesForOatFile(),
+                       obj->AsDexCache()->GetDexFile())) {
+    return false;
+  }
+  return true;
+}
+
 // Return true if an object is already in an image space.
 bool ImageWriter::IsInBootImage(const void* obj) const {
   gc::Heap* const heap = Runtime::Current()->GetHeap();
@@ -437,7 +458,7 @@
    */
   heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!IsInBootImage(object.Ptr())) {
+    if (IsImageObject(object)) {
       visitor.SetObject(object);
 
       if (object->IsDexCache()) {
@@ -680,7 +701,7 @@
     ObjPtr<mirror::ClassLoader> class_loader = GetAppClassLoader();
     std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
     for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
-      if (IsInBootImage(dex_cache.Ptr())) {
+      if (!IsImageObject(dex_cache)) {
         continue;  // Boot image DexCache is not written to the app image.
       }
       PreloadDexCache(dex_cache, class_loader);
@@ -989,7 +1010,7 @@
   for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
     ObjPtr<mirror::DexCache> dex_cache =
         ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
-    if (dex_cache == nullptr || IsInBootImage(dex_cache.Ptr())) {
+    if (dex_cache == nullptr || !IsImageObject(dex_cache)) {
       continue;
     }
     const DexFile* dex_file = dex_cache->GetDexFile();
@@ -1758,7 +1779,8 @@
   for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
     // Pass the class loader associated with the DexCache. This can either be
     // the app's `class_loader` or `nullptr` if boot class loader.
-    PruneDexCache(dex_cache, IsInBootImage(dex_cache.Ptr()) ? nullptr : GetAppClassLoader());
+    bool is_app_image_dex_cache = compiler_options_.IsAppImage() && IsImageObject(dex_cache);
+    PruneDexCache(dex_cache, is_app_image_dex_cache ? GetAppClassLoader() : nullptr);
   }
 
   // Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
@@ -1856,7 +1878,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (!IsInBootImage(dex_cache.Ptr())) {
+      if (IsImageObject(dex_cache)) {
         dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
       }
     }
@@ -1875,7 +1897,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (!IsInBootImage(dex_cache.Ptr())) {
+      if (IsImageObject(dex_cache)) {
         non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
       }
     }
@@ -1889,7 +1911,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (!IsInBootImage(dex_cache.Ptr()) &&
+      if (IsImageObject(dex_cache) &&
           image_dex_files.find(dex_file) != image_dex_files.end()) {
         dex_caches->Set<false>(i, dex_cache.Ptr());
         ++i;
@@ -1942,7 +1964,7 @@
 mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
                                               mirror::Object* obj,
                                               size_t oat_index) {
-  if (obj == nullptr || IsInBootImage(obj)) {
+  if (obj == nullptr || !IsImageObject(obj)) {
     // Object is null or already in the image, there is no work to do.
     return obj;
   }
@@ -2373,7 +2395,7 @@
   {
     auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
         REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+      if (IsImageObject(obj)) {
         CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
       }
     };
@@ -2444,7 +2466,7 @@
   {
     auto unbin_objects_into_offset = [&](mirror::Object* obj)
         REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (!IsInBootImage(obj)) {
+      if (IsImageObject(obj)) {
         UnbinObjectsIntoOffset(obj);
       }
     };
@@ -2583,6 +2605,23 @@
   const uint8_t* oat_file_end = oat_file_begin + image_info.oat_loaded_size_;
   const uint8_t* oat_data_end = image_info.oat_data_begin_ + image_info.oat_size_;
 
+  uint32_t image_reservation_size = image_info.image_size_;
+  DCHECK_ALIGNED(image_reservation_size, kPageSize);
+  uint32_t component_count = 1u;
+  if (!compiler_options_.IsAppImage()) {
+    if (oat_index == 0u) {
+      const ImageInfo& last_info = image_infos_.back();
+      const uint8_t* end = last_info.oat_file_begin_ + last_info.oat_loaded_size_;
+      DCHECK_ALIGNED(image_info.image_begin_, kPageSize);
+      image_reservation_size =
+          dchecked_integral_cast<uint32_t>(RoundUp(end - image_info.image_begin_, kPageSize));
+      component_count = image_infos_.size();
+    } else {
+      image_reservation_size = 0u;
+      component_count = 0u;
+    }
+  }
+
   // Create the image sections.
   auto section_info_pair = image_info.CreateImageSections();
   const size_t image_end = section_info_pair.first;
@@ -2619,6 +2658,8 @@
   // Create the header, leave 0 for data size since we will fill this in as we are writing the
   // image.
   new (image_info.image_.Begin()) ImageHeader(
+      image_reservation_size,
+      component_count,
       PointerToLowMemUInt32(image_info.image_begin_),
       image_end,
       sections.data(),
@@ -2890,7 +2931,7 @@
 }
 
 void ImageWriter::CopyAndFixupObject(Object* obj) {
-  if (IsInBootImage(obj)) {
+  if (!IsImageObject(obj)) {
     return;
   }
   size_t offset = GetImageOffset(obj);
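
Note: the new base/stl_util.h include provides ContainsElement for the DexCache filter in IsImageObject. Its presumed shape (a sketch; the real helper may accept an extra start position):

    #include <algorithm>

    template <typename Container, typename T>
    bool ContainsElement(const Container& container, const T& value) {
      // Linear search; fine for the short dex-file lists compared here.
      return std::find(container.begin(), container.end(), value) != container.end();
    }
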
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 33bacf8..b680265 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -674,7 +674,12 @@
   template <typename T>
   T* NativeCopyLocation(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Return true of obj is inside of the boot image space. This may only return true if we are
+  // Return true if `obj` belongs to the image we're writing.
+  // For a boot image, this is true for all objects.
+  // For an app image, boot image objects and boot class path dex caches are excluded.
+  bool IsImageObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Return true if `obj` is inside of the boot image space. This may only return true if we are
   // compiling an app image.
   bool IsInBootImage(const void* obj) const;
 
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 6b17ef6..e2a9ac2 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -968,7 +968,7 @@
     ClassStatus status;
     bool found = writer_->compiler_driver_->GetCompiledClass(class_ref, &status);
     if (!found) {
-      VerificationResults* results = writer_->compiler_driver_->GetVerificationResults();
+      const VerificationResults* results = writer_->compiler_options_.GetVerificationResults();
       if (results != nullptr && results->IsClassRejected(class_ref)) {
         // The oat class status is used only for verification of resolved classes,
         // so use ClassStatus::kErrorResolved whether the class was resolved or unresolved
@@ -2808,11 +2808,9 @@
   return true;
 }
 
-bool OatWriter::WriteHeader(OutputStream* out, uint32_t boot_image_checksum) {
+bool OatWriter::WriteHeader(OutputStream* out) {
   CHECK(write_state_ == WriteState::kWriteHeader);
 
-  oat_header_->SetBootImageChecksum(boot_image_checksum);
-
   // Update checksum with header data.
   DCHECK_EQ(oat_header_->GetChecksum(), 0u);  // For checksum calculation.
   const uint8_t* header_begin = reinterpret_cast<const uint8_t*>(oat_header_.get());
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 9cd2fd0..cc0e83a 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -198,7 +198,7 @@
   // Check the size of the written oat file.
   bool CheckOatSize(OutputStream* out, size_t file_offset, size_t relative_offset);
   // Write the oat header. This finalizes the oat file.
-  bool WriteHeader(OutputStream* out, uint32_t boot_image_checksum);
+  bool WriteHeader(OutputStream* out);
 
   // Returns whether the oat file has an associated image.
   bool HasImage() const {
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 598a0f8..ecf9db8 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -240,7 +240,7 @@
       elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
     }
 
-    if (!oat_writer.WriteHeader(elf_writer->GetStream(), /*boot_image_checksum=*/ 42u)) {
+    if (!oat_writer.WriteHeader(elf_writer->GetStream())) {
       return false;
     }
 
@@ -391,12 +391,12 @@
   jobject class_loader = nullptr;
   if (kCompile) {
     TimingLogger timings2("OatTest::WriteRead", false, false);
-    SetDexFilesForOatFile(class_linker->GetBootClassPath());
-    compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
+    CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
   }
 
   ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
+  key_value_store.Put(OatHeader::kBootClassPathChecksumsKey, "testkey");
   bool success = WriteElf(tmp_vdex.GetFile(),
                           tmp_oat.GetFile(),
                           class_linker->GetBootClassPath(),
@@ -405,7 +405,7 @@
   ASSERT_TRUE(success);
 
   if (kCompile) {  // OatWriter strips the code, regenerate to compare
-    compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
+    CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
   }
   std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                   tmp_oat.GetFilename(),
@@ -419,7 +419,8 @@
   const OatHeader& oat_header = oat_file->GetOatHeader();
   ASSERT_TRUE(oat_header.IsValid());
   ASSERT_EQ(class_linker->GetBootClassPath().size(), oat_header.GetDexFileCount());  // core
-  ASSERT_EQ(42u, oat_header.GetBootImageChecksum());
+  ASSERT_TRUE(oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey) != nullptr);
+  ASSERT_STREQ("testkey", oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey));
 
   ASSERT_TRUE(java_lang_dex_file_ != nullptr);
   const DexFile& dex_file = *java_lang_dex_file_;
@@ -465,7 +466,7 @@
 TEST_F(OatTest, OatHeaderSizeCheck) {
   // If this test is failing and you have to update these constants,
   // it is time to update OatHeader::kOatVersion
-  EXPECT_EQ(68U, sizeof(OatHeader));
+  EXPECT_EQ(64U, sizeof(OatHeader));
   EXPECT_EQ(4U, sizeof(OatMethodOffsets));
   EXPECT_EQ(8U, sizeof(OatQuickMethodHeader));
   EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
@@ -513,8 +514,7 @@
     ScopedObjectAccess soa(Thread::Current());
     class_linker->RegisterDexFile(*dex_file, soa.Decode<mirror::ClassLoader>(class_loader));
   }
-  SetDexFilesForOatFile(dex_files);
-  compiler_driver_->CompileAll(class_loader, dex_files, &timings);
+  CompileAll(class_loader, dex_files, &timings);
 
   ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index c789fd7..739d9b8 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -91,8 +91,8 @@
     // Run imgdiag --image-diff-pid=$image_diff_pid and wait until it's done with a 0 exit code.
     std::vector<std::string> exec_argv = {
         file_path,
-        "--image-diff-pid=" + PidToString(image_diff_pid),
-        "--zygote-diff-pid=" + PidToString(image_diff_pid),
+        "--image-diff-pid=" + std::to_string(image_diff_pid),
+        "--zygote-diff-pid=" + std::to_string(image_diff_pid),
         "--runtime-arg",
         GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()),
         "--runtime-arg",
@@ -109,12 +109,6 @@
   }
 
  private:
-  std::string PidToString(pid_t pid) {
-    std::stringstream sstream;
-    sstream << pid;
-    return sstream.str();
-  }
-
   std::string runtime_args_image_;
   std::string boot_image_location_;
 };
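
Note: the deleted PidToString helper reimplemented std::to_string; since pid_t is an integral type, the standard overload applies directly. For example (hypothetical wrapper):

    #include <string>
    #include <sys/types.h>

    // One call, no <sstream> dependency.
    std::string PidArg(pid_t pid) {
      return "--image-diff-pid=" + std::to_string(pid);
    }
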
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 2d9e15e..46724ee 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -24,6 +24,7 @@
 #include "nativehelper/scoped_local_ref.h"
 
 #include "android-base/stringprintf.h"
+#include "android-base/strings.h"
 #include "android-base/unique_fd.h"
 
 #include "art_field-inl.h"
@@ -109,49 +110,59 @@
   CHECK_EQ(0, unlink_result);
 }
 
-void CommonArtTestImpl::SetUpAndroidRoot() {
+void CommonArtTestImpl::SetUpAndroidRootEnvVars() {
   if (IsHost()) {
-    // $ANDROID_ROOT is set on the device, but not necessarily on the host.
-    // But it needs to be set so that icu4c can find its locale data.
-    const char* android_root_from_env = getenv("ANDROID_ROOT");
-    if (android_root_from_env == nullptr) {
-      // Use ANDROID_HOST_OUT for ANDROID_ROOT if it is set.
-      const char* android_host_out = getenv("ANDROID_HOST_OUT");
-      if (android_host_out != nullptr) {
-        setenv("ANDROID_ROOT", android_host_out, 1);
-      } else {
-        // Build it from ANDROID_BUILD_TOP or cwd
-        std::string root;
-        const char* android_build_top = getenv("ANDROID_BUILD_TOP");
-        if (android_build_top != nullptr) {
-          root += android_build_top;
-        } else {
-          // Not set by build server, so default to current directory
-          char* cwd = getcwd(nullptr, 0);
-          setenv("ANDROID_BUILD_TOP", cwd, 1);
-          root += cwd;
-          free(cwd);
-        }
+    // Make sure that ANDROID_BUILD_TOP is set. If not, set it from CWD.
+    const char* android_build_top_from_env = getenv("ANDROID_BUILD_TOP");
+    if (android_build_top_from_env == nullptr) {
+      // Not set by build server, so default to current directory.
+      char* cwd = getcwd(nullptr, 0);
+      setenv("ANDROID_BUILD_TOP", cwd, 1);
+      free(cwd);
+      android_build_top_from_env = getenv("ANDROID_BUILD_TOP");
+    }
+
+    const char* android_host_out_from_env = getenv("ANDROID_HOST_OUT");
+    if (android_host_out_from_env == nullptr) {
+      // Not set by build server, so default to the usual value of
+      // ANDROID_HOST_OUT.
+      std::string android_host_out = android_build_top_from_env;
 #if defined(__linux__)
-        root += "/out/host/linux-x86";
+      android_host_out += "/out/host/linux-x86";
 #elif defined(__APPLE__)
-        root += "/out/host/darwin-x86";
+      android_host_out += "/out/host/darwin-x86";
 #else
 #error unsupported OS
 #endif
-        setenv("ANDROID_ROOT", root.c_str(), 1);
-      }
+      setenv("ANDROID_HOST_OUT", android_host_out.c_str(), 1);
+      android_host_out_from_env = getenv("ANDROID_HOST_OUT");
     }
-    setenv("LD_LIBRARY_PATH", ":", 0);  // Required by java.lang.System.<clinit>.
 
-    // Not set by build server, so default
-    if (getenv("ANDROID_HOST_OUT") == nullptr) {
-      setenv("ANDROID_HOST_OUT", getenv("ANDROID_ROOT"), 1);
+    // Environment variable ANDROID_ROOT is set on the device, but not
+    // necessarily on the host.
+    const char* android_root_from_env = getenv("ANDROID_ROOT");
+    if (android_root_from_env == nullptr) {
+      // Use ANDROID_HOST_OUT for ANDROID_ROOT.
+      setenv("ANDROID_ROOT", android_host_out_from_env, 1);
+      android_root_from_env = getenv("ANDROID_ROOT");
     }
+
+    // Environment variable ANDROID_RUNTIME_ROOT is set on the device, but not
+    // necessarily on the host. It needs to be set so that various libraries
+    // like icu4c can find their data files.
+    const char* android_runtime_root_from_env = getenv("ANDROID_RUNTIME_ROOT");
+    if (android_runtime_root_from_env == nullptr) {
+      // Use ${ANDROID_HOST_OUT}/com.android.runtime for ANDROID_RUNTIME_ROOT.
+      std::string android_runtime_root = android_host_out_from_env;
+      android_runtime_root += "/com.android.runtime";
+      setenv("ANDROID_RUNTIME_ROOT", android_runtime_root.c_str(), 1);
+    }
+
+    setenv("LD_LIBRARY_PATH", ":", 0);  // Required by java.lang.System.<clinit>.
   }
 }
 
-void CommonArtTestImpl::SetUpAndroidData(std::string& android_data) {
+void CommonArtTestImpl::SetUpAndroidDataDir(std::string& android_data) {
   // On target, we cannot use /mnt/sdcard because it is mounted noexec, so use a subdir of dalvik-cache.
   if (IsHost()) {
     const char* tmpdir = getenv("TMPDIR");
@@ -171,15 +182,16 @@
 }
 
 void CommonArtTestImpl::SetUp() {
-  SetUpAndroidRoot();
-  SetUpAndroidData(android_data_);
+  SetUpAndroidRootEnvVars();
+  SetUpAndroidDataDir(android_data_);
   dalvik_cache_.append(android_data_.c_str());
   dalvik_cache_.append("/dalvik-cache");
   int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
   ASSERT_EQ(mkdir_result, 0);
 }
 
-void CommonArtTestImpl::TearDownAndroidData(const std::string& android_data, bool fail_on_error) {
+void CommonArtTestImpl::TearDownAndroidDataDir(const std::string& android_data,
+                                               bool fail_on_error) {
   if (fail_on_error) {
     ASSERT_EQ(rmdir(android_data.c_str()), 0);
   } else {
@@ -294,7 +306,7 @@
   ClearDirectory(dalvik_cache_.c_str());
   int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
   ASSERT_EQ(0, rmdir_cache_result);
-  TearDownAndroidData(android_data_, true);
+  TearDownAndroidDataDir(android_data_, true);
   dalvik_cache_.clear();
 }
 
@@ -316,12 +328,18 @@
 }
 
 std::vector<std::string> CommonArtTestImpl::GetLibCoreDexFileNames() {
-  // Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+  // Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
   // because that's what we use for compiling the core.art image.
+  // It may contain additional modules from TEST_CORE_JARS.
   static const char* const kLibcoreModules[] = {
+      // CORE_IMG_JARS modules.
       "core-oj",
       "core-libart",
       "core-simple",
+      "okhttp",
+      "bouncycastle",
+      // Additional modules.
+      "conscrypt",
   };
 
   std::vector<std::string> result;
@@ -354,14 +372,7 @@
 
 std::string CommonArtTestImpl::GetClassPathOption(const char* option,
                                                   const std::vector<std::string>& class_path) {
-  std::string option_string = option;
-  const char* separator = "";
-  for (const std::string& core_dex_file_name : class_path) {
-    option_string += separator;
-    option_string += core_dex_file_name;
-    separator = ":";
-  }
-  return option_string;
+  return option + android::base::Join(class_path, ':');
 }
 
 std::string CommonArtTestImpl::GetTestAndroidRoot() {
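
Note: GetClassPathOption now reduces to string concatenation plus android::base::Join. For illustration (made-up paths):

    #include <string>
    #include <vector>

    #include "android-base/strings.h"

    // MakeOption("-Xbootclasspath:", {"/a/oj.jar", "/a/libart.jar"})
    // -> "-Xbootclasspath:/a/oj.jar:/a/libart.jar".
    std::string MakeOption(const char* option, const std::vector<std::string>& class_path) {
      return option + android::base::Join(class_path, ':');
    }
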
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 5c2ae82..3e2340f 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -85,13 +85,16 @@
   CommonArtTestImpl() = default;
   virtual ~CommonArtTestImpl() = default;
 
-  static void SetUpAndroidRoot();
+  // Set up ANDROID_BUILD_TOP, ANDROID_HOST_OUT, ANDROID_ROOT and ANDROID_RUNTIME_ROOT
+  // environment variables using sensible defaults if not already set.
+  static void SetUpAndroidRootEnvVars();
 
+  // Set up the ANDROID_DATA environment variable, creating the directory if required.
   // Note: setting up ANDROID_DATA may create a temporary directory. If this is used in a
   // non-derived class, be sure to also call the corresponding tear-down below.
-  static void SetUpAndroidData(std::string& android_data);
+  static void SetUpAndroidDataDir(std::string& android_data);
 
-  static void TearDownAndroidData(const std::string& android_data, bool fail_on_error);
+  static void TearDownAndroidDataDir(const std::string& android_data, bool fail_on_error);
 
   // Gets the paths of the libcore dex files.
   static std::vector<std::string> GetLibCoreDexFileNames();
diff --git a/libartbase/base/hiddenapi_flags.h b/libartbase/base/hiddenapi_flags.h
index 9ea01d7..8c1ffd5 100644
--- a/libartbase/base/hiddenapi_flags.h
+++ b/libartbase/base/hiddenapi_flags.h
@@ -44,11 +44,12 @@
     // The max release code implicitly includes all maintenance releases,
     // e.g. GreylistMaxO is accessible to targetSdkVersion <= 27 (O_MR1).
     kGreylistMaxO =  3,
+    kGreylistMaxP =  4,
 
     // Special values
     kInvalid =       static_cast<uint32_t>(-1),
     kMinValue =      kWhitelist,
-    kMaxValue =      kGreylistMaxO,
+    kMaxValue =      kGreylistMaxP,
   };
 
   static constexpr const char* kNames[] = {
@@ -56,6 +57,7 @@
     "greylist",
     "blacklist",
     "greylist-max-o",
+    "greylist-max-p",
   };
 
   static constexpr const char* kInvalidName = "invalid";
@@ -65,6 +67,7 @@
     /* greylist */ SdkVersion::kMax,
     /* blacklist */ SdkVersion::kMin,
     /* greylist-max-o */ SdkVersion::kO_MR1,
+    /* greylist-max-p */ SdkVersion::kP,
   };
 
   static ApiList MinValue() { return ApiList(Value::kMinValue); }
@@ -79,6 +82,7 @@
   static ApiList Greylist() { return ApiList(Value::kGreylist); }
   static ApiList Blacklist() { return ApiList(Value::kBlacklist); }
   static ApiList GreylistMaxO() { return ApiList(Value::kGreylistMaxO); }
+  static ApiList GreylistMaxP() { return ApiList(Value::kGreylistMaxP); }
   static ApiList Invalid() { return ApiList(Value::kInvalid); }
 
   // Decodes ApiList from dex hiddenapi flags.
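
Note: each greylist-max-* bucket caps the targetSdkVersion allowed to access the member, so kGreylistMaxP maps to SdkVersion::kP (28) just as kGreylistMaxO maps to kO_MR1 (27). An illustrative check, not the ART API:

    #include <cstdint>

    enum class SdkVersion : uint32_t { kO_MR1 = 27u, kP = 28u };

    // A greylist-max-p member is accessible iff the app targets P (28) or lower.
    bool AllowedForTargetSdk(SdkVersion max_allowed, uint32_t target_sdk_version) {
      return target_sdk_version <= static_cast<uint32_t>(max_allowed);
    }
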
diff --git a/libartbase/base/sdk_version.h b/libartbase/base/sdk_version.h
index 4372e5a..e2dbc50 100644
--- a/libartbase/base/sdk_version.h
+++ b/libartbase/base/sdk_version.h
@@ -33,7 +33,6 @@
   kO     = 26u,
   kO_MR1 = 27u,
   kP     = 28u,
-  kP_MR1 = 29u,
   kMax   = std::numeric_limits<uint32_t>::max(),
 };
 
diff --git a/libartbase/base/unix_file/random_access_file_test.h b/libartbase/base/unix_file/random_access_file_test.h
index dbe6ca9..178f89d 100644
--- a/libartbase/base/unix_file/random_access_file_test.h
+++ b/libartbase/base/unix_file/random_access_file_test.h
@@ -35,11 +35,11 @@
   virtual RandomAccessFile* MakeTestFile() = 0;
 
   virtual void SetUp() {
-    art::CommonArtTest::SetUpAndroidData(android_data_);
+    art::CommonArtTest::SetUpAndroidDataDir(android_data_);
   }
 
   virtual void TearDown() {
-    art::CommonArtTest::TearDownAndroidData(android_data_, true);
+    art::CommonArtTest::TearDownAndroidDataDir(android_data_, true);
   }
 
   std::string GetTmpPath(const std::string& name) {
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 9c71055..11472a8 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -172,30 +172,6 @@
   }
 }
 
-// Returns a type cast pointer if object pointed to is within the provided bounds.
-// Otherwise returns nullptr.
-template <typename T>
-inline static T BoundsCheckedCast(const void* pointer,
-                                  const void* lower,
-                                  const void* upper) {
-  const uint8_t* bound_begin = static_cast<const uint8_t*>(lower);
-  const uint8_t* bound_end = static_cast<const uint8_t*>(upper);
-  DCHECK(bound_begin <= bound_end);
-
-  T result = reinterpret_cast<T>(pointer);
-  const uint8_t* begin = static_cast<const uint8_t*>(pointer);
-  const uint8_t* end = begin + sizeof(*result);
-  if (begin < bound_begin || end > bound_end || begin > end) {
-    return nullptr;
-  }
-  return result;
-}
-
-template <typename T, size_t size>
-constexpr size_t ArrayCount(const T (&)[size]) {
-  return size;
-}
-
 // Return -1 if <, 0 if ==, 1 if >.
 template <typename T>
 inline static int32_t Compare(T lhs, T rhs) {
diff --git a/libartbase/base/utils_test.cc b/libartbase/base/utils_test.cc
index 9bd50c3..c3b61ce 100644
--- a/libartbase/base/utils_test.cc
+++ b/libartbase/base/utils_test.cc
@@ -107,25 +107,6 @@
   EXPECT_EQ(expected, actual);
 }
 
-TEST_F(UtilsTest, ArrayCount) {
-  int i[64];
-  EXPECT_EQ(ArrayCount(i), 64u);
-  char c[7];
-  EXPECT_EQ(ArrayCount(c), 7u);
-}
-
-TEST_F(UtilsTest, BoundsCheckedCast) {
-  char buffer[64];
-  const char* buffer_end = buffer + ArrayCount(buffer);
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(nullptr, buffer, buffer_end), nullptr);
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer, buffer, buffer_end),
-            reinterpret_cast<const uint64_t*>(buffer));
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer + 56, buffer, buffer_end),
-            reinterpret_cast<const uint64_t*>(buffer + 56));
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer - 1, buffer, buffer_end), nullptr);
-  EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer + 57, buffer, buffer_end), nullptr);
-}
-
 TEST_F(UtilsTest, GetProcessStatus) {
   EXPECT_EQ("utils_test", GetProcessStatus("Name"));
   EXPECT_EQ("R (running)", GetProcessStatus("State"));
diff --git a/libdexfile/dex/class_accessor-inl.h b/libdexfile/dex/class_accessor-inl.h
index f0f14c6..334b072 100644
--- a/libdexfile/dex/class_accessor-inl.h
+++ b/libdexfile/dex/class_accessor-inl.h
@@ -21,6 +21,7 @@
 
 #include "base/hiddenapi_flags.h"
 #include "base/leb128.h"
+#include "base/utils.h"
 #include "class_iterator.h"
 #include "code_item_accessors-inl.h"
 
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
index adfc864..bd7b912 100644
--- a/libdexfile/dex/class_accessor.h
+++ b/libdexfile/dex/class_accessor.h
@@ -17,7 +17,6 @@
 #ifndef ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_H_
 #define ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_H_
 
-#include "base/utils.h"
 #include "code_item_accessors.h"
 #include "dex_file.h"
 #include "invoke_type.h"
diff --git a/libdexfile/dex/utf.cc b/libdexfile/dex/utf.cc
index d09da73..ed07568 100644
--- a/libdexfile/dex/utf.cc
+++ b/libdexfile/dex/utf.cc
@@ -194,9 +194,10 @@
 uint32_t ComputeModifiedUtf8Hash(const char* chars) {
   uint32_t hash = 0;
   while (*chars != '\0') {
-    hash = hash * 31 + *chars++;
+    hash = hash * 31 + static_cast<uint8_t>(*chars);
+    ++chars;
   }
-  return static_cast<int32_t>(hash);
+  return hash;
 }
 
 int CompareModifiedUtf8ToUtf16AsCodePointValues(const char* utf8, const uint16_t* utf16,
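
Note: the uint8_t cast matters because plain char may be signed: a byte such as 0x80 would sign-extend to 0xffffff80 before entering the hash, making hashes differ between signed-char and unsigned-char platforms. A minimal demonstration (assumes the usual two's-complement value for '\x80'):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const char c = '\x80';
      uint32_t sign_extended = static_cast<uint32_t>(c);  // 0xffffff80 where char is signed.
      uint32_t zero_extended = static_cast<uint8_t>(c);   // Always 0x00000080.
      std::printf("%08x %08x\n", sign_extended, zero_extended);
      return 0;
    }
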
diff --git a/libdexfile/dex/utf_test.cc b/libdexfile/dex/utf_test.cc
index d2f22d1..c7a6a34 100644
--- a/libdexfile/dex/utf_test.cc
+++ b/libdexfile/dex/utf_test.cc
@@ -378,4 +378,11 @@
   }
 }
 
+TEST_F(UtfTest, NonAscii) {
+  const char kNonAsciiCharacter = '\x80';
+  const char input[] = { kNonAsciiCharacter, '\0' };
+  uint32_t hash = ComputeModifiedUtf8Hash(input);
+  EXPECT_EQ(static_cast<uint8_t>(kNonAsciiCharacter), hash);
+}
+
 }  // namespace art
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 1c74a92..d2a5bb8 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -473,9 +473,6 @@
                            GetQuickToInterpreterBridgeOffset);
 #undef DUMP_OAT_HEADER_OFFSET
 
-    os << "BOOT IMAGE CHECKSUM:\n";
-    os << StringPrintf("0x%08x\n\n", oat_header.GetBootImageChecksum());
-
     // Print the key-value store.
     {
       os << "KEY VALUE STORE:\n";
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 728939f..dfa659b 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -178,6 +178,11 @@
         expected_prefixes.push_back("InlineInfo");
       }
       if (mode == kModeArt) {
+        exec_argv.push_back("--runtime-arg");
+        exec_argv.push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+        exec_argv.push_back("--runtime-arg");
+        exec_argv.push_back(
+            GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
         exec_argv.push_back("--image=" + core_art_location_);
         exec_argv.push_back("--instruction-set=" + std::string(
             GetInstructionSetString(kRuntimeISA)));
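
Note: since the boot class path is no longer derived from oat/image file names baked into the image, oatdump's runtime needs both the physical jars and their recorded locations, in matching order. Illustratively (hypothetical paths; the test builds these from GetLibCoreDexFileNames() and GetLibCoreDexLocations()):

    #include <string>
    #include <vector>

    std::vector<std::string> exec_argv = {
        "--runtime-arg",
        "-Xbootclasspath:/host/out/core-oj-hostdex.jar",  // Files to open on disk.
        "--runtime-arg",
        "-Xbootclasspath-locations:/apex/com.android.runtime/javalib/core-oj.jar",  // Recorded locations.
    };
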
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index cabb758..6ca4e38 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -1427,11 +1427,6 @@
     method.SetCodeItemOffset(dex_file_->FindCodeItemOffset(class_def, dex_method_idx));
     // Clear all the intrinsics related flags.
     method.SetNotIntrinsic();
-    // Disable hiddenapi checks when accessing this method.
-    // Redefining hiddenapi flags is unsupported for the same reasons as redefining
-    // access flags. Moreover, ArtMethod loses pointer to the old dex file, so just
-    // disable the checks completely for consistency.
-    method.SetAccessFlags(method.GetAccessFlags() | art::kAccPublicApi);
   }
 }
 
@@ -1450,11 +1445,6 @@
       CHECK(new_field_id != nullptr);
       // We only need to update the index since the other data in the ArtField cannot be updated.
       field.SetDexFieldIndex(dex_file_->GetIndexForFieldId(*new_field_id));
-      // Disable hiddenapi checks when accessing this method.
-      // Redefining hiddenapi flags is unsupported for the same reasons as redefining
-      // access flags. Moreover, ArtField loses pointer to the old dex file, so just
-      // disable the checks completely for consistency.
-      field.SetAccessFlags(field.GetAccessFlags() | art::kAccPublicApi);
     }
   }
 }
@@ -1469,15 +1459,25 @@
   UpdateMethods(mclass, class_def);
   UpdateFields(mclass);
 
+  art::ObjPtr<art::mirror::ClassExt> ext(mclass->GetExtData());
+  CHECK(!ext.IsNull());
+  ext->SetOriginalDexFile(original_dex_file);
+
+  // If this is the first time the class is being redefined, store
+  // the native DexFile pointer and initial ClassDef index in ClassExt.
+  // This preserves the pointer for hiddenapi access checks, which need
+  // to read access flags from the initial DexFile.
+  if (ext->GetPreRedefineDexFile() == nullptr) {
+    ext->SetPreRedefineDexFile(&mclass->GetDexFile());
+    ext->SetPreRedefineClassDefIndex(mclass->GetDexClassDefIndex());
+  }
+
   // Update the class fields.
   // Need to update class last since the ArtMethod gets its DexFile from the class (which is needed
   // to call GetReturnTypeDescriptor and GetParameterTypeList above).
   mclass->SetDexCache(new_dex_cache.Ptr());
   mclass->SetDexClassDefIndex(dex_file_->GetIndexForClassDef(class_def));
   mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_.c_str())));
-  art::ObjPtr<art::mirror::ClassExt> ext(mclass->GetExtData());
-  CHECK(!ext.IsNull());
-  ext->SetOriginalDexFile(original_dex_file);
 
   // Notify the jit that all the methods in this class were redefined. Need to do this last since
   // the jit relies on the dex_file_ being correct (for native methods at least) to find the method
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 2131120..051db4c 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -91,7 +91,8 @@
         self->GetThreadName(name);
         if (name != "JDWP" &&
             name != "Signal Catcher" &&
-            !android::base::StartsWith(name, "Jit thread pool")) {
+            !android::base::StartsWith(name, "Jit thread pool") &&
+            !android::base::StartsWith(name, "Runtime worker thread")) {
           LOG(FATAL) << "Unexpected thread before start: " << name << " id: "
                      << self->GetThreadId();
         }
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 4aa5d3d..60f1af1 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -33,6 +33,7 @@
         "art_method.cc",
         "backtrace_helper.cc",
         "barrier.cc",
+        "base/locks.cc",
         "base/mem_map_arena_pool.cc",
         "base/mutex.cc",
         "base/quasi_atomic.cc",
@@ -454,7 +455,7 @@
     cmd: "$(location generate_operator_out) art/runtime $(in) > $(out)",
     tools: ["generate_operator_out"],
     srcs: [
-        "base/mutex.h",
+        "base/locks.h",
         "class_loader_context.h",
         "class_status.h",
         "debugger.h",
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index e97d2cb..fcf3c75 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -53,8 +53,8 @@
       "cortex-a75",
       "cortex-a76",
       "exynos-m1",
-      "denver",
-      "kryo"
+      "kryo",
+      "kryo385",
   };
   bool has_armv8a = FindVariantInArray(arm_variants_with_armv8a,
                                        arraysize(arm_variants_with_armv8a),
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index d9651f9..36e31bd 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -46,20 +46,6 @@
   EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", kryo_features->GetFeatureString().c_str());
   EXPECT_EQ(kryo_features->AsBitmap(), 7U);
 
-  // Build features for a 32-bit ARM denver processor.
-  std::unique_ptr<const InstructionSetFeatures> denver_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kArm, "denver", &error_msg));
-  ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
-
-  EXPECT_TRUE(denver_features->Equals(denver_features.get()));
-  EXPECT_TRUE(denver_features->HasAtLeast(krait_features.get()));
-  EXPECT_FALSE(krait_features->Equals(denver_features.get()));
-  EXPECT_FALSE(krait_features->HasAtLeast(denver_features.get()));
-  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
-  EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
-  EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", denver_features->GetFeatureString().c_str());
-  EXPECT_EQ(denver_features->AsBitmap(), 7U);
-
   // Build features for a 32-bit ARMv7 processor.
   std::unique_ptr<const InstructionSetFeatures> generic_features(
       InstructionSetFeatures::FromVariant(InstructionSet::kArm, "generic", &error_msg));
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 7796ca7..4a2b9d5 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -57,10 +57,6 @@
   static const char* arm64_variants_with_crc[] = {
       "default",
       "generic",
-      "kryo",
-      "exynos-m1",
-      "exynos-m2",
-      "exynos-m3",
       "cortex-a35",
       "cortex-a53",
       "cortex-a53.a57",
@@ -71,18 +67,25 @@
       "cortex-a55",
       "cortex-a75",
       "cortex-a76",
+      "exynos-m1",
+      "exynos-m2",
+      "exynos-m3",
+      "kryo",
+      "kryo385",
   };
 
   static const char* arm64_variants_with_lse[] = {
       "cortex-a55",
       "cortex-a75",
       "cortex-a76",
+      "kryo385",
   };
 
   static const char* arm64_variants_with_fp16[] = {
       "cortex-a55",
       "cortex-a75",
       "cortex-a76",
+      "kryo385",
   };
 
   static const char* arm64_variants_with_dotprod[] = {
@@ -123,8 +126,8 @@
         "exynos-m1",
         "exynos-m2",
         "exynos-m3",
-        "denver64",
-        "kryo"
+        "kryo",
+        "kryo385",
     };
     if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) {
       std::ostringstream os;
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index d067f66..5980b03 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -21,7 +21,6 @@
 #include <stdint.h>
 
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index c240017..fda269c 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -21,7 +21,6 @@
 
 #include "art_field.h"
 #include "base/callee_save_type.h"
-#include "base/utils.h"
 #include "class_linker-inl.h"
 #include "common_throws.h"
 #include "dex/code_item_accessors-inl.h"
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 882291f..5f5361a 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -814,7 +814,12 @@
   }
   ArtMethod* m =
       GetInterfaceMethodIfProxy(Runtime::Current()->GetClassLinker()->GetImagePointerSize());
-  return m->GetDexFile()->PrettyMethod(m->GetDexMethodIndex(), with_signature);
+  std::string res(m->GetDexFile()->PrettyMethod(m->GetDexMethodIndex(), with_signature));
+  if (with_signature && m->IsObsolete()) {
+    return "<OBSOLETE> " + res;
+  } else {
+    return res;
+  }
 }
 
 std::string ArtMethod::JniShortName() {
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 8d3cf45..a1a3659 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -27,15 +27,15 @@
 
 Barrier::Barrier(int count)
     : count_(count),
-      lock_("GC barrier lock", kThreadSuspendCountLock),
-      condition_("GC barrier condition", lock_) {
+      lock_(new Mutex("GC barrier lock", kThreadSuspendCountLock)),
+      condition_(new ConditionVariable("GC barrier condition", *lock_)) {
 }
 
 template void Barrier::Increment<Barrier::kAllowHoldingLocks>(Thread* self, int delta);
 template void Barrier::Increment<Barrier::kDisallowHoldingLocks>(Thread* self, int delta);
 
 void Barrier::Pass(Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   SetCountLocked(self, count_ - 1);
 }
 
@@ -44,13 +44,13 @@
 }
 
 void Barrier::Init(Thread* self, int count) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   SetCountLocked(self, count);
 }
 
 template <Barrier::LockHandling locks>
 void Barrier::Increment(Thread* self, int delta) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   SetCountLocked(self, count_ + delta);
 
   // Increment the count.  If it becomes zero after the increment
@@ -62,22 +62,22 @@
   // condition variable, thus waking this up.
   while (count_ != 0) {
     if (locks == kAllowHoldingLocks) {
-      condition_.WaitHoldingLocks(self);
+      condition_->WaitHoldingLocks(self);
     } else {
-      condition_.Wait(self);
+      condition_->Wait(self);
     }
   }
 }
 
 bool Barrier::Increment(Thread* self, int delta, uint32_t timeout_ms) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   SetCountLocked(self, count_ + delta);
   bool timed_out = false;
   if (count_ != 0) {
     uint32_t timeout_ns = 0;
     uint64_t abs_timeout = NanoTime() + MsToNs(timeout_ms);
     for (;;) {
-      timed_out = condition_.TimedWait(self, timeout_ms, timeout_ns);
+      timed_out = condition_->TimedWait(self, timeout_ms, timeout_ns);
       if (timed_out || count_ == 0) return timed_out;
       // Compute time remaining on timeout.
       uint64_t now = NanoTime();
@@ -91,14 +91,14 @@
 }
 
 int Barrier::GetCount(Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *GetLock());
   return count_;
 }
 
 void Barrier::SetCountLocked(Thread* self, int count) {
   count_ = count;
   if (count == 0) {
-    condition_.Broadcast(self);
+    condition_->Broadcast(self);
   }
 }
 
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 8a38c4c..e21627e 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -28,10 +28,14 @@
 #define ART_RUNTIME_BARRIER_H_
 
 #include <memory>
-#include "base/mutex.h"
+
+#include "base/locks.h"
 
 namespace art {
 
+class ConditionVariable;
+class LOCKABLE Mutex;
+
 // TODO: Maybe give this a better name.
 class Barrier {
  public:
@@ -44,10 +48,10 @@
   virtual ~Barrier();
 
   // Pass through the barrier, decrement the count but do not block.
-  void Pass(Thread* self) REQUIRES(!lock_);
+  void Pass(Thread* self) REQUIRES(!GetLock());
 
   // Wait on the barrier, decrement the count.
-  void Wait(Thread* self) REQUIRES(!lock_);
+  void Wait(Thread* self) REQUIRES(!GetLock());
 
   // The following three calls are only safe if we somehow know that no other thread both
   // - has been woken up, and
@@ -58,26 +62,30 @@
   // Increment the count by delta, wait on condition if count is non zero.  If LockHandling is
   // kAllowHoldingLocks we will not check that all locks are released when waiting.
   template <Barrier::LockHandling locks = kDisallowHoldingLocks>
-  void Increment(Thread* self, int delta) REQUIRES(!lock_);
+  void Increment(Thread* self, int delta) REQUIRES(!GetLock());
 
   // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
   // true if time out occurred.
-  bool Increment(Thread* self, int delta, uint32_t timeout_ms) REQUIRES(!lock_);
+  bool Increment(Thread* self, int delta, uint32_t timeout_ms) REQUIRES(!GetLock());
 
   // Set the count to a new value.  This should only be used if there is no possibility that
   // another thread is still in Wait().  See above.
-  void Init(Thread* self, int count) REQUIRES(!lock_);
+  void Init(Thread* self, int count) REQUIRES(!GetLock());
 
-  int GetCount(Thread* self) REQUIRES(!lock_);
+  int GetCount(Thread* self) REQUIRES(!GetLock());
 
  private:
-  void SetCountLocked(Thread* self, int count) REQUIRES(lock_);
+  void SetCountLocked(Thread* self, int count) REQUIRES(GetLock());
+
+  Mutex* GetLock() {
+    return lock_.get();
+  }
 
   // Counter, when this reaches 0 all people blocked on the barrier are signalled.
-  int count_ GUARDED_BY(lock_);
+  int count_ GUARDED_BY(GetLock());
 
-  Mutex lock_ ACQUIRED_AFTER(Locks::abort_lock_);
-  ConditionVariable condition_ GUARDED_BY(lock_);
+  std::unique_ptr<Mutex> lock_ ACQUIRED_AFTER(Locks::abort_lock_);
+  std::unique_ptr<ConditionVariable> condition_ GUARDED_BY(GetLock());
 };
 
 }  // namespace art
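
Note: barrier.h now includes only base/locks.h and forward-declares Mutex and ConditionVariable, so they can no longer be by-value members (incomplete types); hence the unique_ptr indirection and the GetLock() accessor used by the thread-safety annotations. The pattern, reduced to a sketch:

    #include <memory>

    class Mutex;  // Forward-declared; the header no longer pulls in mutex.h.

    class Example {
     public:
      Example();   // Defined in the .cc file, where Mutex is complete.
      ~Example();  // Ditto: destroying unique_ptr<Mutex> needs the full type.

     private:
      Mutex* GetLock() { return lock_.get(); }
      std::unique_ptr<Mutex> lock_;  // A pointer member tolerates the incomplete type.
    };
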
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
new file mode 100644
index 0000000..cfc9f1d
--- /dev/null
+++ b/runtime/base/locks.cc
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "locks.h"
+
+#include <errno.h>
+#include <sys/time.h>
+
+#include "android-base/logging.h"
+
+#include "base/atomic.h"
+#include "base/logging.h"
+#include "base/systrace.h"
+#include "base/time_utils.h"
+#include "base/value_object.h"
+#include "mutex-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+
+static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);
+
+Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::alloc_tracker_lock_ = nullptr;
+Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
+Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
+ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
+ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
+Mutex* Locks::custom_tls_lock_ = nullptr;
+Mutex* Locks::deoptimization_lock_ = nullptr;
+ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
+Mutex* Locks::intern_table_lock_ = nullptr;
+Mutex* Locks::jni_function_table_lock_ = nullptr;
+Mutex* Locks::jni_libraries_lock_ = nullptr;
+Mutex* Locks::logging_lock_ = nullptr;
+Mutex* Locks::modify_ldt_lock_ = nullptr;
+MutatorMutex* Locks::mutator_lock_ = nullptr;
+Mutex* Locks::profiler_lock_ = nullptr;
+ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
+ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
+Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
+Mutex* Locks::reference_processor_lock_ = nullptr;
+Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
+Mutex* Locks::runtime_shutdown_lock_ = nullptr;
+Mutex* Locks::cha_lock_ = nullptr;
+Mutex* Locks::subtype_check_lock_ = nullptr;
+Mutex* Locks::thread_list_lock_ = nullptr;
+ConditionVariable* Locks::thread_exit_cond_ = nullptr;
+Mutex* Locks::thread_suspend_count_lock_ = nullptr;
+Mutex* Locks::trace_lock_ = nullptr;
+Mutex* Locks::unexpected_signal_lock_ = nullptr;
+Mutex* Locks::user_code_suspension_lock_ = nullptr;
+Uninterruptible Roles::uninterruptible_;
+ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
+Mutex* Locks::jni_weak_globals_lock_ = nullptr;
+ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+Mutex* Locks::native_debug_interface_lock_ = nullptr;
+std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
+Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
+
+// Wait for an amount of time that roughly increases in the argument i.
+// Spin for small arguments and yield/sleep for longer ones.
+static void BackOff(uint32_t i) {
+  static constexpr uint32_t kSpinMax = 10;
+  static constexpr uint32_t kYieldMax = 20;
+  if (i <= kSpinMax) {
+    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
+    // test-and-test-and-set loop in the caller.  Possibly skip entirely on a uniprocessor.
+    volatile uint32_t x = 0;
+    const uint32_t spin_count = 10 * i;
+    for (uint32_t spin = 0; spin < spin_count; ++spin) {
+      ++x;  // Volatile; hence should not be optimized away.
+    }
+    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
+  } else if (i <= kYieldMax) {
+    sched_yield();
+  } else {
+    NanoSleep(1000ull * (i - kYieldMax));
+  }
+}
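+// As a concrete reading of the schedule above: attempt i = 5 spins through 50 volatile
+// increments, attempt i = 15 calls sched_yield(), and attempt i = 30 sleeps for
+// 1000 * (30 - 20) = 10000 nanoseconds, with the sleep duration growing linearly from there.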
+
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
+ public:
+  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
+    for (uint32_t i = 0;
+         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
+                                                                                     mutex);
+         ++i) {
+      BackOff(i);
+    }
+  }
+
+  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
+    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
+              mutex_);
+    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
+  }
+
+ private:
+  const BaseMutex* const mutex_;
+};
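+
+// Note that the scoped lock above is effectively a tiny spin lock: it busy-waits (with BackOff)
+// on a CAS over expected_mutexes_on_weak_ref_access_guard_ instead of using a Mutex, presumably
+// to keep it outside the lock hierarchy it helps to maintain.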
+
+void Locks::Init() {
+  if (logging_lock_ != nullptr) {
+    // Already initialized.
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+      DCHECK(modify_ldt_lock_ != nullptr);
+    } else {
+      DCHECK(modify_ldt_lock_ == nullptr);
+    }
+    DCHECK(abort_lock_ != nullptr);
+    DCHECK(alloc_tracker_lock_ != nullptr);
+    DCHECK(allocated_monitor_ids_lock_ != nullptr);
+    DCHECK(allocated_thread_ids_lock_ != nullptr);
+    DCHECK(breakpoint_lock_ != nullptr);
+    DCHECK(classlinker_classes_lock_ != nullptr);
+    DCHECK(custom_tls_lock_ != nullptr);
+    DCHECK(deoptimization_lock_ != nullptr);
+    DCHECK(heap_bitmap_lock_ != nullptr);
+    DCHECK(oat_file_manager_lock_ != nullptr);
+    DCHECK(verifier_deps_lock_ != nullptr);
+    DCHECK(host_dlopen_handles_lock_ != nullptr);
+    DCHECK(intern_table_lock_ != nullptr);
+    DCHECK(jni_function_table_lock_ != nullptr);
+    DCHECK(jni_libraries_lock_ != nullptr);
+    DCHECK(logging_lock_ != nullptr);
+    DCHECK(mutator_lock_ != nullptr);
+    DCHECK(profiler_lock_ != nullptr);
+    DCHECK(cha_lock_ != nullptr);
+    DCHECK(subtype_check_lock_ != nullptr);
+    DCHECK(thread_list_lock_ != nullptr);
+    DCHECK(thread_suspend_count_lock_ != nullptr);
+    DCHECK(trace_lock_ != nullptr);
+    DCHECK(unexpected_signal_lock_ != nullptr);
+    DCHECK(user_code_suspension_lock_ != nullptr);
+    DCHECK(dex_lock_ != nullptr);
+    DCHECK(native_debug_interface_lock_ != nullptr);
+  } else {
+    // Create global locks in level order from highest lock level to lowest.
+    LockLevel current_lock_level = kInstrumentEntrypointsLock;
+    DCHECK(instrument_entrypoints_lock_ == nullptr);
+    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
+
+    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
+      if ((new_level) >= current_lock_level) { \
+        /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
+        fprintf(stderr, "New local level %d is not less than current level %d\n", \
+                new_level, current_lock_level); \
+        exit(1); \
+      } \
+      current_lock_level = new_level;
+
+    UPDATE_CURRENT_LOCK_LEVEL(kUserCodeSuspensionLock);
+    DCHECK(user_code_suspension_lock_ == nullptr);
+    user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
+    DCHECK(mutator_lock_ == nullptr);
+    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
+    DCHECK(heap_bitmap_lock_ == nullptr);
+    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+    DCHECK(trace_lock_ == nullptr);
+    trace_lock_ = new Mutex("trace lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
+    DCHECK(runtime_shutdown_lock_ == nullptr);
+    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
+    DCHECK(profiler_lock_ == nullptr);
+    profiler_lock_ = new Mutex("profiler lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
+    DCHECK(deoptimization_lock_ == nullptr);
+    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
+    DCHECK(alloc_tracker_lock_ == nullptr);
+    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
+    DCHECK(thread_list_lock_ == nullptr);
+    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
+    DCHECK(jni_libraries_lock_ == nullptr);
+    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
+    DCHECK(breakpoint_lock_ == nullptr);
+    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
+    DCHECK(subtype_check_lock_ == nullptr);
+    subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
+    DCHECK(classlinker_classes_lock_ == nullptr);
+    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
+                                                      current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
+    DCHECK(allocated_monitor_ids_lock_ == nullptr);
+    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
+    DCHECK(allocated_thread_ids_lock_ == nullptr);
+    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);
+
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
+      DCHECK(modify_ldt_lock_ == nullptr);
+      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
+    }
+
+    UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
+    DCHECK(dex_lock_ == nullptr);
+    dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
+    DCHECK(oat_file_manager_lock_ == nullptr);
+    oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
+    DCHECK(verifier_deps_lock_ == nullptr);
+    verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
+    DCHECK(host_dlopen_handles_lock_ == nullptr);
+    host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
+    DCHECK(intern_table_lock_ == nullptr);
+    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
+    DCHECK(reference_processor_lock_ == nullptr);
+    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
+    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
+    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
+    DCHECK(reference_queue_weak_references_lock_ == nullptr);
+    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
+    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
+    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
+    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
+    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
+    DCHECK(reference_queue_soft_references_lock_ == nullptr);
+    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
+    DCHECK(jni_globals_lock_ == nullptr);
+    jni_globals_lock_ =
+        new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
+    DCHECK(jni_weak_globals_lock_ == nullptr);
+    jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
+    DCHECK(jni_function_table_lock_ == nullptr);
+    jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
+    DCHECK(custom_tls_lock_ == nullptr);
+    custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+    DCHECK(cha_lock_ == nullptr);
+    cha_lock_ = new Mutex("CHA lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
+    DCHECK(native_debug_interface_lock_ == nullptr);
+    native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
+    DCHECK(abort_lock_ == nullptr);
+    abort_lock_ = new Mutex("abort lock", current_lock_level, true);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
+    DCHECK(thread_suspend_count_lock_ == nullptr);
+    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
+    DCHECK(unexpected_signal_lock_ == nullptr);
+    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
+    DCHECK(logging_lock_ == nullptr);
+    logging_lock_ = new Mutex("logging lock", current_lock_level, true);
+
+    #undef UPDATE_CURRENT_LOCK_LEVEL
+
+    // List of mutexes that we may hold when accessing a weak ref.
+    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
+
+    InitConditions();
+  }
+}
+
+void Locks::InitConditions() {
+  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
+}
+
+void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
+  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
+}
+
+// Helper to allow checking shutdown while ignoring locking requirements.
+bool Locks::IsSafeToCallAbortRacy() {
+  Locks::ClientCallback* safe_to_call_abort_cb =
+      safe_to_call_abort_callback.load(std::memory_order_acquire);
+  return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
+}
+
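+// Note: the need_lock=false paths below are only safe when no other thread can be mutating
+// expected_mutexes_on_weak_ref_access_ concurrently, as with the calls made from Locks::Init()
+// before the runtime has created any other threads.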
+void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+  if (need_lock) {
+    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+    expected_mutexes_on_weak_ref_access_.push_back(mutex);
+  } else {
+    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+    expected_mutexes_on_weak_ref_access_.push_back(mutex);
+  }
+}
+
+void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+  if (need_lock) {
+    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+    auto it = std::find(list.begin(), list.end(), mutex);
+    DCHECK(it != list.end());
+    list.erase(it);
+  } else {
+    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+    auto it = std::find(list.begin(), list.end(), mutex);
+    DCHECK(it != list.end());
+    list.erase(it);
+  }
+}
+
+bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
+  ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+  std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+  return std::find(list.begin(), list.end(), mutex) != list.end();
+}
+
+}  // namespace art
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
new file mode 100644
index 0000000..8cbe372
--- /dev/null
+++ b/runtime/base/locks.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_LOCKS_H_
+#define ART_RUNTIME_BASE_LOCKS_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <vector>
+
+#include "base/atomic.h"
+#include "base/macros.h"
+
+namespace art {
+
+class BaseMutex;
+class ConditionVariable;
+class SHARED_LOCKABLE ReaderWriterMutex;
+class SHARED_LOCKABLE MutatorMutex;
+class LOCKABLE Mutex;
+class Thread;
+
+// LockLevel is used to impose a lock hierarchy [1] in which acquiring a Mutex at a level higher
+// than or equal to one the thread already holds is invalid. The hierarchy imposes a cycle-free
+// partial ordering and thereby causes would-be deadlock situations to fail checks.
+//
+// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
+enum LockLevel : uint8_t {
+  kLoggingLock = 0,
+  kSwapMutexesLock,
+  kUnexpectedSignalLock,
+  kThreadSuspendCountLock,
+  kAbortLock,
+  kNativeDebugInterfaceLock,
+  kSignalHandlingLock,
+  // A generic lock level for mutexes that should not allow any additional mutexes to be gained
+  // after acquiring it.
+  kGenericBottomLock,
+  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
+  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
+  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
+  // the second lock acquisition does not result in deadlock. This is implemented in the lock
+  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
+  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
+  // is here near the bottom of the hierarchy because other locks should not be
+  // acquired while it is held. kThreadWaitLock cannot be moved here because GC
+  // activity acquires locks while holding the wait lock.
+  kThreadWaitWakeLock,
+  kJdwpAdbStateLock,
+  kJdwpSocketLock,
+  kRegionSpaceRegionLock,
+  kMarkSweepMarkStackLock,
+  // Can be held while GC-related work is done, and thus must be above kMarkSweepMarkStackLock.
+  kThreadWaitLock,
+  kCHALock,
+  kJitCodeCacheLock,
+  kRosAllocGlobalLock,
+  kRosAllocBracketLock,
+  kRosAllocBulkFreeLock,
+  kTaggingLockLevel,
+  kTransactionLogLock,
+  kCustomTlsLock,
+  kJniFunctionTableLock,
+  kJniWeakGlobalsLock,
+  kJniGlobalsLock,
+  kReferenceQueueSoftReferencesLock,
+  kReferenceQueuePhantomReferencesLock,
+  kReferenceQueueFinalizerReferencesLock,
+  kReferenceQueueWeakReferencesLock,
+  kReferenceQueueClearedReferencesLock,
+  kReferenceProcessorLock,
+  kJitDebugInterfaceLock,
+  kAllocSpaceLock,
+  kBumpPointerSpaceBlockLock,
+  kArenaPoolLock,
+  kInternTableLock,
+  kOatFileSecondaryLookupLock,
+  kHostDlOpenHandlesLock,
+  kVerifierDepsLock,
+  kOatFileManagerLock,
+  kTracingUniqueMethodsLock,
+  kTracingStreamingLock,
+  kClassLoaderClassesLock,
+  kDefaultMutexLevel,
+  kDexLock,
+  kMarkSweepLargeObjectLock,
+  kJdwpObjectRegistryLock,
+  kModifyLdtLock,
+  kAllocatedThreadIdsLock,
+  kMonitorPoolLock,
+  kClassLinkerClassesLock,  // TODO rename.
+  kDexToDexCompilerLock,
+  kSubtypeCheckLock,
+  kBreakpointLock,
+  kMonitorLock,
+  kMonitorListLock,
+  kJniLoadLibraryLock,
+  kThreadListLock,
+  kAllocTrackerLock,
+  kDeoptimizationLock,
+  kProfilerLock,
+  kJdwpShutdownLock,
+  kJdwpEventListLock,
+  kJdwpAttachLock,
+  kJdwpStartLock,
+  kRuntimeShutdownLock,
+  kTraceLock,
+  kHeapBitmapLock,
+  kMutatorLock,
+  kUserCodeSuspensionLock,
+  kInstrumentEntrypointsLock,
+  kZygoteCreationLock,
+
+  // The highest valid lock level. Use this if there is code that should only be called with no
+  // other locks held. Since this is the highest lock level we also allow it to be held even if the
+  // runtime or current thread is not fully set up yet (for example during thread attach). Note that
+  // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
+  // really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusively.
+  // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is
+  // held. Since holding the mutator_lock_ exclusively means that all other threads are suspended,
+  // this prevents deadlocks while still allowing this lock level to function as a "highest" level.
+  kTopLockLevel,
+
+  kLockLevelCount  // Must come last.
+};
+std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
+
+// For StartNoThreadSuspension and EndNoThreadSuspension.
+class CAPABILITY("role") Role {
+ public:
+  void Acquire() ACQUIRE() {}
+  void Release() RELEASE() {}
+  const Role& operator!() const { return *this; }
+};
+
+class Uninterruptible : public Role {
+};
+
+// Global mutexes corresponding to the levels above.
+class Locks {
+ public:
+  static void Init();
+  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
+
+  // Destroying various lock types can emit errors that vary depending upon
+  // whether the client (art::Runtime) is currently active.  Allow the client
+  // to set a callback that is used to check when it is acceptable to call
+  // Abort.  The default behavior is that the client *is not* able to call
+  // Abort if no callback is established.
+  using ClientCallback = bool();
+  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
+  // Checks for whether it is safe to call Abort() without using locks.
+  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
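+  //
+  // A minimal usage sketch (IsShuttingDown is a hypothetical client predicate):
+  //
+  //   static bool IsSafeToAbort() { return !IsShuttingDown(); }
+  //   ...
+  //   Locks::SetClientCallback(&IsSafeToAbort);        // installed by the client
+  //   if (Locks::IsSafeToCallAbortRacy()) { /* OK to call Abort() */ }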
+
+  // Add a mutex to expected_mutexes_on_weak_ref_access_.
+  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
+  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
+  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
+
+  // Guards allocation entrypoint instrumenting.
+  static Mutex* instrument_entrypoints_lock_;
+
+  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
+  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
+  // only if the suspension is not due to SuspendReason::kForUserCode.
+  static Mutex* user_code_suspension_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
+
+  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When the
+  // GC/Debugger thread wants to suspend all mutator threads, it needs to wait for all mutator
+  // threads to pass a barrier. Threads that are already suspended will get their barrier passed
+  // by the GC/Debugger thread; threads in the runnable state will pass the barrier when they
+  // transit to the suspended state. The GC/Debugger thread is woken up once all mutator threads
+  // are suspended.
+  //
+  // Thread suspension:
+  // mutator thread                                | GC/Debugger
+  //   .. running ..                               |   .. running ..
+  //   .. running ..                               | Request thread suspension by:
+  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
+  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
+  //   .. running ..                               |     all mutator threads
+  //   .. running ..                               |   - releasing thread_suspend_count_lock_
+  //   .. running ..                               | Block wait for all threads to pass a barrier
+  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
+  // suspend code.                                 |   .. blocked ..
+  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
+  // x: Acquire thread_suspend_count_lock_         |   .. running ..
+  // while Thread::suspend_count_ > 0              |   .. running ..
+  //   - wait on Thread::resume_cond_              |   .. running ..
+  //     (releases thread_suspend_count_lock_)     |   .. running ..
+  //   .. waiting ..                               | Request thread resumption by:
+  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
+  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
+  //   .. waiting ..                               |     all mutator threads
+  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
+  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
+  // Release thread_suspend_count_lock_            |  .. running ..
+  // Change to kRunnable                           |  .. running ..
+  //  - this uses a CAS operation to ensure the    |  .. running ..
+  //    suspend request flag isn't raised as the   |  .. running ..
+  //    state is changed                           |  .. running ..
+  //  - if the CAS operation fails then goto x     |  .. running ..
+  //  .. running ..                                |  .. running ..
+  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);
+
+  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
+  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
+
+  // Guards shutdown of the runtime.
+  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+
+  // Guards background profiler global state.
+  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
+  // Guards trace (i.e. traceview) requests.
+  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+
+  // Guards debugger recent allocation records.
+  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
+
+  // Guards updates to instrumentation to ensure mutual exclusion of
+  // events like deoptimization requests.
+  // TODO: improve name, perhaps instrumentation_update_lock_.
+  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
+
+  // Guards updates to the SubtypeCheck data stored in each Class::status_ field.
+  // This lock is used in the SubtypeCheck methods, which are the interface for
+  // any SubtypeCheck-mutating code.
+  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
+  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+
+  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads from
+  // attaching and detaching.
+  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);
+
+  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
+  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
+
+  // Guards maintaining loading library data structures.
+  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
+  // Guards breakpoints.
+  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
+
+  // Guards lists of classes within the class linker.
+  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
+
+  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
+  // doesn't try to hold a higher level Mutex.
+  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
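+  //
+  // For instance, a (hypothetical) mutex declaration using it would look like:
+  //   Mutex my_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;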
+
+  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+  // Guard the allocation/deallocation of thread ids.
+  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);
+
+  // Guards modification of the LDT on x86.
+  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+
+  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+
+  // Guards opened oat files in OatFileManager.
+  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);
+
+  // Guards extra string entries for VerifierDeps.
+  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
+
+  // Guards dlopen_handles_ in DlOpenOatFile.
+  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);
+
+  // Guards intern table.
+  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);
+
+  // Guards reference processor.
+  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
+
+  // Guards cleared references queue.
+  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
+
+  // Guards weak references queue.
+  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
+
+  // Guards finalizer references queue.
+  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
+
+  // Guards phantom references queue.
+  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
+
+  // Guards soft references queue.
+  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+
+  // Guard accesses to the JNI Global Reference table.
+  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+
+  // Guard accesses to the JNI Weak Global Reference table.
+  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
+
+  // Guard accesses to the JNI function table override.
+  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
+
+  // Guards accesses to Thread::custom_tls_. We use this to allow the TLS of other threads to be
+  // read (though the reader must hold thread_list_lock_, or have some other way of ensuring that
+  // the thread will not die while its TLS is being read). This is useful for, e.g., the
+  // implementation of GetThreadLocalStorage.
+  static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
+
+  // Guards Class Hierarchy Analysis (CHA).
+  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
+  // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
+  // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
+  // actually only encodes the mutex as being below cha_lock_, even though the kGenericBottomLock
+  // level is lower than this.
+  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
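+  //
+  // E.g. a (hypothetical) leaf-level mutex could be declared as:
+  //   Mutex leaf_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;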
+
+  // Have an exclusive aborting thread.
+  static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
+  // Allow mutual exclusion when manipulating Thread::suspend_count_.
+  // TODO: Does the trade-off of a per-thread lock make sense?
+  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
+
+  // One unexpected signal at a time lock.
+  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+
+  // Guards the magic global variables used by native tools (e.g. libunwind).
+  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
+  // Have an exclusive logging thread.
+  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
+
+  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
+  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
+  // encounter an unexpected mutex on accessing weak refs,
+  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
+  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
+  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
+  class ScopedExpectedMutexesOnWeakRefAccessLock;
+};
+
+class Roles {
+ public:
+  // Uninterruptible means that the thread may not become suspended.
+  static Uninterruptible uninterruptible_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_LOCKS_H_
diff --git a/runtime/base/mutator_locked_dumpable.h b/runtime/base/mutator_locked_dumpable.h
index cf2199c..afbd732 100644
--- a/runtime/base/mutator_locked_dumpable.h
+++ b/runtime/base/mutator_locked_dumpable.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_BASE_MUTATOR_LOCKED_DUMPABLE_H_
 #define ART_RUNTIME_BASE_MUTATOR_LOCKED_DUMPABLE_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "thread-current-inl.h"
 
 namespace art {
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index ca2ed80..5a52818 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -34,51 +34,6 @@
 
 using android::base::StringPrintf;
 
-static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);
-
-Mutex* Locks::abort_lock_ = nullptr;
-Mutex* Locks::alloc_tracker_lock_ = nullptr;
-Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
-Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
-ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
-ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
-Mutex* Locks::custom_tls_lock_ = nullptr;
-Mutex* Locks::deoptimization_lock_ = nullptr;
-ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
-Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
-Mutex* Locks::intern_table_lock_ = nullptr;
-Mutex* Locks::jni_function_table_lock_ = nullptr;
-Mutex* Locks::jni_libraries_lock_ = nullptr;
-Mutex* Locks::logging_lock_ = nullptr;
-Mutex* Locks::modify_ldt_lock_ = nullptr;
-MutatorMutex* Locks::mutator_lock_ = nullptr;
-Mutex* Locks::profiler_lock_ = nullptr;
-ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
-ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
-Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
-Mutex* Locks::reference_processor_lock_ = nullptr;
-Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
-Mutex* Locks::runtime_shutdown_lock_ = nullptr;
-Mutex* Locks::cha_lock_ = nullptr;
-Mutex* Locks::subtype_check_lock_ = nullptr;
-Mutex* Locks::thread_list_lock_ = nullptr;
-ConditionVariable* Locks::thread_exit_cond_ = nullptr;
-Mutex* Locks::thread_suspend_count_lock_ = nullptr;
-Mutex* Locks::trace_lock_ = nullptr;
-Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::user_code_suspension_lock_ = nullptr;
-Uninterruptible Roles::uninterruptible_;
-ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
-Mutex* Locks::jni_weak_globals_lock_ = nullptr;
-ReaderWriterMutex* Locks::dex_lock_ = nullptr;
-Mutex* Locks::native_debug_interface_lock_ = nullptr;
-std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
-Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
-
 struct AllMutexData {
   // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
   Atomic<const BaseMutex*> all_mutexes_guard;
@@ -144,27 +99,6 @@
   const BaseMutex* const mutex_;
 };
 
-class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
- public:
-  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
-    for (uint32_t i = 0;
-         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
-                                                                                     mutex);
-         ++i) {
-      BackOff(i);
-    }
-  }
-
-  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
-    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
-              mutex_);
-    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
-  }
-
- private:
-  const BaseMutex* const mutex_;
-};
-
 // Scoped class that generates events at the beginning and end of lock contention.
 class ScopedContentionRecorder final : public ValueObject {
  public:
@@ -1042,266 +976,4 @@
   return timed_out;
 }
 
-void Locks::Init() {
-  if (logging_lock_ != nullptr) {
-    // Already initialized.
-    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
-      DCHECK(modify_ldt_lock_ != nullptr);
-    } else {
-      DCHECK(modify_ldt_lock_ == nullptr);
-    }
-    DCHECK(abort_lock_ != nullptr);
-    DCHECK(alloc_tracker_lock_ != nullptr);
-    DCHECK(allocated_monitor_ids_lock_ != nullptr);
-    DCHECK(allocated_thread_ids_lock_ != nullptr);
-    DCHECK(breakpoint_lock_ != nullptr);
-    DCHECK(classlinker_classes_lock_ != nullptr);
-    DCHECK(custom_tls_lock_ != nullptr);
-    DCHECK(deoptimization_lock_ != nullptr);
-    DCHECK(heap_bitmap_lock_ != nullptr);
-    DCHECK(oat_file_manager_lock_ != nullptr);
-    DCHECK(verifier_deps_lock_ != nullptr);
-    DCHECK(host_dlopen_handles_lock_ != nullptr);
-    DCHECK(intern_table_lock_ != nullptr);
-    DCHECK(jni_function_table_lock_ != nullptr);
-    DCHECK(jni_libraries_lock_ != nullptr);
-    DCHECK(logging_lock_ != nullptr);
-    DCHECK(mutator_lock_ != nullptr);
-    DCHECK(profiler_lock_ != nullptr);
-    DCHECK(cha_lock_ != nullptr);
-    DCHECK(subtype_check_lock_ != nullptr);
-    DCHECK(thread_list_lock_ != nullptr);
-    DCHECK(thread_suspend_count_lock_ != nullptr);
-    DCHECK(trace_lock_ != nullptr);
-    DCHECK(unexpected_signal_lock_ != nullptr);
-    DCHECK(user_code_suspension_lock_ != nullptr);
-    DCHECK(dex_lock_ != nullptr);
-    DCHECK(native_debug_interface_lock_ != nullptr);
-  } else {
-    // Create global locks in level order from highest lock level to lowest.
-    LockLevel current_lock_level = kInstrumentEntrypointsLock;
-    DCHECK(instrument_entrypoints_lock_ == nullptr);
-    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
-
-    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
-      if ((new_level) >= current_lock_level) { \
-        /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
-        fprintf(stderr, "New local level %d is not less than current level %d\n", \
-                new_level, current_lock_level); \
-        exit(1); \
-      } \
-      current_lock_level = new_level;
-
-    UPDATE_CURRENT_LOCK_LEVEL(kUserCodeSuspensionLock);
-    DCHECK(user_code_suspension_lock_ == nullptr);
-    user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
-    DCHECK(mutator_lock_ == nullptr);
-    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
-    DCHECK(heap_bitmap_lock_ == nullptr);
-    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
-    DCHECK(trace_lock_ == nullptr);
-    trace_lock_ = new Mutex("trace lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
-    DCHECK(runtime_shutdown_lock_ == nullptr);
-    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
-    DCHECK(profiler_lock_ == nullptr);
-    profiler_lock_ = new Mutex("profiler lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
-    DCHECK(deoptimization_lock_ == nullptr);
-    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
-    DCHECK(alloc_tracker_lock_ == nullptr);
-    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
-    DCHECK(thread_list_lock_ == nullptr);
-    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
-    DCHECK(jni_libraries_lock_ == nullptr);
-    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
-    DCHECK(breakpoint_lock_ == nullptr);
-    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
-    DCHECK(subtype_check_lock_ == nullptr);
-    subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
-    DCHECK(classlinker_classes_lock_ == nullptr);
-    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
-                                                      current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
-    DCHECK(allocated_monitor_ids_lock_ == nullptr);
-    allocated_monitor_ids_lock_ =  new Mutex("allocated monitor ids lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
-    DCHECK(allocated_thread_ids_lock_ == nullptr);
-    allocated_thread_ids_lock_ =  new Mutex("allocated thread ids lock", current_lock_level);
-
-    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
-      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
-      DCHECK(modify_ldt_lock_ == nullptr);
-      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
-    }
-
-    UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
-    DCHECK(dex_lock_ == nullptr);
-    dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
-    DCHECK(oat_file_manager_lock_ == nullptr);
-    oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
-    DCHECK(verifier_deps_lock_ == nullptr);
-    verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
-    DCHECK(host_dlopen_handles_lock_ == nullptr);
-    host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
-    DCHECK(intern_table_lock_ == nullptr);
-    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
-    DCHECK(reference_processor_lock_ == nullptr);
-    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
-    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
-    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
-    DCHECK(reference_queue_weak_references_lock_ == nullptr);
-    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
-    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
-    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
-    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
-    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
-    DCHECK(reference_queue_soft_references_lock_ == nullptr);
-    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
-    DCHECK(jni_globals_lock_ == nullptr);
-    jni_globals_lock_ =
-        new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
-    DCHECK(jni_weak_globals_lock_ == nullptr);
-    jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
-    DCHECK(jni_function_table_lock_ == nullptr);
-    jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
-    DCHECK(custom_tls_lock_ == nullptr);
-    custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
-    DCHECK(cha_lock_ == nullptr);
-    cha_lock_ = new Mutex("CHA lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
-    DCHECK(native_debug_interface_lock_ == nullptr);
-    native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
-    DCHECK(abort_lock_ == nullptr);
-    abort_lock_ = new Mutex("abort lock", current_lock_level, true);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
-    DCHECK(thread_suspend_count_lock_ == nullptr);
-    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
-    DCHECK(unexpected_signal_lock_ == nullptr);
-    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
-    DCHECK(logging_lock_ == nullptr);
-    logging_lock_ = new Mutex("logging lock", current_lock_level, true);
-
-    #undef UPDATE_CURRENT_LOCK_LEVEL
-
-    // List of mutexes that we may hold when accessing a weak ref.
-    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
-    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
-    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
-
-    InitConditions();
-  }
-}
-
-void Locks::InitConditions() {
-  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
-}
-
-void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
-  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
-}
-
-// Helper to allow checking shutdown while ignoring locking requirements.
-bool Locks::IsSafeToCallAbortRacy() {
-  Locks::ClientCallback* safe_to_call_abort_cb =
-      safe_to_call_abort_callback.load(std::memory_order_acquire);
-  return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
-}
-
-void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
-  if (need_lock) {
-    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
-    expected_mutexes_on_weak_ref_access_.push_back(mutex);
-  } else {
-    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
-    expected_mutexes_on_weak_ref_access_.push_back(mutex);
-  }
-}
-
-void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
-  if (need_lock) {
-    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
-    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-    auto it = std::find(list.begin(), list.end(), mutex);
-    DCHECK(it != list.end());
-    list.erase(it);
-  } else {
-    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
-    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-    auto it = std::find(list.begin(), list.end(), mutex);
-    DCHECK(it != list.end());
-    list.erase(it);
-  }
-}
-
-bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
-  ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-  std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-  return std::find(list.begin(), list.end(), mutex) != list.end();
-}
-
 }  // namespace art
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e391a15..41a47af 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -30,6 +30,7 @@
 #include "base/atomic.h"
 #include "base/globals.h"
 #include "base/macros.h"
+#include "locks.h"
 
 #if defined(__linux__)
 #define ART_USE_FUTEXES 1
@@ -50,112 +51,7 @@
 class SHARED_LOCKABLE MutatorMutex;
 class ScopedContentionRecorder;
 class Thread;
-class Mutex;
-
-// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
-// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
-// partial ordering and thereby cause deadlock situations to fail checks.
-//
-// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
-enum LockLevel : uint8_t {
-  kLoggingLock = 0,
-  kSwapMutexesLock,
-  kUnexpectedSignalLock,
-  kThreadSuspendCountLock,
-  kAbortLock,
-  kNativeDebugInterfaceLock,
-  kSignalHandlingLock,
-  // A generic lock level for mutexs that should not allow any additional mutexes to be gained after
-  // acquiring it.
-  kGenericBottomLock,
-  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
-  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
-  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
-  // the second lock acquisition does not result in deadlock. This is implemented in the lock
-  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
-  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
-  // is here near the bottom of the hierarchy because other locks should not be
-  // acquired while it is held. kThreadWaitLock cannot be moved here because GC
-  // activity acquires locks while holding the wait lock.
-  kThreadWaitWakeLock,
-  kJdwpAdbStateLock,
-  kJdwpSocketLock,
-  kRegionSpaceRegionLock,
-  kMarkSweepMarkStackLock,
-  // Can be held while GC related work is done, and thus must be above kMarkSweepMarkStackLock
-  kThreadWaitLock,
-  kCHALock,
-  kJitCodeCacheLock,
-  kRosAllocGlobalLock,
-  kRosAllocBracketLock,
-  kRosAllocBulkFreeLock,
-  kTaggingLockLevel,
-  kTransactionLogLock,
-  kCustomTlsLock,
-  kJniFunctionTableLock,
-  kJniWeakGlobalsLock,
-  kJniGlobalsLock,
-  kReferenceQueueSoftReferencesLock,
-  kReferenceQueuePhantomReferencesLock,
-  kReferenceQueueFinalizerReferencesLock,
-  kReferenceQueueWeakReferencesLock,
-  kReferenceQueueClearedReferencesLock,
-  kReferenceProcessorLock,
-  kJitDebugInterfaceLock,
-  kAllocSpaceLock,
-  kBumpPointerSpaceBlockLock,
-  kArenaPoolLock,
-  kInternTableLock,
-  kOatFileSecondaryLookupLock,
-  kHostDlOpenHandlesLock,
-  kVerifierDepsLock,
-  kOatFileManagerLock,
-  kTracingUniqueMethodsLock,
-  kTracingStreamingLock,
-  kClassLoaderClassesLock,
-  kDefaultMutexLevel,
-  kDexLock,
-  kMarkSweepLargeObjectLock,
-  kJdwpObjectRegistryLock,
-  kModifyLdtLock,
-  kAllocatedThreadIdsLock,
-  kMonitorPoolLock,
-  kClassLinkerClassesLock,  // TODO rename.
-  kDexToDexCompilerLock,
-  kSubtypeCheckLock,
-  kBreakpointLock,
-  kMonitorLock,
-  kMonitorListLock,
-  kJniLoadLibraryLock,
-  kThreadListLock,
-  kAllocTrackerLock,
-  kDeoptimizationLock,
-  kProfilerLock,
-  kJdwpShutdownLock,
-  kJdwpEventListLock,
-  kJdwpAttachLock,
-  kJdwpStartLock,
-  kRuntimeShutdownLock,
-  kTraceLock,
-  kHeapBitmapLock,
-  kMutatorLock,
-  kUserCodeSuspensionLock,
-  kInstrumentEntrypointsLock,
-  kZygoteCreationLock,
-
-  // The highest valid lock level. Use this if there is code that should only be called with no
-  // other locks held. Since this is the highest lock level we also allow it to be held even if the
-  // runtime or current thread is not fully set-up yet (for example during thread attach). Note that
-  // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
-  // really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusive.
-  // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is
-  // held. Since the mutator_lock_ being held strong means that all other threads are suspended this
-  // will prevent deadlocks while still allowing this lock level to function as a "highest" level.
-  kTopLockLevel,
-
-  kLockLevelCount  // Must come last.
-};
-std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
+class LOCKABLE Mutex;
 
 constexpr bool kDebugLocking = kIsDebugBuild;
 
@@ -578,226 +474,6 @@
   DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
 };
 
-// For StartNoThreadSuspension and EndNoThreadSuspension.
-class CAPABILITY("role") Role {
- public:
-  void Acquire() ACQUIRE() {}
-  void Release() RELEASE() {}
-  const Role& operator!() const { return *this; }
-};
-
-class Uninterruptible : public Role {
-};
-
-// Global mutexes corresponding to the levels above.
-class Locks {
- public:
-  static void Init();
-  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
-
-  // Destroying various lock types can emit errors that vary depending upon
-  // whether the client (art::Runtime) is currently active.  Allow the client
-  // to set a callback that is used to check when it is acceptable to call
-  // Abort.  The default behavior is that the client *is not* able to call
-  // Abort if no callback is established.
-  using ClientCallback = bool();
-  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
-  // Checks for whether it is safe to call Abort() without using locks.
-  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
-
-  // Add a mutex to expected_mutexes_on_weak_ref_access_.
-  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
-  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
-  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
-  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
-  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
-
-  // Guards allocation entrypoint instrumenting.
-  static Mutex* instrument_entrypoints_lock_;
-
-  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
-  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
-  // only if the suspension is not due to SuspendReason::kForUserCode.
-  static Mutex* user_code_suspension_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
-
-  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
-  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
-  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
-  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
-  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
-  //
-  // Thread suspension:
-  // mutator thread                                | GC/Debugger
-  //   .. running ..                               |   .. running ..
-  //   .. running ..                               | Request thread suspension by:
-  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
-  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
-  //   .. running ..                               |     all mutator threads
-  //   .. running ..                               |   - releasing thread_suspend_count_lock_
-  //   .. running ..                               | Block wait for all threads to pass a barrier
-  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
-  // suspend code.                                 |   .. blocked ..
-  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
-  // x: Acquire thread_suspend_count_lock_         |   .. running ..
-  // while Thread::suspend_count_ > 0              |   .. running ..
-  //   - wait on Thread::resume_cond_              |   .. running ..
-  //     (releases thread_suspend_count_lock_)     |   .. running ..
-  //   .. waiting ..                               | Request thread resumption by:
-  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
-  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
-  //   .. waiting ..                               |     all mutator threads
-  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
-  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
-  // Release thread_suspend_count_lock_            |  .. running ..
-  // Change to kRunnable                           |  .. running ..
-  //  - this uses a CAS operation to ensure the    |  .. running ..
-  //    suspend request flag isn't raised as the   |  .. running ..
-  //    state is changed                           |  .. running ..
-  //  - if the CAS operation fails then goto x     |  .. running ..
-  //  .. running ..                                |  .. running ..
-  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);
-
-  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
-  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
-
-  // Guards shutdown of the runtime.
-  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
-
-  // Guards background profiler global state.
-  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
-
-  // Guards trace (ie traceview) requests.
-  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
-
-  // Guards debugger recent allocation records.
-  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
-
-  // Guards updates to instrumentation to ensure mutual exclusion of
-  // events like deoptimization requests.
-  // TODO: improve name, perhaps instrumentation_update_lock_.
-  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
-
-  // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
-  // This lock is used in SubtypeCheck methods which are the interface for
-  // any SubtypeCheck-mutating methods.
-  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
-  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
-
-  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
-  // attaching and detaching.
-  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);
-
-  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
-  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
-
-  // Guards maintaining loading library data structures.
-  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
-
-  // Guards breakpoints.
-  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
-
-  // Guards lists of classes within the class linker.
-  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
-
-  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
-  // doesn't try to hold a higher level Mutex.
-  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
-
-  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
-
-  // Guard the allocation/deallocation of thread ids.
-  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);
-
-  // Guards modification of the LDT on x86.
-  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
-
-  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
-
-  // Guards opened oat files in OatFileManager.
-  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);
-
-  // Guards extra string entries for VerifierDeps.
-  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
-
-  // Guards dlopen_handles_ in DlOpenOatFile.
-  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);
-
-  // Guards intern table.
-  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);
-
-  // Guards reference processor.
-  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
-
-  // Guards cleared references queue.
-  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
-
-  // Guards weak references queue.
-  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
-
-  // Guards finalizer references queue.
-  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
-
-  // Guards phantom references queue.
-  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
-
-  // Guards soft references queue.
-  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
-
-  // Guard accesses to the JNI Global Reference table.
-  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
-
-  // Guard accesses to the JNI Weak Global Reference table.
-  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
-
-  // Guard accesses to the JNI function table override.
-  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
-
-  // Guard accesses to the Thread::custom_tls_. We use this to allow the TLS of other threads to be
-  // read (the reader must hold the ThreadListLock or have some other way of ensuring the thread
-  // will not die in that case though). This is useful for (eg) the implementation of
-  // GetThreadLocalStorage.
-  static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
-
-  // Guards Class Hierarchy Analysis (CHA).
-  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
-
-  // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
-  // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
-  // actually only encodes the mutex being below jni_function_table_lock_ although having
-  // kGenericBottomLock level is lower than this.
-  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
-
-  // Have an exclusive aborting thread.
-  static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
-
-  // Allow mutual exclusion when manipulating Thread::suspend_count_.
-  // TODO: Does the trade-off of a per-thread lock make sense?
-  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
-
-  // One unexpected signal at a time lock.
-  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
-
-  // Guards the magic global variables used by native tools (e.g. libunwind).
-  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
-
-  // Have an exclusive logging thread.
-  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
-
-  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
-  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
-  // encounter an unexpected mutex on accessing weak refs,
-  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
-  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
-  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
-  class ScopedExpectedMutexesOnWeakRefAccessLock;
-};
-
-class Roles {
- public:
-  // Uninterruptible means that the thread may not become suspended.
-  static Uninterruptible uninterruptible_;
-};
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_MUTEX_H_
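
The block removed above encodes ART's global lock hierarchy: each Locks::* mutex is declared ACQUIRED_AFTER its predecessor, and the DEFAULT_MUTEX_ACQUIRED_AFTER / BOTTOM_MUTEX_ACQUIRED_AFTER macros pin newly declared mutexes into that order (the repeated base/mutex.h to base/locks.h include swaps elsewhere in this change suggest the declarations move to base/locks.h). A rough sketch of how such annotations drive Clang's -Wthread-safety analysis, using plain Clang attributes and made-up names rather than ART's actual Mutex class:

// Illustrative stand-ins for ART's annotalysis macros (not ART's definitions).
// Compile with clang -Wthread-safety; note that Clang's checking of
// acquired_after has historically required -Wthread-safety-beta.
#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define ACQUIRE() __attribute__((acquire_capability()))
#define RELEASE() __attribute__((release_capability()))

class CAPABILITY("mutex") SketchMutex {
 public:
  void Lock() ACQUIRE() {}    // a real implementation would wrap a mutex
  void Unlock() RELEASE() {}
};

SketchMutex outer_lock;
SketchMutex inner_lock ACQUIRED_AFTER(outer_lock);  // declared lock order
int shared_counter GUARDED_BY(inner_lock);

void WellOrdered() {
  outer_lock.Lock();
  inner_lock.Lock();   // OK: matches the declared acquisition order
  ++shared_counter;    // OK: inner_lock is held
  inner_lock.Unlock();
  outer_lock.Unlock();
}

Acquiring inner_lock before outer_lock, or touching shared_counter without holding inner_lock, would be flagged at compile time; that is what DEFAULT_MUTEX_ACQUIRED_AFTER buys for newly declared mutexes.
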
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index 23ec3e1..0a4cddd 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -21,6 +21,7 @@
 #include <android-base/logging.h>
 
 #include "base/histogram-inl.h"
+#include "base/mutex.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/time_utils.h"
@@ -40,7 +41,7 @@
 CumulativeLogger::CumulativeLogger(const std::string& name)
     : name_(name),
       lock_name_("CumulativeLoggerLock" + name),
-      lock_(lock_name_.c_str(), kDefaultMutexLevel, true) {
+      lock_(new Mutex(lock_name_.c_str(), kDefaultMutexLevel, true)) {
   Reset();
 }
 
@@ -49,7 +50,7 @@
 }
 
 void CumulativeLogger::SetName(const std::string& name) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   name_.assign(name);
 }
 
@@ -57,19 +58,19 @@
 }
 
 void CumulativeLogger::End() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   ++iterations_;
 }
 
 void CumulativeLogger::Reset() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   iterations_ = 0;
   total_time_ = 0;
   STLDeleteElements(&histograms_);
 }
 
 void CumulativeLogger::AddLogger(const TimingLogger &logger) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   TimingLogger::TimingData timing_data(logger.CalculateTimingData());
   const std::vector<TimingLogger::Timing>& timings = logger.GetTimings();
   for (size_t i = 0; i < timings.size(); ++i) {
@@ -81,12 +82,12 @@
 }
 
 size_t CumulativeLogger::GetIterations() const {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   return iterations_;
 }
 
 void CumulativeLogger::Dump(std::ostream &os) const {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *GetLock());
   DumpHistogram(os);
 }
 
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index a8a6701..974a14d 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -18,10 +18,11 @@
 #define ART_RUNTIME_BASE_TIMING_LOGGER_H_
 
 #include "base/histogram.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/time_utils.h"
 
+#include <memory>
 #include <set>
 #include <string>
 #include <vector>
@@ -34,17 +35,17 @@
   explicit CumulativeLogger(const std::string& name);
   ~CumulativeLogger();
   void Start();
-  void End() REQUIRES(!lock_);
-  void Reset() REQUIRES(!lock_);
-  void Dump(std::ostream& os) const REQUIRES(!lock_);
+  void End() REQUIRES(!GetLock());
+  void Reset() REQUIRES(!GetLock());
+  void Dump(std::ostream& os) const REQUIRES(!GetLock());
   uint64_t GetTotalNs() const {
     return GetTotalTime() * kAdjust;
   }
   // Allow the name to be modified, particularly when the cumulative logger is a field within a
   // parent class that is unable to determine the "name" of a sub-class.
-  void SetName(const std::string& name) REQUIRES(!lock_);
-  void AddLogger(const TimingLogger& logger) REQUIRES(!lock_);
-  size_t GetIterations() const REQUIRES(!lock_);
+  void SetName(const std::string& name) REQUIRES(!GetLock());
+  void AddLogger(const TimingLogger& logger) REQUIRES(!GetLock());
+  size_t GetIterations() const REQUIRES(!GetLock());
 
  private:
   class HistogramComparator {
@@ -58,18 +59,22 @@
   static constexpr size_t kDefaultBucketCount = 100;
   static constexpr size_t kInitialBucketSize = 50;  // 50 microseconds.
 
-  void AddPair(const std::string &label, uint64_t delta_time)
-      REQUIRES(lock_);
-  void DumpHistogram(std::ostream &os) const REQUIRES(lock_);
+  void AddPair(const std::string &label, uint64_t delta_time) REQUIRES(GetLock());
+  void DumpHistogram(std::ostream &os) const REQUIRES(GetLock());
   uint64_t GetTotalTime() const {
     return total_time_;
   }
+
+  Mutex* GetLock() const {
+    return lock_.get();
+  }
+
   static const uint64_t kAdjust = 1000;
-  std::set<Histogram<uint64_t>*, HistogramComparator> histograms_ GUARDED_BY(lock_);
+  std::set<Histogram<uint64_t>*, HistogramComparator> histograms_ GUARDED_BY(GetLock());
   std::string name_;
   const std::string lock_name_;
-  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  size_t iterations_ GUARDED_BY(lock_);
+  mutable std::unique_ptr<Mutex> lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  size_t iterations_ GUARDED_BY(GetLock());
   uint64_t total_time_;
 
   DISALLOW_COPY_AND_ASSIGN(CumulativeLogger);
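
The CumulativeLogger change above turns an inline Mutex member into a heap-allocated one behind GetLock(), letting the header drop base/mutex.h in favor of base/locks.h with only a forward declaration of Mutex. A minimal self-contained sketch of that pattern, with hypothetical names:

#include <memory>

class Mutex;  // forward declaration; the full definition stays in base/mutex.h

class LoggerSketch {
 public:
  LoggerSketch();   // allocates the Mutex where the type is complete
  ~LoggerSketch();  // must also be defined where the type is complete

 private:
  Mutex* GetLock() const { return lock_.get(); }  // fine with incomplete type
  mutable std::unique_ptr<Mutex> lock_;
};

// In the corresponding .cc file, which includes the real mutex header:
class Mutex {};  // stand-in definition so this sketch is self-contained
LoggerSketch::LoggerSketch() : lock_(new Mutex()) {}
LoggerSketch::~LoggerSketch() = default;

The destructor must be defined out of line, where Mutex is complete, or std::unique_ptr's deleter would fail to instantiate; that is presumably why the annotations above are rephrased in terms of GetLock().
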
diff --git a/runtime/cha.cc b/runtime/cha.cc
index 8e06fda..5110b7a 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -18,6 +18,7 @@
 
 #include "art_method-inl.h"
 #include "base/logging.h"  // For VLOG
+#include "base/mutex.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "linear_alloc.h"
diff --git a/runtime/cha.h b/runtime/cha.h
index d1a1b7c..a07ee91 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -21,7 +21,7 @@
 #include <unordered_set>
 
 #include "base/enums.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "handle.h"
 #include "mirror/class.h"
 #include "oat_quick_method_header.h"
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 6703205..43f3ed3 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -21,6 +21,7 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/mutex.h"
 #include "class_linker.h"
 #include "gc_root-inl.h"
 #include "handle_scope-inl.h"
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 9b2e1a1..f6a3e32 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1037,13 +1037,15 @@
   runtime->SetSentinel(heap->AllocNonMovableObject<true>(
       self, java_lang_Object, java_lang_Object->GetObjectSize(), VoidFunctor()));
 
-  for (gc::space::ImageSpace* image_space : spaces) {
+  const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
+  CHECK_LE(spaces.size(), boot_class_path_locations.size());
+  for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
     // Boot class loader, use a null handle.
     std::vector<std::unique_ptr<const DexFile>> dex_files;
-    if (!AddImageSpace(image_space,
+    if (!AddImageSpace(spaces[i],
                        ScopedNullHandle<mirror::ClassLoader>(),
-                       /*dex_elements=*/nullptr,
-                       /*dex_location=*/nullptr,
+                       /*dex_elements=*/ nullptr,
+                       /*dex_location=*/ boot_class_path_locations[i].c_str(),
                        /*out*/&dex_files,
                        error_msg)) {
       return false;
@@ -1062,6 +1064,15 @@
   return true;
 }
 
+void ClassLinker::AddExtraBootDexFiles(
+    Thread* self,
+    std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files) {
+  for (std::unique_ptr<const DexFile>& dex_file : additional_dex_files) {
+    AppendToBootClassPath(self, *dex_file);
+    boot_dex_files_.push_back(std::move(dex_file));
+  }
+}
+
 bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                     ObjPtr<mirror::ClassLoader> class_loader) {
   return class_loader == nullptr ||
@@ -1981,13 +1992,7 @@
     std::string dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
     // TODO: Only store qualified paths.
     // If non qualified, qualify it.
-    if (dex_file_location.find('/') == std::string::npos) {
-      std::string dex_location_path = dex_location;
-      const size_t pos = dex_location_path.find_last_of('/');
-      CHECK_NE(pos, std::string::npos);
-      dex_location_path = dex_location_path.substr(0, pos + 1);  // Keep trailing '/'
-      dex_file_location = dex_location_path + dex_file_location;
-    }
+    dex_file_location = OatFile::ResolveRelativeEncodedDexLocation(dex_location, dex_file_location);
     std::unique_ptr<const DexFile> dex_file = OpenOatDexFile(oat_file,
                                                              dex_file_location.c_str(),
                                                              error_msg);
@@ -2026,7 +2031,7 @@
     // Image class loader [A][B][C][image dex files]
     // Class loader = [???][dex_elements][image dex files]
     // Need to ensure that [???][dex_elements] == [A][B][C].
-    // For each class loader, PathClassLoader, the laoder checks the parent first. Also the logic
+    // For each class loader, PathClassLoader, the loader checks the parent first. Also the logic
     // for PathClassLoader does this by looping through the array of dex files. To ensure they
     // resolve the same way, simply flatten the hierarchy in the way the resolution order would be,
     // and check that the dex file names are the same.
@@ -2662,7 +2667,7 @@
     return true;
   }
 
-  if (IsPathOrDexClassLoader(soa, class_loader)) {
+  if (IsPathOrDexClassLoader(soa, class_loader) || IsInMemoryDexClassLoader(soa, class_loader)) {
     // For regular path or dex class loader the search order is:
     //    - parent
     //    - shared libraries
@@ -2756,7 +2761,9 @@
     const char* descriptor,
     size_t hash,
     Handle<mirror::ClassLoader> class_loader) {
-  DCHECK(IsPathOrDexClassLoader(soa, class_loader) || IsDelegateLastClassLoader(soa, class_loader))
+  DCHECK(IsPathOrDexClassLoader(soa, class_loader) ||
+         IsInMemoryDexClassLoader(soa, class_loader) ||
+         IsDelegateLastClassLoader(soa, class_loader))
       << "Unexpected class loader for descriptor " << descriptor;
 
   ObjPtr<mirror::Class> ret;
@@ -3733,8 +3740,7 @@
   data.weak_root = dex_cache_jweak;
   data.dex_file = dex_cache->GetDexFile();
   data.class_table = ClassTableForClassLoader(class_loader);
-  AddNativeDebugInfoForDex(self, ArrayRef<const uint8_t>(data.dex_file->Begin(),
-                                                         data.dex_file->Size()));
+  AddNativeDebugInfoForDex(self, data.dex_file);
   DCHECK(data.class_table != nullptr);
   // Make sure to hold the dex cache live in the class table. This case happens for the boot class
   // path dex caches without an image.
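
The hunks above extend the regular lookup path to InMemoryDexClassLoader. Per the comment in the hunk, the search order for these loaders is: parent first, then shared libraries, then the loader's own dex files. A schematic, self-contained rendering of that order, with stand-in types rather than ART's:

#include <algorithm>
#include <string>
#include <vector>

// Stand-in for the parent-first lookup order of PathClassLoader,
// DexClassLoader and (after this change) InMemoryDexClassLoader.
struct LoaderSketch {
  LoaderSketch* parent = nullptr;
  std::vector<LoaderSketch*> shared_libraries;
  std::vector<std::string> own_descriptors;  // stand-in for the dex files

  bool DefinesLocally(const std::string& descriptor) const {
    return std::find(own_descriptors.begin(), own_descriptors.end(),
                     descriptor) != own_descriptors.end();
  }

  bool Find(const std::string& descriptor) const {
    if (parent != nullptr && parent->Find(descriptor)) {
      return true;                                // 1. delegate to the parent
    }
    for (const LoaderSketch* library : shared_libraries) {
      if (library->Find(descriptor)) {
        return true;                              // 2. then shared libraries
      }
    }
    return DefinesLocally(descriptor);            // 3. finally own dex files
  }
};
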
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index dd5f911..d0a7c9b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -25,8 +25,8 @@
 #include <vector>
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/class_accessor.h"
 #include "dex/dex_cache_resolved_classes.h"
 #include "dex/dex_file.h"
@@ -127,6 +127,12 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
+  // Add boot class path dex files that were not included in the boot image.
+  // ClassLinker takes ownership of these dex files.
+  void AddExtraBootDexFiles(Thread* self,
+                            std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Add an image space to the class linker, may fix up classloader fields and dex cache fields.
   // The dex files that were newly opened for the space are placed in the out argument
   // out_dex_files. Returns true if the operation succeeded.
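
AddExtraBootDexFiles() above takes its vector by rvalue reference and moves each element into boot_dex_files_ after appending it to the boot class path, so the caller explicitly gives up ownership. A small sketch of that ownership-transfer shape with stand-in types (DexFileStub and LinkerSketch are hypothetical, not ART classes):

#include <memory>
#include <string>
#include <utility>
#include <vector>

struct DexFileStub { std::string location; };  // stands in for art::DexFile

class LinkerSketch {
 public:
  void AddExtraBootDexFiles(std::vector<std::unique_ptr<const DexFileStub>>&& extra) {
    for (std::unique_ptr<const DexFileStub>& dex_file : extra) {
      // ART would call AppendToBootClassPath(self, *dex_file) here.
      boot_dex_files_.push_back(std::move(dex_file));
    }
  }

 private:
  std::vector<std::unique_ptr<const DexFileStub>> boot_dex_files_;
};

void Usage() {
  LinkerSketch linker;
  std::vector<std::unique_ptr<const DexFileStub>> extra;
  extra.push_back(std::make_unique<const DexFileStub>(DexFileStub{"conscrypt.jar"}));
  linker.AddExtraBootDexFiles(std::move(extra));  // caller relinquishes ownership
}
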
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 56fdd06..fe45b9e 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -611,6 +611,10 @@
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_dex_caches_), "obsoleteDexCaches");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_methods_), "obsoleteMethods");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, original_dex_file_), "originalDexFile");
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, pre_redefine_class_def_index_),
+              "preRedefineClassDefIndex");
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, pre_redefine_dex_file_ptr_),
+              "preRedefineDexFilePtr");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, verify_error_), "verifyError");
   }
 };
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index 945d659..562dc47 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_CLASS_LOADER_UTILS_H_
 
 #include "art_field-inl.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "handle_scope.h"
 #include "jni/jni_internal.h"
 #include "mirror/class_loader.h"
@@ -29,7 +29,7 @@
 namespace art {
 
 // Returns true if the given class loader is either a PathClassLoader or a DexClassLoader.
-// (they both have the same behaviour with respect to class lockup order)
+// (they both have the same behaviour with respect to class lookup order)
 inline bool IsPathOrDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                    Handle<mirror::ClassLoader> class_loader)
     REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -41,6 +41,15 @@
           soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DexClassLoader));
 }
 
+// Returns true if the given class loader is an InMemoryDexClassLoader.
+inline bool IsInMemoryDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                     Handle<mirror::ClassLoader> class_loader)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::Class* class_loader_class = class_loader->GetClass();
+  return (class_loader_class ==
+      soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_InMemoryDexClassLoader));
+}
+
 inline bool IsDelegateLastClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                       Handle<mirror::ClassLoader> class_loader)
     REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 15ab5f0..5c5431d 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -20,8 +20,8 @@
 #include "android-base/logging.h"
 #include "art_field.h"
 #include "art_method.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "class_linker.h"
 #include "dex/code_item_accessors.h"
 #include "dex/primitive.h"
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 319c7c7..29b7813 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -27,7 +27,7 @@
 #include "arch/instruction_set.h"
 #include "base/common_art_test.h"
 #include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/os.h"
 #include "base/unix_file/fd_file.h"
 #include "dex/art_dex_file_loader.h"
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 6acff6f..ca9c96a 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_COMMON_THROWS_H_
 #define ART_RUNTIME_COMMON_THROWS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "obj_ptr.h"
 
 namespace art {
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index b29eb70..18632dc 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_COMPILER_CALLBACKS_H_
 #define ART_RUNTIME_COMPILER_CALLBACKS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/class_reference.h"
 #include "class_status.h"
 
diff --git a/runtime/debug_print.h b/runtime/debug_print.h
index df00f06..e2990d4 100644
--- a/runtime/debug_print.h
+++ b/runtime/debug_print.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_DEBUG_PRINT_H_
 #define ART_RUNTIME_DEBUG_PRINT_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "mirror/object.h"
 
 // Helper functions for printing extra information for certain hard to diagnose bugs.
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index b46c933..7f697d1 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -116,19 +116,19 @@
   ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
   EXPECT_EQ(filter, odex_file->GetCompilerFilter());
 
-  std::unique_ptr<ImageHeader> image_header(
-          gc::space::ImageSpace::ReadImageHeader(image_location.c_str(),
-                                                 kRuntimeISA,
-                                                 &error_msg));
-  ASSERT_TRUE(image_header != nullptr) << error_msg;
+  std::string boot_image_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+      Runtime::Current()->GetBootClassPath(), image_location, kRuntimeISA, &error_msg);
+  ASSERT_FALSE(boot_image_checksums.empty()) << error_msg;
+
   const OatHeader& oat_header = odex_file->GetOatHeader();
-  uint32_t boot_image_checksum = image_header->GetImageChecksum();
 
   if (CompilerFilter::DependsOnImageChecksum(filter)) {
+    const char* checksums = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+    ASSERT_TRUE(checksums != nullptr);
     if (with_alternate_image) {
-      EXPECT_NE(boot_image_checksum, oat_header.GetBootImageChecksum());
+      EXPECT_NE(boot_image_checksums, checksums);
     } else {
-      EXPECT_EQ(boot_image_checksum, oat_header.GetBootImageChecksum());
+      EXPECT_EQ(boot_image_checksums, checksums);
     }
   }
 }
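
The dexopt test above reflects the move from comparing a single boot image checksum to comparing a checksum string for the whole boot class path, stored under OatHeader::kBootClassPathChecksumsKey in the oat header's key-value store. A toy illustration of the compare-by-key idea; the map-based store and the key literal below are assumptions for the sketch, not OatHeader's actual layout:

#include <map>
#include <string>

// Hypothetical stand-in for the oat header's key-value store.
using KeyValueStore = std::map<std::string, std::string>;

constexpr const char* kBootClassPathChecksumsKey = "bootclasspath-checksums";

// True if the checksums recorded at compile time match the current runtime's.
bool BootClassPathChecksumsMatch(const KeyValueStore& oat_header_store,
                                 const std::string& runtime_checksums) {
  auto it = oat_header_store.find(kBootClassPathChecksumsKey);
  return it != oat_header_store.end() && it->second == runtime_checksums;
}

With an alternate boot image the stored string differs, which is exactly what the EXPECT_NE branch above checks.
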
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index c8bf6d0..e10a6e8 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -21,8 +21,8 @@
 #include <stdint.h>
 
 #include "base/callee_save_type.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file_types.h"
 #include "dex/dex_instruction.h"
 #include "gc/allocator_type.h"
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 1e30907..e555d68 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -20,7 +20,7 @@
 #include "arch/instruction_set.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "quick/quick_method_frame_info.h"
 #include "thread-inl.h"
 
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
index bd1e295..937ba8e 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
 #define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "gc/allocator_type.h"
 #include "quick_entrypoints.h"
 
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 795faa8..243f7ec 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -19,8 +19,8 @@
 
 #include <jni.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "deoptimization_kind.h"
 #include "offsets.h"
 
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 24ef0b1..d3be51f 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -23,7 +23,8 @@
 
 #include <vector>
 
-#include "base/mutex.h"  // For annotalysis.
+#include "base/globals.h"  // For CanDoImplicitNullCheckOn.
+#include "base/locks.h"  // For annotalysis.
 
 namespace art {
 
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index ffef566..bdc686e 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -24,8 +24,8 @@
 #include <vector>
 
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 1e7d76c..df50682 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -127,14 +127,6 @@
   return cards_scanned;
 }
 
-/*
- * Visitor is expected to take in a card and return the new value. When a value is modified, the
- * modify visitor is called.
- * visitor: The visitor which modifies the cards. Returns the new value for a card given an old
- * value.
- * modified: Whenever the visitor modifies a card, this visitor is called on the card. Enables
- * us to know which cards got cleared.
- */
 template <typename Visitor, typename ModifiedVisitor>
 inline void CardTable::ModifyCardsAtomic(uint8_t* scan_begin,
                                          uint8_t* scan_end,
@@ -144,6 +136,7 @@
   uint8_t* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
   CheckCardValid(card_cur);
   CheckCardValid(card_end);
+  DCHECK(visitor(kCardClean) == kCardClean);
 
   // Handle any unaligned cards at the start.
   while (!IsAligned<sizeof(intptr_t)>(card_cur) && card_cur < card_end) {
@@ -188,7 +181,8 @@
   while (word_cur < word_end) {
     while (true) {
       expected_word = *word_cur;
-      if (LIKELY(expected_word == 0)) {
+      static_assert(kCardClean == 0);
+      if (LIKELY(expected_word == 0 /* All kCardClean */ )) {
         break;
       }
       for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index f163898..c99ed4b 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -20,8 +20,8 @@
 #include <memory>
 
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 
 namespace art {
 
@@ -95,12 +95,10 @@
   }
 
   /*
-   * Visitor is expected to take in a card and return the new value. When a value is modified, the
-   * modify visitor is called.
-   * visitor: The visitor which modifies the cards. Returns the new value for a card given an old
-   * value.
-   * modified: Whenever the visitor modifies a card, this visitor is called on the card. Enables
-   * us to know which cards got cleared.
+   * Modify cards in the range from scan_begin (inclusive) to scan_end (exclusive). Each card
+   * value v is replaced by visitor(v). Visitor() should not have side-effects.
+   * Whenever a card value is changed, modified(card_address, old_value, new_value) is invoked.
+   * For opportunistic performance reasons, this assumes that visitor(kCardClean) is kCardClean!
    */
   template <typename Visitor, typename ModifiedVisitor>
   void ModifyCardsAtomic(uint8_t* scan_begin,
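
The rewritten comment above states the contract that makes the fast paths in card_table-inl.h valid: because visitor(kCardClean) must be kCardClean, whole words of clean cards can be skipped without invoking the visitor (hence the new DCHECK and the `expected_word == 0` shortcut). A self-contained sketch of the contract over a plain byte array, not ART's real card table:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr uint8_t kCardClean = 0;

// Sketch of ModifyCardsAtomic's contract: each card value v becomes
// visitor(v); 'modified' is told about every actual change. Clean cards can
// be skipped wholesale only because visitor(kCardClean) == kCardClean.
template <typename Visitor, typename Modified>
void ModifyCardsSketch(std::vector<uint8_t>& cards,
                       Visitor visitor,
                       Modified modified) {
  assert(visitor(kCardClean) == kCardClean);  // mirrors the new DCHECK
  for (std::size_t i = 0; i < cards.size(); ++i) {
    const uint8_t old_value = cards[i];
    if (old_value == kCardClean) {
      continue;  // safe to skip: the visitor would return kCardClean anyway
    }
    const uint8_t new_value = visitor(old_value);
    if (new_value != old_value) {
      cards[i] = new_value;
      modified(&cards[i], old_value, new_value);
    }
  }
}
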
diff --git a/runtime/gc/accounting/card_table_test.cc b/runtime/gc/accounting/card_table_test.cc
index 87965ed..12baaa4 100644
--- a/runtime/gc/accounting/card_table_test.cc
+++ b/runtime/gc/accounting/card_table_test.cc
@@ -60,7 +60,7 @@
   uint8_t* HeapLimit() const {
     return HeapBegin() + heap_size_;
   }
-  // Return a pseudo random card for an address.
+  // Return a non-zero pseudo random card for an address.
   uint8_t PseudoRandomCard(const uint8_t* addr) const {
     size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
     return 1 + offset % 254;
@@ -97,7 +97,8 @@
 class UpdateVisitor {
  public:
   uint8_t operator()(uint8_t c) const {
-    return c * 93 + 123;
+    // Must map zero to zero. Never applied to zero.
+    return c == 0 ? 0 : c * 93 + 123;
   }
   void operator()(uint8_t* /*card*/, uint8_t /*expected_value*/, uint8_t /*new_value*/) const {
   }
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index c997f8d..e477556 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -20,8 +20,8 @@
 #include <android-base/logging.h>
 
 #include "base/allocator.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "space_bitmap.h"
 
 namespace art {
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 7eca792..2e42f8d 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -21,8 +21,8 @@
 
 #include "base/bit_utils.h"
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc/space/space.h"
 
 namespace art {
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index b96f0d3..469074f 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -19,7 +19,7 @@
 
 #include "base/allocator.h"
 #include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/safe_map.h"
 
 #include <set>
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index fcc3007..8561f06 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -24,8 +24,8 @@
 #include <vector>
 
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index 7675a22..a578252 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -20,8 +20,8 @@
 #include <list>
 #include <memory>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index e508d5f..11ad8a8 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -38,6 +38,7 @@
 #pragma GCC diagnostic ignored "-Wempty-body"
 #pragma GCC diagnostic ignored "-Wstrict-aliasing"
 #pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
+#pragma GCC diagnostic ignored "-Wexpansion-to-defined"
 #include "../../../external/dlmalloc/malloc.c"
 // Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
 //       of libbase, so undefine it now.
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 6b394c7..3160422 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -242,15 +242,33 @@
   // Use load-acquire on the read barrier pointer to ensure that we never see a black (non-gray)
   // read barrier state with an unmarked bit due to reordering.
   DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
-  if (kEnableGenerationalConcurrentCopyingCollection
-      && young_gen_
-      && !done_scanning_.load(std::memory_order_acquire)) {
-    return from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState();
-  }
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
+  } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+             || done_scanning_.load(std::memory_order_acquire)) {
+    // If the card-table scan is not finished yet, only the read-barrier
+    // state should be checked. Checking the mark bitmap is unreliable: there
+    // may be objects - whose corresponding card is dirty - that are marked
+    // in the mark bitmap but cannot be considered marked unless their
+    // read-barrier state is set to gray.
+    //
+    // Why read the read-barrier state before checking done_scanning_?
+    // If the read-barrier state were read *after* done_scanning_, a race
+    // could make this function return false even though the object has
+    // already been marked, because the state would only be read after the
+    // GC thread had finished marking. The following scenario shows the race:
+    //
+    // 1. Mutator thread reads done_scanning_ and upon finding it false, gets
+    // suspended before reading the object's read-barrier state.
+    // 2. GC thread finishes card-table scan and then sets done_scanning_ to
+    // true.
+    // 3. GC thread grays the object, scans it, marks it in the bitmap, and
+    // then changes its read-barrier state back to non-gray.
+    // 4. Mutator thread resumes, reads the object's read-barrier state and
+    // returns false.
+    return region_space_bitmap_->Test(from_ref);
   }
-  return region_space_bitmap_->Test(from_ref);
+  return false;
 }
 
 }  // namespace collector
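
The check order in the hunk above is deliberate: the object's read-barrier state is loaded with acquire semantics before done_scanning_ is consulted, which closes the race spelled out in the comment. A schematic reduction using std::atomic, with stand-in flags rather than ART's types:

#include <atomic>

// Stand-ins for the flags involved in the race described above.
std::atomic<bool> rb_state_is_gray{false};   // per-object read-barrier state
std::atomic<bool> done_scanning{false};      // GC finished the card-table scan
std::atomic<bool> marked_in_bitmap{false};   // mark bitmap entry

// Mirrors IsMarkedInUnevacFromSpace(): read the read-barrier state first.
bool IsMarkedSketch() {
  if (rb_state_is_gray.load(std::memory_order_acquire)) {
    return true;  // gray implies marked, regardless of the scanning phase
  }
  if (done_scanning.load(std::memory_order_acquire)) {
    // Only once scanning is done is the bitmap a reliable source of truth.
    return marked_in_bitmap.load(std::memory_order_relaxed);
  }
  return false;
}

Swapping the two loads would let a thread that saw done_scanning == false get preempted, after which the GC could mark the object and reset its state to non-gray, producing a false negative on resume.
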
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index fefe9ab..53aa9ba 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -332,11 +332,6 @@
               << reinterpret_cast<void*>(region_space_->Limit());
   }
   CheckEmptyMarkStack();
-  if (kIsDebugBuild) {
-    MutexLock mu(Thread::Current(), mark_stack_lock_);
-    CHECK(false_gray_stack_.empty());
-  }
-
   rb_mark_bit_stack_full_ = false;
   mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
   if (measure_read_barrier_slow_path_) {
@@ -865,6 +860,21 @@
   ConcurrentCopying* const collector_;
 };
 
+template <bool kNoUnEvac>
+void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) {
+  Scan<kNoUnEvac>(obj);
+  // Set the read-barrier state of a reference-type object to gray if its
+  // referent is not marked yet. This is to ensure that if GetReferent() is
+  // called, it triggers the read-barrier to process the referent before use.
+  if (UNLIKELY((obj->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass()))) {
+    mirror::Object* referent =
+        obj->AsReference<kVerifyNone, kWithoutReadBarrier>()->GetReferent<kWithoutReadBarrier>();
+    if (referent != nullptr && !IsInToSpace(referent)) {
+      obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
+    }
+  }
+}
+
 // Concurrently mark roots that are guarded by read barriers and process the mark stack.
 void ConcurrentCopying::MarkingPhase() {
   TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
@@ -929,7 +939,7 @@
                 LOG(FATAL) << "Scanning " << obj << " not in unevac space";
               }
             }
-            Scan<true>(obj);
+            ScanDirtyObject</*kNoUnEvac*/ true>(obj);
           },
           accounting::CardTable::kCardDirty - 1);
     }
@@ -1056,9 +1066,6 @@
     Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
     // Marking is done. Disable marking.
     DisableMarking();
-    if (kUseBakerReadBarrier) {
-      ProcessFalseGrayStack();
-    }
     CheckEmptyMarkStack();
   }
 
@@ -1170,32 +1177,6 @@
   mark_stack_mode_.store(kMarkStackModeOff, std::memory_order_seq_cst);
 }
 
-void ConcurrentCopying::PushOntoFalseGrayStack(Thread* const self, mirror::Object* ref) {
-  CHECK(kUseBakerReadBarrier);
-  DCHECK(ref != nullptr);
-  MutexLock mu(self, mark_stack_lock_);
-  false_gray_stack_.push_back(ref);
-}
-
-void ConcurrentCopying::ProcessFalseGrayStack() {
-  CHECK(kUseBakerReadBarrier);
-  // Change the objects on the false gray stack from gray to non-gray (conceptually black).
-  MutexLock mu(Thread::Current(), mark_stack_lock_);
-  for (mirror::Object* obj : false_gray_stack_) {
-    DCHECK(IsMarked(obj));
-    // The object could be non-gray (conceptually black) here if a thread got preempted after a
-    // success at the AtomicSetReadBarrierState in MarkNonMoving(), GC started marking through it
-    // (but not finished so still gray), the thread ran to register it onto the false gray stack,
-    // and then GC eventually marked it black (non-gray) after it finished scanning it.
-    if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
-      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
-                                                    ReadBarrier::NonGrayState());
-      DCHECK(success);
-    }
-  }
-  false_gray_stack_.clear();
-}
-
 void ConcurrentCopying::IssueEmptyCheckpoint() {
   Thread* self = Thread::Current();
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
@@ -1418,24 +1399,6 @@
 }
 
 // The following visitors are used to assert the to-space invariant.
-class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
- public:
-  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
-      : collector_(collector) {}
-
-  void operator()(mirror::Object* ref) const
-      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
-    if (ref == nullptr) {
-      // OK.
-      return;
-    }
-    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
-  }
-
- private:
-  ConcurrentCopying* const collector_;
-};
-
 class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
  public:
   explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
@@ -1447,8 +1410,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
-    AssertToSpaceInvariantRefsVisitor visitor(collector_);
-    visitor(ref);
+    collector_->AssertToSpaceInvariant(obj.Ptr(), offset, ref);
   }
   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
@@ -1464,8 +1426,8 @@
 
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    AssertToSpaceInvariantRefsVisitor visitor(collector_);
-    visitor(root->AsMirrorPtr());
+    mirror::Object* ref = root->AsMirrorPtr();
+    collector_->AssertToSpaceInvariant(/* obj */ nullptr, MemberOffset(0), ref);
   }
 
  private:
@@ -1671,30 +1633,69 @@
   // Invariant: There should be no object from a newly-allocated
   // region (either large or non-large) on the mark stack.
   DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref)) << to_ref;
-  if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
-    // Mark the bitmap only in the GC thread here so that we don't need a CAS.
-    if (!kUseBakerReadBarrier ||
-        !region_space_bitmap_->Set(to_ref)) {
-      // It may be already marked if we accidentally pushed the same object twice due to the racy
-      // bitmap read in MarkUnevacFromSpaceRegion.
-      if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
-        CHECK(region_space_->IsLargeObject(to_ref));
-        region_space_->ZeroLiveBytesForLargeObject(to_ref);
-        Scan<true>(to_ref);
-      } else {
-        Scan<false>(to_ref);
+  bool perform_scan = false;
+  switch (rtype) {
+    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
+      // Mark the bitmap only in the GC thread here so that we don't need a CAS.
+      if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
+        // It may be already marked if we accidentally pushed the same object twice due to the racy
+        // bitmap read in MarkUnevacFromSpaceRegion.
+        if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+          CHECK(region_space_->IsLargeObject(to_ref));
+          region_space_->ZeroLiveBytesForLargeObject(to_ref);
+        }
+        perform_scan = true;
+        // Only add to the live bytes if the object was not already marked and we are not the young
+        // GC.
+        add_to_live_bytes = true;
       }
-      // Only add to the live bytes if the object was not already marked and we are not the young
-      // GC.
-      add_to_live_bytes = true;
-    }
-  } else {
-    if (kEnableGenerationalConcurrentCopyingCollection) {
-      if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
+      break;
+    case space::RegionSpace::RegionType::kRegionTypeToSpace:
+      if (kEnableGenerationalConcurrentCopyingCollection) {
         // Copied to to-space, set the bit so that the next GC can scan objects.
         region_space_bitmap_->Set(to_ref);
       }
-    }
+      perform_scan = true;
+      break;
+    default:
+      DCHECK(!region_space_->HasAddress(to_ref)) << to_ref;
+      DCHECK(!immune_spaces_.ContainsObject(to_ref));
+      // Non-moving or large-object space.
+      if (kUseBakerReadBarrier) {
+        accounting::ContinuousSpaceBitmap* mark_bitmap =
+            heap_->GetNonMovingSpace()->GetMarkBitmap();
+        const bool is_los = !mark_bitmap->HasAddress(to_ref);
+        if (is_los) {
+          if (!IsAligned<kPageSize>(to_ref)) {
+            // Ref is a large object that is not aligned, it must be heap
+            // corruption. Remove memory protection and dump data before
+            // AtomicSetReadBarrierState since it will fault if the address is not
+            // valid.
+            region_space_->Unprotect();
+            heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+                                                        MemberOffset(0),
+                                                        to_ref,
+                                                        /* fatal */ true);
+          }
+          DCHECK(heap_->GetLargeObjectsSpace())
+              << "ref=" << to_ref
+              << " doesn't belong to non-moving space and large object space doesn't exist";
+          accounting::LargeObjectBitmap* los_bitmap =
+              heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+          DCHECK(los_bitmap->HasAddress(to_ref));
+          // Only the GC thread can be setting the LOS bitmap here, so the
+          // update doesn't need to be done atomically.
+          perform_scan = !los_bitmap->Set(to_ref);
+        } else {
+          // Only the GC thread can be setting the non-moving space bitmap
+          // here, so the update doesn't need to be done atomically.
+          perform_scan = !mark_bitmap->Set(to_ref);
+        }
+      } else {
+        perform_scan = true;
+      }
+  }
+  if (perform_scan) {
     if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
       Scan<true>(to_ref);
     } else {
@@ -2079,6 +2080,9 @@
       LOG(INFO) << "(after) num_bytes_allocated="
                 << heap_->num_bytes_allocated_.load();
     }
+
+    float reclaimed_bytes_ratio = static_cast<float>(freed_bytes) / num_bytes_allocated_before_gc_;
+    reclaimed_bytes_ratio_sum_ += reclaimed_bytes_ratio;
   }
 
   {
@@ -2094,11 +2098,6 @@
 
   CheckEmptyMarkStack();
 
-  int64_t num_bytes_allocated_after_gc = static_cast<int64_t>(heap_->GetBytesAllocated());
-  int64_t diff = num_bytes_allocated_before_gc_ - num_bytes_allocated_after_gc;
-  auto ratio = static_cast<float>(diff) / num_bytes_allocated_before_gc_;
-  reclaimed_bytes_ratio_sum_ += ratio;
-
   if (kVerboseMode) {
     LOG(INFO) << "GC end of ReclaimPhase";
   }
@@ -2145,7 +2144,10 @@
                                                mirror::Object* ref) {
   CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   if (is_asserting_to_space_invariant_) {
-    if (region_space_->HasAddress(ref)) {
+    if (ref == nullptr) {
+      // OK.
+      return;
+    } else if (region_space_->HasAddress(ref)) {
       // Check to-space invariant in region space (moving space).
       using RegionType = space::RegionSpace::RegionType;
       space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
@@ -2248,7 +2250,10 @@
                                                mirror::Object* ref) {
   CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   if (is_asserting_to_space_invariant_) {
-    if (region_space_->HasAddress(ref)) {
+    if (ref == nullptr) {
+      // OK.
+      return;
+    } else if (region_space_->HasAddress(ref)) {
       // Check to-space invariant in region space (moving space).
       using RegionType = space::RegionSpace::RegionType;
       space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
@@ -2326,14 +2331,17 @@
       LOG(INFO) << "holder is in an immune image or the zygote space.";
     } else {
       LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
-      accounting::LargeObjectBitmap* los_bitmap =
-          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
-      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
-      bool is_los = mark_bitmap == nullptr;
+      accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+      accounting::LargeObjectBitmap* los_bitmap = nullptr;
+      const bool is_los = !mark_bitmap->HasAddress(obj);
+      if (is_los) {
+        DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(obj))
+            << "obj=" << obj
+            << " LOS bit map covers the entire lower 4GB address range";
+        los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+      }
       if (!is_los && mark_bitmap->Test(obj)) {
-        LOG(INFO) << "holder is marked in the mark bit map.";
+        LOG(INFO) << "holder is marked in the non-moving space mark bit map.";
       } else if (is_los && los_bitmap->Test(obj)) {
         LOG(INFO) << "holder is marked in the los bit map.";
       } else {
@@ -2350,6 +2358,30 @@
   LOG(INFO) << "offset=" << offset.SizeValue();
 }
 
+bool ConcurrentCopying::IsMarkedInNonMovingSpace(mirror::Object* from_ref) {
+  DCHECK(!region_space_->HasAddress(from_ref)) << "ref=" << from_ref;
+  DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
+  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
+    return true;
+  } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+             || done_scanning_.load(std::memory_order_acquire)) {
+    // See the comment in IsMarkedInUnevacFromSpace().
+    accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+    accounting::LargeObjectBitmap* los_bitmap = nullptr;
+    const bool is_los = !mark_bitmap->HasAddress(from_ref);
+    if (is_los) {
+      DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(from_ref))
+          << "ref=" << from_ref
+          << " doesn't belong to non-moving space and large object space doesn't exist";
+      los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+    }
+    if (is_los ? los_bitmap->Test(from_ref) : mark_bitmap->Test(from_ref)) {
+      return true;
+    }
+  }
+  return IsOnAllocStack(from_ref);
+}
+
 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                                mirror::Object* ref) {
   CHECK(ref != nullptr);
@@ -2371,62 +2403,14 @@
     }
   } else {
     // Non-moving space and large-object space (LOS) cases.
-    accounting::ContinuousSpaceBitmap* mark_bitmap =
-        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
-    accounting::LargeObjectBitmap* los_bitmap =
-        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
-    bool is_los = (mark_bitmap == nullptr);
-
-    bool marked_in_non_moving_space_or_los =
-        (kUseBakerReadBarrier
-         && kEnableGenerationalConcurrentCopyingCollection
-         && young_gen_
-         && !done_scanning_.load(std::memory_order_acquire))
-        // Don't use the mark bitmap to ensure `ref` is marked: check that the
-        // read barrier state is gray instead. This is to take into account a
-        // potential race between two read barriers on the same reference when the
-        // young-generation collector is still scanning the dirty cards.
-        //
-        // For instance consider two concurrent read barriers on the same GC root
-        // reference during the dirty-card-scanning step of a young-generation
-        // collection. Both threads would call ReadBarrier::BarrierForRoot, which
-        // would:
-        // a. mark the reference (leading to a call to
-        //    ConcurrentCopying::MarkNonMoving); then
-        // b. check the to-space invariant (leading to a call this
-        //    ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace -- this
-        //    method).
-        //
-        // In this situation, the following race could happen:
-        // 1. Thread A successfully changes `ref`'s read barrier state from
-        //    non-gray (white) to gray (with AtomicSetReadBarrierState) in
-        //    ConcurrentCopying::MarkNonMoving, then gets preempted.
-        // 2. Thread B also tries to change `ref`'s read barrier state with
-        //    AtomicSetReadBarrierState from non-gray to gray in
-        //    ConcurrentCopying::MarkNonMoving, but fails, as Thread A already
-        //    changed it.
-        // 3. Because Thread B failed the previous CAS, it does *not* set the
-        //    bit in the mark bitmap for `ref`.
-        // 4. Thread B checks the to-space invariant and calls
-        //    ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace: the bit
-        //    is not set in the mark bitmap for `ref`; checking that this bit is
-        //    set to check the to-space invariant is therefore not a reliable
-        //    test.
-        // 5. (Note that eventually, Thread A will resume its execution and set
-        //    the bit for `ref` in the mark bitmap.)
-        ? (ref->GetReadBarrierState() == ReadBarrier::GrayState())
-        // It is safe to use the heap mark bitmap otherwise.
-        : (!is_los && mark_bitmap->Test(ref)) || (is_los && los_bitmap->Test(ref));
-
     // If `ref` is on the allocation stack, then it may not be
     // marked live, but considered marked/alive (but not
     // necessarily on the live stack).
-    CHECK(marked_in_non_moving_space_or_los || IsOnAllocStack(ref))
+    CHECK(IsMarkedInNonMovingSpace(ref))
         << "Unmarked ref that's not on the allocation stack."
         << " obj=" << obj
         << " ref=" << ref
         << " rb_state=" << ref->GetReadBarrierState()
-        << " is_los=" << std::boolalpha << is_los << std::noboolalpha
         << " is_marking=" << std::boolalpha << is_marking_ << std::noboolalpha
         << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
         << " done_scanning="
@@ -2780,12 +2764,6 @@
         LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
       }
       bytes_allocated = non_moving_space_bytes_allocated;
-      // Mark it in the mark bitmap.
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
-      CHECK(mark_bitmap != nullptr);
-      bool previously_marked_in_bitmap = mark_bitmap->AtomicTestAndSet(to_ref);
-      CHECK(!previously_marked_in_bitmap);
     }
   }
   DCHECK(to_ref != nullptr);
@@ -2832,10 +2810,6 @@
         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
         // Free the non-moving-space chunk.
-        accounting::ContinuousSpaceBitmap* mark_bitmap =
-            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
-        CHECK(mark_bitmap != nullptr);
-        CHECK(mark_bitmap->Clear(to_ref));
         heap_->non_moving_space_->Free(self, to_ref);
       }
 
@@ -2884,6 +2858,14 @@
       } else {
         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
+        if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) {
+          // Mark it in the live bitmap.
+          CHECK(!heap_->non_moving_space_->GetLiveBitmap()->AtomicTestAndSet(to_ref));
+        }
+        if (!kUseBakerReadBarrier) {
+          // Mark it in the mark bitmap.
+          CHECK(!heap_->non_moving_space_->GetMarkBitmap()->AtomicTestAndSet(to_ref));
+        }
       }
       if (kUseBakerReadBarrier) {
         DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
@@ -2928,34 +2910,11 @@
       to_ref = from_ref;
     } else {
       // Non-immune non-moving space. Use the mark bitmap.
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
-      bool is_los = mark_bitmap == nullptr;
-      if (!is_los && mark_bitmap->Test(from_ref)) {
+      if (IsMarkedInNonMovingSpace(from_ref)) {
         // Already marked.
         to_ref = from_ref;
       } else {
-        accounting::LargeObjectBitmap* los_bitmap =
-            heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
-        // We may not have a large object space for dex2oat, don't assume it exists.
-        if (los_bitmap == nullptr) {
-          CHECK(heap_->GetLargeObjectsSpace() == nullptr)
-              << "LOS bitmap covers the entire address range " << from_ref
-              << " " << heap_->DumpSpaces();
-        }
-        if (los_bitmap != nullptr && is_los && los_bitmap->Test(from_ref)) {
-          // Already marked in LOS.
-          to_ref = from_ref;
-        } else {
-          // Not marked.
-          if (IsOnAllocStack(from_ref)) {
-            // If on the allocation stack, it's considered marked.
-            to_ref = from_ref;
-          } else {
-            // Not marked.
-            to_ref = nullptr;
-          }
-        }
+        to_ref = nullptr;
       }
     }
   }
@@ -2977,11 +2936,24 @@
   DCHECK(!region_space_->HasAddress(ref)) << ref;
   DCHECK(!immune_spaces_.ContainsObject(ref));
   // Use the mark bitmap.
-  accounting::ContinuousSpaceBitmap* mark_bitmap =
-      heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
-  accounting::LargeObjectBitmap* los_bitmap =
-      heap_mark_bitmap_->GetLargeObjectBitmap(ref);
-  bool is_los = mark_bitmap == nullptr;
+  accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+  accounting::LargeObjectBitmap* los_bitmap = nullptr;
+  const bool is_los = !mark_bitmap->HasAddress(ref);
+  if (is_los) {
+    if (!IsAligned<kPageSize>(ref)) {
+      // Ref is a large object that is not aligned, it must be heap
+      // corruption. Remove memory protection and dump data before
+      // AtomicSetReadBarrierState since it will fault if the address is not
+      // valid.
+      region_space_->Unprotect();
+      heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
+    }
+    DCHECK(heap_->GetLargeObjectsSpace())
+        << "ref=" << ref
+        << " doesn't belong to non-moving space and large object space doesn't exist";
+    los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+    DCHECK(los_bitmap->HasAddress(ref));
+  }
   if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
     // The sticky-bit CC collector is only compatible with Baker-style read barriers.
     DCHECK(kUseBakerReadBarrier);
@@ -2999,12 +2971,6 @@
           ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
         // TODO: We don't actually need to scan this object later, we just need to clear the gray
         // bit.
-        // Also make sure the object is marked.
-        if (is_los) {
-          los_bitmap->AtomicTestAndSet(ref);
-        } else {
-          mark_bitmap->AtomicTestAndSet(ref);
-        }
         // We don't need to mark newly allocated objects (those in allocation stack) as they can
         // only point to to-space objects. Also, they are considered live till the next GC cycle.
         PushOntoMarkStack(self, ref);
@@ -3016,65 +2982,34 @@
     // Already marked.
   } else if (is_los && los_bitmap->Test(ref)) {
     // Already marked in LOS.
-  } else {
-    // Not marked.
-    if (IsOnAllocStack(ref)) {
-      // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
-      // Objects on the allocation stack need not be marked.
-      if (!is_los) {
-        DCHECK(!mark_bitmap->Test(ref));
-      } else {
-        DCHECK(!los_bitmap->Test(ref));
-      }
-      if (kUseBakerReadBarrier) {
-        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
-      }
+  } else if (IsOnAllocStack(ref)) {
+    // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
+    // Objects on the allocation stack need not be marked.
+    if (!is_los) {
+      DCHECK(!mark_bitmap->Test(ref));
     } else {
-      // For the baker-style RB, we need to handle 'false-gray' cases. See the
-      // kRegionTypeUnevacFromSpace-case comment in Mark().
+      DCHECK(!los_bitmap->Test(ref));
+    }
+    if (kUseBakerReadBarrier) {
+      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
+    }
+  } else {
+    // Not marked nor on the allocation stack. Try to mark it.
+    // This may or may not succeed, which is ok.
+    bool success = false;
+    if (kUseBakerReadBarrier) {
+      success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
+                                               ReadBarrier::GrayState());
+    } else {
+      success = is_los ?
+          !los_bitmap->AtomicTestAndSet(ref) :
+          !mark_bitmap->AtomicTestAndSet(ref);
+    }
+    if (success) {
       if (kUseBakerReadBarrier) {
-        // Test the bitmap first to reduce the chance of false gray cases.
-        if ((!is_los && mark_bitmap->Test(ref)) ||
-            (is_los && los_bitmap->Test(ref))) {
-          return ref;
-        }
+        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
       }
-      if (is_los && !IsAligned<kPageSize>(ref)) {
-        // Ref is a large object that is not aligned, it must be heap
-        // corruption. Remove memory protection and dump data before
-        // AtomicSetReadBarrierState since it will fault if the address is not
-        // valid.
-        region_space_->Unprotect();
-        heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
-      }
-      // Not marked nor on the allocation stack. Try to mark it.
-      // This may or may not succeed, which is ok.
-      bool cas_success = false;
-      if (kUseBakerReadBarrier) {
-        cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
-                                                     ReadBarrier::GrayState());
-      }
-      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
-        // Already marked.
-        if (kUseBakerReadBarrier &&
-            cas_success &&
-            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
-          PushOntoFalseGrayStack(self, ref);
-        }
-      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
-        // Already marked in LOS.
-        if (kUseBakerReadBarrier &&
-            cas_success &&
-            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
-          PushOntoFalseGrayStack(self, ref);
-        }
-      } else {
-        // Newly marked.
-        if (kUseBakerReadBarrier) {
-          DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
-        }
-        PushOntoMarkStack(self, ref);
-      }
+      PushOntoMarkStack(self, ref);
     }
   }
   return ref;
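
The simplified marking path above is the classic "claim by CAS; only the
winner publishes" idiom: whichever thread first flips the read-barrier state
(or the bitmap bit, in non-Baker configurations) pushes the object onto the
mark stack, so each object is scanned exactly once. A self-contained sketch of
the same idiom, with illustrative names rather than ART's:

    #include <atomic>
    #include <vector>

    struct Node {
      std::atomic<bool> marked{false};
    };

    // Many threads may race to mark the same node; exactly one observes the
    // false -> true transition and becomes responsible for queueing it.
    void TryMark(Node* n, std::vector<Node*>& mark_stack) {
      bool expected = false;
      if (n->marked.compare_exchange_strong(expected, true)) {
        mark_stack.push_back(n);  // The winner queues the node for scanning.
      }
    }
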
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 6535b11..e251fbc 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -161,6 +161,13 @@
   template <bool kNoUnEvac>
   void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
+  // Scan the reference fields of object 'obj' in the dirty cards during
+  // card-table scan. In addition to visiting the references, it also sets the
+  // read-barrier state to gray for Reference-type objects to ensure that
+  // GetReferent() called on these objects calls the read-barrier on the referent.
+  template <bool kNoUnEvac>
+  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_);
   // Process a field.
   template <bool kNoUnEvac>
   void Process(mirror::Object* obj, MemberOffset offset)
@@ -218,6 +225,8 @@
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                    bool do_atomic_update) override
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -283,11 +292,6 @@
   ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                 mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
-  void PushOntoFalseGrayStack(Thread* const self, mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
-  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
   void ScanImmuneObject(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
@@ -315,7 +319,6 @@
   // (see use case in ConcurrentCopying::MarkFromReadBarrier).
   bool rb_mark_bit_stack_full_;
 
-  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
   Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::vector<accounting::ObjectStack*> revoked_mark_stacks_
       GUARDED_BY(mark_stack_lock_);
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 8477c9d..2ef3d92 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -90,12 +90,14 @@
   Thread* self = Thread::Current();
   uint64_t start_time = NanoTime();
   uint64_t thread_cpu_start_time = ThreadCpuNanoTime();
+  GetHeap()->CalculatePreGcWeightedAllocatedBytes();
   Iteration* current_iteration = GetCurrentIteration();
   current_iteration->Reset(gc_cause, clear_soft_references);
   // Note transaction mode is single-threaded and there's no asynchronous GC and this flag doesn't
   // change in the middle of a GC.
   is_transaction_active_ = Runtime::Current()->IsActiveTransaction();
   RunPhases();  // Run all the GC phases.
+  GetHeap()->CalculatePostGcWeightedAllocatedBytes();
   // Add the current timings to the cumulative timings.
   cumulative_timings_.AddLogger(*GetTimings());
   // Update cumulative statistics with how many bytes the GC iteration freed.
diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h
index c9ac435..80ee44c 100644
--- a/runtime/gc/collector/immune_region.h
+++ b/runtime/gc/collector/immune_region.h
@@ -18,7 +18,6 @@
 #define ART_RUNTIME_GC_COLLECTOR_IMMUNE_REGION_H_
 
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 namespace mirror {
diff --git a/runtime/gc/collector/immune_spaces.h b/runtime/gc/collector/immune_spaces.h
index 72cb60d..5a8441a 100644
--- a/runtime/gc/collector/immune_spaces.h
+++ b/runtime/gc/collector/immune_spaces.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_GC_COLLECTOR_IMMUNE_SPACES_H_
 #define ART_RUNTIME_GC_COLLECTOR_IMMUNE_SPACES_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc/space/space.h"
 #include "immune_region.h"
 
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index c29b79c..b0d09ba 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -112,6 +112,8 @@
     // Create image header.
     ImageSection sections[ImageHeader::kSectionCount];
     new (image_map.Begin()) ImageHeader(
+        /*image_reservation_size=*/ image_size,
+        /*component_count=*/ 1u,
         /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
         /*image_size=*/ image_size,
         sections,
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index bb42be6..6fab371 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -20,8 +20,8 @@
 #include <memory>
 
 #include "base/atomic.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "garbage_collector.h"
 #include "gc/accounting/heap_bitmap.h"
 #include "gc_root.h"
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index e253dfb..1c09b5c 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -22,7 +22,6 @@
 #include "allocation_listener.h"
 #include "base/quasi_atomic.h"
 #include "base/time_utils.h"
-#include "base/utils.h"
 #include "gc/accounting/atomic_stack.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/allocation_record.h"
@@ -215,7 +214,7 @@
   if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
     // New_num_bytes_allocated is zero if we didn't update num_bytes_allocated_.
     // That's fine.
-    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
+    CheckConcurrentGCForJava(self, new_num_bytes_allocated, &obj);
   }
   VerifyObject(obj);
   self->VerifyStack();
@@ -255,8 +254,8 @@
                                            size_t* bytes_allocated,
                                            size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated) {
-  if (allocator_type != kAllocatorTypeTLAB &&
-      allocator_type != kAllocatorTypeRegionTLAB &&
+  if (allocator_type != kAllocatorTypeRegionTLAB &&
+      allocator_type != kAllocatorTypeTLAB &&
       allocator_type != kAllocatorTypeRosAlloc &&
       UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
     return nullptr;
@@ -397,30 +396,46 @@
 inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                             size_t alloc_size,
                                             bool grow) {
-  size_t new_footprint = num_bytes_allocated_.load(std::memory_order_relaxed) + alloc_size;
-  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
-    if (UNLIKELY(new_footprint > growth_limit_)) {
+  size_t old_target = target_footprint_.load(std::memory_order_relaxed);
+  while (true) {
+    size_t old_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
+    size_t new_footprint = old_allocated + alloc_size;
+    // Tests against heap limits are inherently approximate, since multiple allocations may
+    // race, and this is not atomic with the allocation.
+    if (UNLIKELY(new_footprint <= old_target)) {
+      return false;
+    } else if (UNLIKELY(new_footprint > growth_limit_)) {
       return true;
     }
-    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
-      if (!grow) {
+    // We are between target_footprint_ and growth_limit_.
+    if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) {
+      return false;
+    } else {
+      if (grow) {
+        if (target_footprint_.compare_exchange_weak(/*inout ref*/old_target, new_footprint,
+                                                    std::memory_order_relaxed)) {
+          VlogHeapGrowth(old_target, new_footprint, alloc_size);
+          return false;
+        }  // else try again.
+      } else {
         return true;
       }
-      // TODO: Grow for allocation is racy, fix it.
-      VlogHeapGrowth(max_allowed_footprint_, new_footprint, alloc_size);
-      max_allowed_footprint_ = new_footprint;
     }
   }
-  return false;
 }
 
-// Request a GC if new_num_bytes_allocated is sufficiently large.
-// A call with new_num_bytes_allocated == 0 is a fast no-op.
-inline void Heap::CheckConcurrentGC(Thread* self,
+inline bool Heap::ShouldConcurrentGCForJava(size_t new_num_bytes_allocated) {
+  // For a Java allocation, we only check whether the number of Java allocated bytes exceeds a
+  // threshold. By not considering native allocation here, we (a) ensure that Java heap bounds are
+  // maintained, and (b) reduce the cost of the check here.
+  return new_num_bytes_allocated >= concurrent_start_bytes_;
+}
+
+inline void Heap::CheckConcurrentGCForJava(Thread* self,
                                     size_t new_num_bytes_allocated,
                                     ObjPtr<mirror::Object>* obj) {
-  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
-    RequestConcurrentGCAndSaveObject(self, false, obj);
+  if (UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
+    RequestConcurrentGCAndSaveObject(self, /*force_full=*/ false, obj);
   }
 }
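
The target_footprint_ rewrite above replaces a racy plain-store grow with a
compare-exchange retry loop: the footprint only grows when the CAS wins, and a
losing thread re-reads the target and re-evaluates. A standalone sketch of the
same idiom, omitting the concurrent-GC early return (names here are
illustrative, not ART's):

    #include <atomic>
    #include <cstddef>

    // Returns true iff the allocation should fail (out of memory).
    // `target` plays the role of target_footprint_, `hard_limit` of growth_limit_.
    bool IsOutOfMemory(std::atomic<size_t>& target,
                       const std::atomic<size_t>& allocated,
                       size_t hard_limit,
                       size_t alloc_size,
                       bool grow) {
      size_t old_target = target.load(std::memory_order_relaxed);
      while (true) {
        size_t new_footprint = allocated.load(std::memory_order_relaxed) + alloc_size;
        if (new_footprint <= old_target) {
          return false;  // Fits under the current target.
        }
        if (new_footprint > hard_limit || !grow) {
          return true;   // Over the hard limit, or not allowed to grow.
        }
        // Raise the target; on failure old_target is refreshed and we retry.
        if (target.compare_exchange_weak(old_target, new_footprint,
                                         std::memory_order_relaxed)) {
          return false;
        }
      }
    }
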
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f767360..77254ce 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -17,6 +17,7 @@
 #include "heap.h"
 
 #include <limits>
+#include <malloc.h>  // For mallinfo()
 #include <memory>
 #include <vector>
 
@@ -36,6 +37,7 @@
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/time_utils.h"
+#include "base/utils.h"
 #include "common_throws.h"
 #include "debugger.h"
 #include "dex/dex_file-inl.h"
@@ -173,6 +175,8 @@
            double foreground_heap_growth_multiplier,
            size_t capacity,
            size_t non_moving_space_capacity,
+           const std::vector<std::string>& boot_class_path,
+           const std::vector<std::string>& boot_class_path_locations,
            const std::string& image_file_name,
            const InstructionSet image_instruction_set,
            CollectorType foreground_collector_type,
@@ -184,7 +188,7 @@
            bool low_memory_mode,
            size_t long_pause_log_threshold,
            size_t long_gc_log_threshold,
-           bool ignore_max_footprint,
+           bool ignore_target_footprint,
            bool use_tlab,
            bool verify_pre_gc_heap,
            bool verify_pre_sweeping_heap,
@@ -210,7 +214,12 @@
       low_memory_mode_(low_memory_mode),
       long_pause_log_threshold_(long_pause_log_threshold),
       long_gc_log_threshold_(long_gc_log_threshold),
-      ignore_max_footprint_(ignore_max_footprint),
+      process_cpu_start_time_ns_(ProcessCpuNanoTime()),
+      pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
+      post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
+      pre_gc_weighted_allocated_bytes_(0.0),
+      post_gc_weighted_allocated_bytes_(0.0),
+      ignore_target_footprint_(ignore_target_footprint),
       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
       zygote_space_(nullptr),
       large_object_threshold_(large_object_threshold),
@@ -223,13 +232,14 @@
       next_gc_type_(collector::kGcTypePartial),
       capacity_(capacity),
       growth_limit_(growth_limit),
-      max_allowed_footprint_(initial_size),
+      target_footprint_(initial_size),
       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       num_bytes_allocated_(0),
-      new_native_bytes_allocated_(0),
+      native_bytes_registered_(0),
       old_native_bytes_allocated_(0),
+      native_objects_notified_(0),
       num_bytes_freed_revoke_(0),
       verify_missing_card_marks_(false),
       verify_system_weaks_(false),
@@ -347,7 +357,9 @@
   // Load image space(s).
   std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
   MemMap heap_reservation;
-  if (space::ImageSpace::LoadBootImage(image_file_name,
+  if (space::ImageSpace::LoadBootImage(boot_class_path,
+                                       boot_class_path_locations,
+                                       image_file_name,
                                        image_instruction_set,
                                        heap_reservation_size,
                                        &boot_image_spaces,
@@ -606,11 +618,11 @@
   task_processor_.reset(new TaskProcessor());
   reference_processor_.reset(new ReferenceProcessor());
   pending_task_lock_ = new Mutex("Pending task lock");
-  if (ignore_max_footprint_) {
+  if (ignore_target_footprint_) {
     SetIdealFootprint(std::numeric_limits<size_t>::max());
     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   }
-  CHECK_NE(max_allowed_footprint_, 0U);
+  CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
   // Create our garbage collectors.
   for (size_t i = 0; i < 2; ++i) {
     const bool concurrent = i != 0;
@@ -1062,6 +1074,27 @@
   }
 }
 
+double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
+                                               uint64_t current_process_cpu_time) const {
+  uint64_t bytes_allocated = GetBytesAllocated();
+  double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
+  return weight * bytes_allocated;
+}
+
+void Heap::CalculatePreGcWeightedAllocatedBytes() {
+  uint64_t current_process_cpu_time = ProcessCpuNanoTime();
+  pre_gc_weighted_allocated_bytes_ +=
+    CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
+  pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
+}
+
+void Heap::CalculatePostGcWeightedAllocatedBytes() {
+  uint64_t current_process_cpu_time = ProcessCpuNanoTime();
+  post_gc_weighted_allocated_bytes_ +=
+    CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
+  post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
+}
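
As a worked example of the metric above (hypothetical numbers): if 1.5 s of
process CPU time elapsed since the previous pre-GC sample while roughly 100 MB
were allocated, the pre-GC counter grows by 1.5e9 ns * 1e8 B = 1.5e17
byte-nanoseconds. Dividing the accumulated sum by total process CPU time thus
yields a CPU-time-weighted average heap size, which is what the
GetPreGcWeightedAllocatedBytes()/GetPostGcWeightedAllocatedBytes() accessors
added to heap.h below expose.
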
+
 uint64_t Heap::GetTotalGcCpuTime() {
   uint64_t sum = 0;
   for (auto* collector : garbage_collectors_) {
@@ -1127,10 +1160,11 @@
     rosalloc_space_->DumpStats(os);
   }
 
-  os << "Registered native bytes allocated: "
-     << (old_native_bytes_allocated_.load(std::memory_order_relaxed) +
-         new_native_bytes_allocated_.load(std::memory_order_relaxed))
-     << "\n";
+  os << "Native bytes total: " << GetNativeBytes()
+     << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
+
+  os << "Total native bytes at last GC: "
+     << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
 
   BaseMutex::DumpAll(os);
 }
@@ -1139,6 +1173,15 @@
   for (auto* collector : garbage_collectors_) {
     collector->ResetMeasurements();
   }
+
+  process_cpu_start_time_ns_ = ProcessCpuNanoTime();
+
+  pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
+  pre_gc_weighted_allocated_bytes_ = 0u;
+
+  post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
+  post_gc_weighted_allocated_bytes_ = 0u;
+
   total_bytes_freed_ever_ = 0;
   total_objects_freed_ever_ = 0;
   total_wait_time_ = 0;
@@ -1297,7 +1340,8 @@
   size_t total_bytes_free = GetFreeMemory();
   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
       << " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
-      << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
+      << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
+      << ", growth limit "
       << growth_limit_;
   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
   if (total_bytes_free >= byte_count) {
@@ -1832,7 +1876,7 @@
 }
 
 void Heap::SetTargetHeapUtilization(float target) {
-  DCHECK_GT(target, 0.0f);  // asserted in Java code
+  DCHECK_GT(target, 0.1f);  // asserted in Java code
   DCHECK_LT(target, 1.0f);
   target_utilization_ = target;
 }
@@ -2246,8 +2290,8 @@
     }
     if (IsGcConcurrent()) {
       concurrent_start_bytes_ =
-          std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
-          kMinConcurrentRemainingBytes;
+          UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
+                             kMinConcurrentRemainingBytes);
     } else {
       concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
     }
@@ -2576,6 +2620,39 @@
   ATRACE_INT("Heap size (KB)", heap_size / KB);
 }
 
+size_t Heap::GetNativeBytes() {
+  size_t malloc_bytes;
+#if defined(__BIONIC__) || defined(__GLIBC__)
+  size_t mmapped_bytes;  // Only used on this libc path; would be unused (and warn) on the #else path.
+  struct mallinfo mi = mallinfo();
+  // In spite of the documentation, the jemalloc version of this call seems to do what we want,
+  // and it is thread-safe.
+  if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
+    // Shouldn't happen, but glibc declares uordblks as int.
+    // Avoiding sign extension gets us correct behavior for another 2 GB.
+    malloc_bytes = (unsigned int)mi.uordblks;
+    mmapped_bytes = (unsigned int)mi.hblkhd;
+  } else {
+    malloc_bytes = mi.uordblks;
+    mmapped_bytes = mi.hblkhd;
+  }
+  // From the spec, we clearly have mmapped_bytes <= malloc_bytes. Reality is sometimes
+  // dramatically different. (b/119580449) If so, fudge it.
+  if (mmapped_bytes > malloc_bytes) {
+    malloc_bytes = mmapped_bytes;
+  }
+#else
+  // We should hit this case only in contexts in which GC triggering is not critical. Effectively
+  // disable GC triggering based on malloc().
+  malloc_bytes = 1000;
+#endif
+  return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
+  // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
+  // more expensive, and it would allow us to count memory allocated by means other than malloc.
+  // However it would change as pages are unmapped and remapped due to memory pressure, among
+  // other things. It seems risky to trigger GCs as a result of such changes.
+}
+
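
For readers who want to experiment with the accounting above, a minimal
standalone program reading the same mallinfo() fields (assumes a bionic or
glibc C library; illustrative, not ART code):

    #include <malloc.h>
    #include <cstddef>
    #include <cstdio>

    int main() {
      struct mallinfo mi = mallinfo();
      // uordblks: bytes in in-use malloc chunks; hblkhd: bytes in mmap'ed
      // regions. Both are declared int, so cast through unsigned to stay
      // correct for heaps between 2 GB and 4 GB.
      size_t malloc_bytes = static_cast<unsigned int>(mi.uordblks);
      size_t mmapped_bytes = static_cast<unsigned int>(mi.hblkhd);
      std::printf("native heap: %zu malloc + %zu mmap bytes\n",
                  malloc_bytes, mmapped_bytes);
      return 0;
    }
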
 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
                                                GcCause gc_cause,
                                                bool clear_soft_references) {
@@ -2626,16 +2703,7 @@
     ++runtime->GetStats()->gc_for_alloc_count;
     ++self->GetStats()->gc_for_alloc_count;
   }
-  const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
-
-  if (gc_type == NonStickyGcType()) {
-    // Move all bytes from new_native_bytes_allocated_ to
-    // old_native_bytes_allocated_ now that GC has been triggered, resetting
-    // new_native_bytes_allocated_ to zero in the process.
-    old_native_bytes_allocated_.fetch_add(
-        new_native_bytes_allocated_.exchange(0, std::memory_order_relaxed),
-        std::memory_order_relaxed);
-  }
+  const size_t bytes_allocated_before_gc = GetBytesAllocated();
 
   DCHECK_LT(gc_type, collector::kGcTypeMax);
   DCHECK_NE(gc_type, collector::kGcTypeNone);
@@ -2707,6 +2775,9 @@
   FinishGC(self, gc_type);
   // Inform DDMS that a GC completed.
   Dbg::GcDidFinish();
+
+  old_native_bytes_allocated_.store(GetNativeBytes());
+
   // Unload native libraries for class unloading. We do this after calling FinishGC to prevent
   // deadlocks in case the JNI_OnUnload function does allocations.
   {
@@ -3481,16 +3552,17 @@
 }
 
 size_t Heap::GetPercentFree() {
-  return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
+  return static_cast<size_t>(100.0f * static_cast<float>(
+      GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
 }
 
-void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
-  if (max_allowed_footprint > GetMaxMemory()) {
-    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
+void Heap::SetIdealFootprint(size_t target_footprint) {
+  if (target_footprint > GetMaxMemory()) {
+    VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
              << PrettySize(GetMaxMemory());
-    max_allowed_footprint = GetMaxMemory();
+    target_footprint = GetMaxMemory();
   }
-  max_allowed_footprint_ = max_allowed_footprint;
+  target_footprint_.store(target_footprint, std::memory_order_relaxed);
 }
 
 bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
@@ -3523,10 +3595,10 @@
 }
 
 void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
-                              uint64_t bytes_allocated_before_gc) {
+                              size_t bytes_allocated_before_gc) {
   // We know what our utilization is at this moment.
   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
-  const uint64_t bytes_allocated = GetBytesAllocated();
+  const size_t bytes_allocated = GetBytesAllocated();
   // Trace the new heap size after the GC is finished.
   TraceHeapSize(bytes_allocated);
   uint64_t target_size;
@@ -3534,16 +3606,18 @@
   // Use the multiplier to grow more for foreground.
   const double multiplier = HeapGrowthMultiplier();
-  const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
-  const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
+  const size_t adjusted_min_free = static_cast<size_t>(min_free_ * multiplier);
+  const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
   if (gc_type != collector::kGcTypeSticky) {
     // Grow the heap for non sticky GC.
-    ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
-    CHECK_GE(delta, 0) << "bytes_allocated=" << bytes_allocated
-                       << " target_utilization_=" << target_utilization_;
+    uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
+    DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
+        << " target_utilization_=" << target_utilization_;
     target_size = bytes_allocated + delta * multiplier;
-    target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
-    target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
+    target_size = std::min(target_size,
+                           static_cast<uint64_t>(bytes_allocated + adjusted_max_free));
+    target_size = std::max(target_size,
+                           static_cast<uint64_t>(bytes_allocated + adjusted_min_free));
     next_gc_type_ = collector::kGcTypeSticky;
   } else {
     collector::GcType non_sticky_gc_type = NonStickyGcType();
@@ -3560,22 +3634,24 @@
     // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
     // pathological case where dead objects which aren't reclaimed by sticky could get accumulated
     // if the sticky GC throughput always remained >= the full/partial throughput.
+    size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
     if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
         non_sticky_collector->GetEstimatedMeanThroughput() &&
         non_sticky_collector->NumberOfIterations() > 0 &&
-        bytes_allocated <= max_allowed_footprint_) {
+        bytes_allocated <= target_footprint) {
       next_gc_type_ = collector::kGcTypeSticky;
     } else {
       next_gc_type_ = non_sticky_gc_type;
     }
     // If we have freed enough memory, shrink the heap back down.
-    if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
+    if (bytes_allocated + adjusted_max_free < target_footprint) {
       target_size = bytes_allocated + adjusted_max_free;
     } else {
-      target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
+      target_size = std::max(bytes_allocated, target_footprint);
     }
   }
-  if (!ignore_max_footprint_) {
+  CHECK_LE(target_size, std::numeric_limits<size_t>::max());
+  if (!ignore_target_footprint_) {
     SetIdealFootprint(target_size);
     if (IsGcConcurrent()) {
       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
@@ -3584,26 +3660,25 @@
       // Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
       // how many bytes were allocated during the GC we need to add freed_bytes back on.
       CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
-      const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
+      const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
           bytes_allocated_before_gc;
       // Calculate when to perform the next ConcurrentGC.
       // Estimate how many remaining bytes we will have when we need to start the next GC.
       size_t remaining_bytes = bytes_allocated_during_gc;
       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
       remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
-      if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
+      size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+      if (UNLIKELY(remaining_bytes > target_footprint)) {
         // A situation that should never happen: from the estimated allocation rate we would
         // exceed the application's entire footprint. Schedule another GC nearly straight away.
-        remaining_bytes = kMinConcurrentRemainingBytes;
+        remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
       }
-      DCHECK_LE(remaining_bytes, max_allowed_footprint_);
-      DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
+      DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
       // right away.
-      concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
-                                         static_cast<size_t>(bytes_allocated));
+      concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
     }
   }
 }
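
A worked example of the sizing above (hypothetical numbers): with target
utilization 0.75, multiplier 1.0, and 300 MB live after a non-sticky GC,
delta = 300 MB * (1/0.75 - 1) = 100 MB, so target_size = 400 MB before being
clamped into [300 MB + adjusted_min_free, 300 MB + adjusted_max_free]. In the
concurrent case, if roughly 20 MB were allocated while that GC ran,
remaining_bytes is 20 MB (after clamping into
[kMinConcurrentRemainingBytes, kMaxConcurrentRemainingBytes]) and
concurrent_start_bytes_ becomes about 400 MB - 20 MB = 380 MB, so the next GC
is requested early enough to finish before the target footprint is reached.
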
@@ -3631,11 +3706,11 @@
 }
 
 void Heap::ClearGrowthLimit() {
-  if (max_allowed_footprint_ == growth_limit_ && growth_limit_ < capacity_) {
-    max_allowed_footprint_ = capacity_;
+  if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
+      && growth_limit_ < capacity_) {
+    target_footprint_.store(capacity_, std::memory_order_relaxed);
     concurrent_start_bytes_ =
-         std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
-         kMinConcurrentRemainingBytes;
+        UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
   }
   growth_limit_ = capacity_;
   ScopedObjectAccess soa(Thread::Current());
@@ -3875,40 +3950,101 @@
                             static_cast<jlong>(timeout));
 }
 
-void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
-  size_t old_value = new_native_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
+// For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
+// different fractions of Java allocations.
+// For now, we essentially do not count old native allocations at all, so that we can preserve the
+// existing behavior of not limiting native heap size. If we seriously considered it, we would
+// have to adjust collection thresholds when we encounter large amounts of old native memory,
+// and handle native out-of-memory situations.
 
-  if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
-             !IsGCRequestPending()) {
-    // Trigger another GC because there have been enough native bytes
-    // allocated since the last GC.
+static constexpr size_t kOldNativeDiscountFactor = 65536;  // Approximately infinite for now.
+static constexpr size_t kNewNativeDiscountFactor = 2;
+
+// If weighted java + native memory use exceeds our target by a factor of kStopForNativeFactor,
+// and the current native memory use exceeds kHugeNativeAllocs, we wait for GC to complete to
+// avoid running out of memory.
+static constexpr float kStopForNativeFactor = 2.0;
+static constexpr size_t kHugeNativeAllocs = 200*1024*1024;
+
+// Return the ratio of the weighted native + java allocated bytes to its target value.
+// A return value > 1.0 means we should collect. Significantly larger values mean we're falling
+// behind.
+inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes) {
+  // Collection check for native allocation. Does not enforce Java heap bounds.
+  // With adj_start_bytes defined below, effectively checks
+  // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
+  // where c1 and c2 are 1 divided by the discount factors defined above.
+  size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
+  if (old_native_bytes > current_native_bytes) {
+    // Net decrease; skip the check, but update old value.
+    // It's OK to lose an update if two stores race.
+    old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
+    return 0.0;
+  } else {
+    size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
+    size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
+        + old_native_bytes / kOldNativeDiscountFactor;
+    size_t adj_start_bytes = concurrent_start_bytes_
+        + NativeAllocationGcWatermark() / kNewNativeDiscountFactor;
+    return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
+         / static_cast<float>(adj_start_bytes);
+  }
+}
+
+inline void Heap::CheckConcurrentGCForNative(Thread* self) {
+  size_t current_native_bytes = GetNativeBytes();
+  float gc_urgency = NativeMemoryOverTarget(current_native_bytes);
+  if (UNLIKELY(gc_urgency >= 1.0)) {
     if (IsGcConcurrent()) {
-      RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true);
+      RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
+      if (gc_urgency > kStopForNativeFactor
+          && current_native_bytes > kHugeNativeAllocs) {
+        // We're in danger of running out of memory due to rampant native allocation.
+        if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+          LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
+        }
+        WaitForGcToComplete(kGcCauseForAlloc, self);
+      }
     } else {
       CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
     }
   }
 }
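
To make the native-triggering arithmetic above concrete (all numbers
hypothetical): suppose concurrent_start_bytes_ = 100 MB and
NativeAllocationGcWatermark() = 64 MB, so adj_start_bytes = 100 MB +
64 MB / 2 = 132 MB. With 80 MB of Java bytes allocated, old native use of
40 MB, and current native use of 120 MB, the weighted native contribution is
(120 - 40) / 2 + 40 / 65536, about 40 MB, giving a gc_urgency of roughly
(80 + 40) / 132 = 0.91: below 1.0, so no GC is requested. At 300 MB of current
native use, gc_urgency rises to about (80 + 130) / 132 = 1.59, which requests
a concurrent GC but does not block the caller, since 1.59 is still below
kStopForNativeFactor.
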
 
-void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
-  // Take the bytes freed out of new_native_bytes_allocated_ first. If
-  // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
-  // out of old_native_bytes_allocated_ to ensure all freed bytes are
-  // accounted for.
-  size_t allocated;
-  size_t new_freed_bytes;
-  do {
-    allocated = new_native_bytes_allocated_.load(std::memory_order_relaxed);
-    new_freed_bytes = std::min(allocated, bytes);
-  } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated,
-                                                                   allocated - new_freed_bytes));
-  if (new_freed_bytes < bytes) {
-    old_native_bytes_allocated_.fetch_sub(bytes - new_freed_bytes, std::memory_order_relaxed);
+// About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
+void Heap::NotifyNativeAllocations(JNIEnv* env) {
+  native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
+  CheckConcurrentGCForNative(ThreadForEnv(env));
+}
+
+// Register a native allocation with an explicit size.
+// This should only be done for large allocations of non-malloc memory, which we wouldn't
+// otherwise see.
+void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
+  native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
+  uint32_t objects_notified =
+      native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
+  if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
+      || bytes > kCheckImmediatelyThreshold) {
+    CheckConcurrentGCForNative(ThreadForEnv(env));
   }
 }
 
+void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
+  size_t allocated;
+  size_t new_freed_bytes;
+  do {
+    allocated = native_bytes_registered_.load(std::memory_order_relaxed);
+    new_freed_bytes = std::min(allocated, bytes);
+    // We should not be registering more free than allocated bytes.
+    // But correctly keep going in non-debug builds.
+    DCHECK_EQ(new_freed_bytes, bytes);
+  } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
+                                                              allocated - new_freed_bytes));
+}
+
 size_t Heap::GetTotalMemory() const {
-  return std::max(max_allowed_footprint_, GetBytesAllocated());
+  return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
 }
 
 void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
@@ -4210,8 +4346,8 @@
   return verification_.get();
 }
 
-void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size) {
-  VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint) << " to "
+void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
+  VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
              << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
 }
 
@@ -4222,20 +4358,21 @@
     gc::Heap* heap = Runtime::Current()->GetHeap();
     // Trigger a GC, if not already done. The first GC after fork, whenever it
     // takes place, will adjust the thresholds to normal levels.
-    if (heap->max_allowed_footprint_ == heap->growth_limit_) {
+    if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
       heap->RequestConcurrentGC(self, kGcCauseBackground, false);
     }
   }
 };
 
 void Heap::PostForkChildAction(Thread* self) {
-  // Temporarily increase max_allowed_footprint_ and concurrent_start_bytes_ to
+  // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
   // max values to avoid GC during app launch.
   if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
-    // Set max_allowed_footprint_ to the largest allowed value.
+    // Set target_footprint_ to the largest allowed value.
     SetIdealFootprint(growth_limit_);
     // Set concurrent_start_bytes_ to half of the heap size.
-    concurrent_start_bytes_ = std::max(max_allowed_footprint_ / 2, GetBytesAllocated());
+    size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+    concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
 
     GetTaskProcessor()->AddTask(
         self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index a43f315..de65f023 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -126,7 +126,6 @@
 
 class Heap {
  public:
-  // If true, measure the total allocation time.
   static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -155,6 +154,16 @@
   // Used so that we don't overflow the allocation time atomic integer.
   static constexpr size_t kTimeAdjust = 1024;
 
+  // Client should call NotifyNativeAllocations every kNotifyNativeInterval allocations.
+  // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
+  // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec.
+  static constexpr uint32_t kNotifyNativeInterval = 32;
+
+  // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
+  // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
+  // make it safe to allocate that many bytes between checks.
+  static constexpr size_t kCheckImmediatelyThreshold = 300000;
+
   // How often we allow heap trimming to happen (nanoseconds).
   static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
   // How long we wait after a transition request to perform a collector transition (nanoseconds).
@@ -174,7 +183,9 @@
        double foreground_heap_growth_multiplier,
        size_t capacity,
        size_t non_moving_space_capacity,
-       const std::string& original_image_file_name,
+       const std::vector<std::string>& boot_class_path,
+       const std::vector<std::string>& boot_class_path_locations,
+       const std::string& image_file_name,
        InstructionSet image_instruction_set,
        CollectorType foreground_collector_type,
        CollectorType background_collector_type,
@@ -185,7 +196,7 @@
        bool low_memory_mode,
        size_t long_pause_threshold,
        size_t long_gc_threshold,
-       bool ignore_max_footprint,
+       bool ignore_target_footprint,
        bool use_tlab,
        bool verify_pre_gc_heap,
        bool verify_pre_sweeping_heap,
@@ -267,10 +278,22 @@
   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Inform the garbage collector of a non-malloc allocated native memory that might become
+  // reclaimable in the future as a result of Java garbage collection.
   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
   void RegisterNativeFree(JNIEnv* env, size_t bytes);
 
+  // Notify the garbage collector of malloc allocations that might be reclaimable
+  // as a result of Java garbage collection. Each such call represents approximately
+  // kNotifyNativeInterval such allocations.
+  void NotifyNativeAllocations(JNIEnv* env)
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+
+  uint32_t GetNotifyNativeInterval() {
+    return kNotifyNativeInterval;
+  }
+
   // Change the allocator, updates entrypoints.
   void ChangeAllocator(AllocatorType allocator)
       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
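
A hypothetical caller-side sketch (not ART code) of the contract documented
above: cheap malloc-backed objects batch one notification per
kNotifyNativeInterval allocations, while large non-malloc blocks register
their exact size and are checked (almost) immediately:

    // Assumes an attached JNIEnv* and a gc::Heap* obtained from the runtime.
    void OnNativeAllocation(JNIEnv* env, gc::Heap* heap,
                            size_t bytes, bool malloc_backed) {
      if (!malloc_backed) {
        // Non-malloc memory (e.g. direct mmap) is invisible to mallinfo(),
        // so its size must be registered explicitly.
        heap->RegisterNativeAllocation(env, bytes);
        return;
      }
      static thread_local uint32_t count = 0;
      if (++count % heap->GetNotifyNativeInterval() == 0) {
        heap->NotifyNativeAllocations(env);  // Amortized mallinfo()-based check.
      }
    }
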
@@ -395,8 +418,22 @@
     REQUIRES(!Locks::heap_bitmap_lock_)
     REQUIRES(Locks::mutator_lock_);
 
+  double GetPreGcWeightedAllocatedBytes() const {
+    return pre_gc_weighted_allocated_bytes_;
+  }
+
+  double GetPostGcWeightedAllocatedBytes() const {
+    return post_gc_weighted_allocated_bytes_;
+  }
+
+  void CalculatePreGcWeightedAllocatedBytes();
+  void CalculatePostGcWeightedAllocatedBytes();
   uint64_t GetTotalGcCpuTime();
 
+  uint64_t GetProcessCpuStartTime() const {
+    return process_cpu_start_time_ns_;
+  }
+
   // Set target ideal heap utilization ratio, implements
   // dalvik.system.VMRuntime.setTargetHeapUtilization.
   void SetTargetHeapUtilization(float target);
@@ -520,21 +557,20 @@
 
   // Returns approximately how much free memory we have until the next GC happens.
   size_t GetFreeMemoryUntilGC() const {
-    return max_allowed_footprint_ - GetBytesAllocated();
+    return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
+                              GetBytesAllocated());
   }
 
   // Returns approximately how much free memory we have until the next OOME happens.
   size_t GetFreeMemoryUntilOOME() const {
-    return growth_limit_ - GetBytesAllocated();
+    return UnsignedDifference(growth_limit_, GetBytesAllocated());
   }
 
   // Returns how much free memory we have until we need to grow the heap to perform an allocation.
   // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
   size_t GetFreeMemory() const {
-    size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
-    size_t total_memory = GetTotalMemory();
-    // Make sure we don't get a negative number.
-    return total_memory - std::min(total_memory, byte_allocated);
+    return UnsignedDifference(GetTotalMemory(),
+                              num_bytes_allocated_.load(std::memory_order_relaxed));
   }
 
   // Get the space that corresponds to an object's address. Current implementation searches all
@@ -847,6 +883,9 @@
       REQUIRES(!*gc_complete_lock_);
   void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
 
+  double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
+                                           uint64_t current_process_cpu_time) const;
+
   // Create a mem map with a preferred base address.
   static MemMap MapAnonymousPreferredAddress(const char* name,
                                              uint8_t* request_begin,
@@ -858,12 +897,16 @@
     return main_space_backup_ != nullptr;
   }
 
+  static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
+    return x > y ? x - y : 0;
+  }
+
   static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
     return
+        allocator_type != kAllocatorTypeRegionTLAB &&
         allocator_type != kAllocatorTypeBumpPointer &&
         allocator_type != kAllocatorTypeTLAB &&
-        allocator_type != kAllocatorTypeRegion &&
-        allocator_type != kAllocatorTypeRegionTLAB;
+        allocator_type != kAllocatorTypeRegion;
   }
   static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
     if (kUseReadBarrier) {
@@ -871,24 +914,30 @@
       return true;
     }
     return
-        allocator_type != kAllocatorTypeBumpPointer &&
-        allocator_type != kAllocatorTypeTLAB;
+        allocator_type != kAllocatorTypeTLAB &&
+        allocator_type != kAllocatorTypeBumpPointer;
   }
   static bool IsMovingGc(CollectorType collector_type) {
     return
+        collector_type == kCollectorTypeCC ||
         collector_type == kCollectorTypeSS ||
         collector_type == kCollectorTypeGSS ||
-        collector_type == kCollectorTypeCC ||
         collector_type == kCollectorTypeCCBackground ||
         collector_type == kCollectorTypeHomogeneousSpaceCompact;
   }
   bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
-                                       size_t new_num_bytes_allocated,
-                                       ObjPtr<mirror::Object>* obj)
+
+  // Checks whether we should garbage collect:
+  ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
+  float NativeMemoryOverTarget(size_t current_native_bytes);
+  ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
+                                              size_t new_num_bytes_allocated,
+                                              ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
+  void CheckConcurrentGCForNative(Thread* self)
+      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
 
   accounting::ObjectStack* GetMarkStack() {
     return mark_stack_.get();
@@ -949,6 +998,11 @@
   void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Are we out of memory, and thus should force a GC or fail?
+  // For concurrent collectors, out of memory is defined by growth_limit_.
+  // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
+  // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
+  // to accommodate the allocation.
   ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                                size_t alloc_size,
                                                bool grow);
@@ -1012,7 +1066,7 @@
   // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
   // the GC was run.
   void GrowForUtilization(collector::GarbageCollector* collector_ran,
-                          uint64_t bytes_allocated_before_gc = 0);
+                          size_t bytes_allocated_before_gc = 0);
 
   size_t GetPercentFree();
 
@@ -1046,8 +1100,8 @@
   // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
   // sweep GC, false for other GC types.
   bool IsGcConcurrent() const ALWAYS_INLINE {
-    return collector_type_ == kCollectorTypeCMS ||
-        collector_type_ == kCollectorTypeCC ||
+    return collector_type_ == kCollectorTypeCC ||
+        collector_type_ == kCollectorTypeCMS ||
         collector_type_ == kCollectorTypeCCBackground;
   }
 
@@ -1076,15 +1130,19 @@
     return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
   }
 
-  // How large new_native_bytes_allocated_ can grow before we trigger a new
-  // GC.
+  // Return the amount of space we allow for native memory when deciding whether to
+  // collect. We collect when a weighted sum of Java memory plus native memory exceeds
+  // the similarly weighted sum of the Java heap size target and this value.
   ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
-    // Reuse max_free_ for the native allocation gc watermark, so that the
-    // native heap is treated in the same way as the Java heap in the case
-    // where the gc watermark update would exceed max_free_. Using max_free_
-    // instead of the target utilization means the watermark doesn't depend on
-    // the current number of registered native allocations.
-    return max_free_;
+    // It probably makes most sense to use a constant multiple of target_footprint_.
+    // This is a good indication of the live data size, together with the
+    // intended space-time trade-off, as expressed by SetTargetHeapUtilization.
+    // For a fixed target utilization, the amount of GC effort per native
+    // allocated byte remains roughly constant as the Java heap size changes.
+    // But we previously triggered on max_free_ native allocation which is often much
+    // smaller. To avoid unexpected growth, we partially keep that limit in place for now.
+    // TODO: Consider HeapGrowthMultiplier(). Maybe.
+    return std::min(target_footprint_.load(std::memory_order_relaxed), 2 * max_free_);
   }
 
   ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
@@ -1094,6 +1152,11 @@
   // Remove a vlog code from heap-inl.h which is transitively included in half the world.
   static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
 
+  // Return our best approximation of the number of bytes of native memory that
+  // are currently in use, and could possibly be reclaimed as an indirect result
+  // of a garbage collection.
+  size_t GetNativeBytes();
+
   // All-known continuous spaces, where objects lie within fixed bounds.
   std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
 
@@ -1161,9 +1224,21 @@
   // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
   const size_t long_gc_log_threshold_;
 
-  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
-  // useful for benchmarking since it reduces time spent in GC to a low %.
-  const bool ignore_max_footprint_;
+  // Starting time of the new process; meant to be used for measuring total process CPU time.
+  uint64_t process_cpu_start_time_ns_;
+
+  // Last time (before and after) GC started; meant to be used to measure the
+  // duration between two GCs.
+  uint64_t pre_gc_last_process_cpu_time_ns_;
+  uint64_t post_gc_last_process_cpu_time_ns_;
+
+  // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
+  double pre_gc_weighted_allocated_bytes_;
+  double post_gc_weighted_allocated_bytes_;
+
+  // If we ignore the target footprint, the heap grows until it hits the heap capacity; this is
+  // useful for benchmarking since it reduces time spent in GC to a low %.
+  const bool ignore_target_footprint_;
 
   // Lock which guards zygote space creation.
   Mutex zygote_creation_lock_;
@@ -1212,14 +1287,18 @@
 
   // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
   // programs it is "cleared" making it the same as capacity.
+  // Only weakly enforced for simultaneous allocations.
   size_t growth_limit_;
 
-  // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
-  // a GC should be triggered.
-  size_t max_allowed_footprint_;
+  // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
+  // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
+  // concurrent GC case.
+  Atomic<size_t> target_footprint_;
 
   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   // it completes ahead of an allocation failing.
+  // A multiple of this is also used to determine when to trigger a GC in response to native
+  // allocation.
   size_t concurrent_start_bytes_;
 
   // Since the heap was created, how many bytes have been freed.
@@ -1232,19 +1311,18 @@
   // TLABS in their entirety, even if they have not yet been parceled out.
   Atomic<size_t> num_bytes_allocated_;
 
-  // Number of registered native bytes allocated since the last time GC was
-  // triggered. Adjusted after each RegisterNativeAllocation and
-  // RegisterNativeFree. Used to determine when to trigger GC for native
-  // allocations.
-  // See the REDESIGN section of go/understanding-register-native-allocation.
-  Atomic<size_t> new_native_bytes_allocated_;
+  // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
+  // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
+  // not include bytes allocated through the system malloc, since those are implicitly included.
+  Atomic<size_t> native_bytes_registered_;
 
-  // Number of registered native bytes allocated prior to the last time GC was
-  // triggered, for debugging purposes. The current number of registered
-  // native bytes is determined by taking the sum of
-  // old_native_bytes_allocated_ and new_native_bytes_allocated_.
+  // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
   Atomic<size_t> old_native_bytes_allocated_;
 
+  // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
+  // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
+  Atomic<uint32_t> native_objects_notified_;
+
   // Number of bytes freed by thread local buffer revokes. This will
   // cancel out the ahead-of-time bulk counting of bytes allocated in
   // rosalloc thread-local buffers.  It is temporarily accumulated
@@ -1329,10 +1407,10 @@
 
   // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
   // utilization, regardless of target utilization ratio.
-  size_t min_free_;
+  const size_t min_free_;
 
   // The ideal maximum free size, when we grow the heap for utilization.
-  size_t max_free_;
+  const size_t max_free_;
 
   // Target ideal heap utilization ratio.
   double target_utilization_;
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index d4af117..4944639 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -17,6 +17,7 @@
 #include "reference_processor.h"
 
 #include "art_field-inl.h"
+#include "base/mutex.h"
 #include "base/time_utils.h"
 #include "base/utils.h"
 #include "class_root.h"
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index c6c7836..17b546a 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
 
 #include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 #include "reference_queue.h"
 
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 5c11e50..95871da 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -17,6 +17,7 @@
 #include "reference_queue.h"
 
 #include "accounting/card_table-inl.h"
+#include "base/mutex.h"
 #include "collector/concurrent_copying.h"
 #include "heap.h"
 #include "mirror/class-inl.h"
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 09ab51a..53518cc 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -23,7 +23,7 @@
 
 #include "base/atomic.h"
 #include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/timing_logger.h"
 #include "jni.h"
 #include "obj_ptr.h"
@@ -31,6 +31,9 @@
 #include "thread_pool.h"
 
 namespace art {
+
+class Mutex;
+
 namespace mirror {
 class Reference;
 }  // namespace mirror
diff --git a/runtime/gc/scoped_gc_critical_section.h b/runtime/gc/scoped_gc_critical_section.h
index 864bf87..8ad0158 100644
--- a/runtime/gc/scoped_gc_critical_section.h
+++ b/runtime/gc/scoped_gc_critical_section.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_GC_SCOPED_GC_CRITICAL_SECTION_H_
 #define ART_RUNTIME_GC_SCOPED_GC_CRITICAL_SECTION_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "collector_type.h"
 #include "gc_cause.h"
 
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 383bf7a..6d9fd04 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -19,6 +19,8 @@
 
 #include "space.h"
 
+#include "base/mutex.h"
+
 namespace art {
 
 namespace mirror {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index c772bda..3f1d879 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -21,12 +21,14 @@
 #include <unistd.h>
 
 #include <random>
+#include <thread>
 
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/array_ref.h"
 #include "base/bit_memory_region.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
@@ -58,6 +60,7 @@
 namespace gc {
 namespace space {
 
+using android::base::StringAppendF;
 using android::base::StringPrintf;
 
 Atomic<uint32_t> ImageSpace::bitmap_index_(0);
@@ -103,9 +106,8 @@
 static bool GenerateImage(const std::string& image_filename,
                           InstructionSet image_isa,
                           std::string* error_msg) {
-  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
-  std::vector<std::string> boot_class_path;
-  Split(boot_class_path_string, ':', &boot_class_path);
+  Runtime* runtime = Runtime::Current();
+  const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
   if (boot_class_path.empty()) {
     *error_msg = "Failed to generate image because no boot class path specified";
     return false;
@@ -125,8 +127,11 @@
   image_option_string += image_filename;
   arg_vector.push_back(image_option_string);
 
-  for (size_t i = 0; i < boot_class_path.size(); i++) {
+  const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
+  DCHECK_EQ(boot_class_path.size(), boot_class_path_locations.size());
+  for (size_t i = 0u; i < boot_class_path.size(); i++) {
     arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]);
+    arg_vector.push_back(std::string("--dex-location=") + boot_class_path_locations[i]);
   }
 
   std::string oat_file_option_string("--oat-file=");
@@ -376,6 +381,297 @@
             << reinterpret_cast<const void*>(reloc.Dest() + reloc.Length()) << ")";
 }
 
+template <PointerSize kPointerSize, typename ReferenceVisitor>
+class ImageSpace::PatchObjectVisitor final {
+ public:
+  explicit PatchObjectVisitor(ReferenceVisitor reference_visitor)
+      : reference_visitor_(reference_visitor) {}
+
+  void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+    // A mirror::Class object consists of
+    //  - instance fields inherited from j.l.Object,
+    //  - instance fields inherited from j.l.Class,
+    //  - embedded tables (vtable, interface method table),
+    //  - static fields of the class itself.
+    // The reference fields are at the start of each field section (this is how the
+    // ClassLinker orders fields; except when that would create a gap between superclass
+    // fields and the first reference of the subclass due to alignment, it can be filled
+    // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
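+    // Illustrative layout (field order per the note above; offsets are examples only):
+    //   [klass_][monitor_]                          <- j.l.Object instance fields
+    //   [reference fields...][primitive fields...]  <- j.l.Class instance fields
+    //   [embedded vtable / interface method table]  <- embedded tables
+    //   [static reference fields...][static primitive fields...]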
+
+    DCHECK_ALIGNED(klass, kObjectAlignment);
+    static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
+    // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
+    // This should be the only reference field in j.l.Object and we assert that below.
+    PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
+    // Then patch the reference instance fields described by j.l.Class.class.
+    // Use the sizeof(Object) to determine where these reference fields start;
+    // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
+    // after patching but the j.l.Class may not have been patched yet.
+    mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
+    size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
+    DCHECK_NE(num_reference_instance_fields, 0u);
+    static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
+    MemberOffset instance_field_offset(sizeof(mirror::Object));
+    for (size_t i = 0; i != num_reference_instance_fields; ++i) {
+      PatchReferenceField(klass, instance_field_offset);
+      static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+                    "Heap reference sizes equality check.");
+      instance_field_offset =
+          MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
+    }
+    // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
+    // we can get a reference to j.l.Object.class and assert that it has only one
+    // reference instance field (the `klass_` patched above).
+    if (kIsDebugBuild && klass == class_class) {
+      ObjPtr<mirror::Class> object_class =
+          klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
+      CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
+    }
+    // Then patch static fields.
+    size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
+    if (num_reference_static_fields != 0u) {
+      MemberOffset static_field_offset =
+          klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
+      for (size_t i = 0; i != num_reference_static_fields; ++i) {
+        PatchReferenceField(klass, static_field_offset);
+        static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+                      "Heap reference sizes equality check.");
+        static_field_offset =
+            MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
+      }
+    }
+    // Then patch native pointers.
+    klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
+  }
+
+  template <typename T>
+  T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (ptr != nullptr) {
+      ptr = reference_visitor_(ptr);
+    }
+    return ptr;
+  }
+
+  void VisitPointerArray(mirror::PointerArray* pointer_array)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Fully patch the pointer array, including the `klass_` field.
+    PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
+
+    int32_t length = pointer_array->GetLength<kVerifyNone>();
+    for (int32_t i = 0; i != length; ++i) {
+      ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
+          pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
+      PatchNativePointer</*kMayBeNull=*/ false>(method_entry);
+    }
+  }
+
+  void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Visit all reference fields.
+    object->VisitReferences</*kVisitNativeRoots=*/ false,
+                            kVerifyNone,
+                            kWithoutReadBarrier>(*this, *this);
+    // This function should not be called for classes.
+    DCHECK(!object->IsClass<kVerifyNone>());
+  }
+
+  // Visitor for VisitReferences().
+  ALWAYS_INLINE void operator()(mirror::Object* object, MemberOffset field_offset, bool is_static)
+      const REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!is_static);
+    PatchReferenceField(object, field_offset);
+  }
+  // Visitor for VisitReferences(), java.lang.ref.Reference case.
+  ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(klass->IsTypeOfReferenceClass());
+    this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
+  }
+  // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+      const {}
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+  void VisitDexCacheArrays(mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) {
+    FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
+                                                   mirror::DexCache::StringsOffset(),
+                                                   dex_cache->NumStrings<kVerifyNone>());
+    FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
+                                                 mirror::DexCache::ResolvedTypesOffset(),
+                                                 dex_cache->NumResolvedTypes<kVerifyNone>());
+    FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
+                                                   mirror::DexCache::ResolvedMethodsOffset(),
+                                                   dex_cache->NumResolvedMethods<kVerifyNone>());
+    FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
+                                                  mirror::DexCache::ResolvedFieldsOffset(),
+                                                  dex_cache->NumResolvedFields<kVerifyNone>());
+    FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
+        dex_cache,
+        mirror::DexCache::ResolvedMethodTypesOffset(),
+        dex_cache->NumResolvedMethodTypes<kVerifyNone>());
+    FixupDexCacheArray<GcRoot<mirror::CallSite>>(
+        dex_cache,
+        mirror::DexCache::ResolvedCallSitesOffset(),
+        dex_cache->NumResolvedCallSites<kVerifyNone>());
+    FixupDexCacheArray<GcRoot<mirror::String>>(
+        dex_cache,
+        mirror::DexCache::PreResolvedStringsOffset(),
+        dex_cache->NumPreResolvedStrings<kVerifyNone>());
+  }
+
+  template <bool kMayBeNull = true, typename T>
+  ALWAYS_INLINE void PatchGcRoot(/*inout*/GcRoot<T>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
+    T* old_value = root->template Read<kWithoutReadBarrier>();
+    DCHECK(kMayBeNull || old_value != nullptr);
+    if (!kMayBeNull || old_value != nullptr) {
+      *root = GcRoot<T>(reference_visitor_(old_value));
+    }
+  }
+
+  template <bool kMayBeNull = true, typename T>
+  ALWAYS_INLINE void PatchNativePointer(/*inout*/T** entry) const {
+    if (kPointerSize == PointerSize::k64) {
+      uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
+      T* old_value = reinterpret_cast64<T*>(*raw_entry);
+      DCHECK(kMayBeNull || old_value != nullptr);
+      if (!kMayBeNull || old_value != nullptr) {
+        T* new_value = reference_visitor_(old_value);
+        *raw_entry = reinterpret_cast64<uint64_t>(new_value);
+      }
+    } else {
+      uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
+      T* old_value = reinterpret_cast32<T*>(*raw_entry);
+      DCHECK(kMayBeNull || old_value != nullptr);
+      if (!kMayBeNull || old_value != nullptr) {
+        T* new_value = reference_visitor_(old_value);
+        *raw_entry = reinterpret_cast32<uint32_t>(new_value);
+      }
+    }
+  }
+
+  template <bool kMayBeNull = true>
+  ALWAYS_INLINE void PatchReferenceField(mirror::Object* object, MemberOffset offset) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    mirror::Object* old_value =
+        object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
+    DCHECK(kMayBeNull || old_value != nullptr);
+    if (!kMayBeNull || old_value != nullptr) {
+      mirror::Object* new_value = reference_visitor_(old_value);
+      object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
+                                                /*kCheckTransaction=*/ true,
+                                                kVerifyNone>(offset, new_value);
+    }
+  }
+
+  template <typename T>
+  void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
+                  "Size check for removing std::atomic<>.");
+    PatchGcRoot(&(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
+  }
+
+  template <typename T>
+  void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
+                      sizeof(mirror::NativeDexCachePair<T>),
+                  "Size check for removing std::atomic<>.");
+    mirror::NativeDexCachePair<T> pair =
+        mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
+    if (pair.object != nullptr) {
+      pair.object = reference_visitor_(pair.object);
+      mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
+    }
+  }
+
+  void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    PatchGcRoot(&array[index]);
+  }
+
+  void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    PatchGcRoot(&array[index]);
+  }
+
+  template <typename EntryType>
+  void FixupDexCacheArray(mirror::DexCache* dex_cache,
+                          MemberOffset array_offset,
+                          uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
+    EntryType* old_array =
+        reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
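+    // A null array must go together with a declared size of zero and vice versa.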
+    DCHECK_EQ(old_array != nullptr, size != 0u);
+    if (old_array != nullptr) {
+      EntryType* new_array = reference_visitor_(old_array);
+      dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
+      for (uint32_t i = 0; i != size; ++i) {
+        FixupDexCacheArrayEntry(new_array, i);
+      }
+    }
+  }
+
+ private:
+  ReferenceVisitor reference_visitor_;
+};
+
+template <typename ObjectVisitor>
+class ImageSpace::PatchArtFieldVisitor final : public ArtFieldVisitor {
+ public:
+  explicit PatchArtFieldVisitor(const ObjectVisitor& visitor) : visitor_(visitor) {}
+
+  void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    visitor_.template PatchGcRoot</*kMayBeNull=*/ false>(&field->DeclaringClassRoot());
+  }
+
+ private:
+  const ObjectVisitor visitor_;
+};
+
+template <PointerSize kPointerSize, typename ObjectVisitor, typename CodeVisitor>
+class ImageSpace::PatchArtMethodVisitor final : public ArtMethodVisitor {
+ public:
+  explicit PatchArtMethodVisitor(const ObjectVisitor& object_visitor,
+                                 const CodeVisitor& code_visitor)
+      : object_visitor_(object_visitor),
+        code_visitor_(code_visitor) {}
+
+  void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    object_visitor_.PatchGcRoot(&method->DeclaringClassRoot());
+    void** data_address = PointerAddress(method, ArtMethod::DataOffset(kPointerSize));
+    object_visitor_.PatchNativePointer(data_address);
+    void** entrypoint_address =
+        PointerAddress(method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
+    code_visitor_.PatchNativePointer(entrypoint_address);
+  }
+
+ private:
+  void** PointerAddress(ArtMethod* method, MemberOffset offset) {
+    return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
+  }
+
+  const ObjectVisitor object_visitor_;
+  const CodeVisitor code_visitor_;
+};
+
+template <typename ReferenceVisitor>
+class ImageSpace::ClassTableVisitor final {
+ public:
+  explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
+      : reference_visitor_(reference_visitor) {}
+
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(root->AsMirrorPtr() != nullptr);
+    root->Assign(reference_visitor_(root->AsMirrorPtr()));
+  }
+
+ private:
+  ReferenceVisitor reference_visitor_;
+};
+
 // Helper class encapsulating loading, so we can access private ImageSpace members (this is a
 // nested class), but not declare functions in the header.
 class ImageSpace::Loader {
@@ -387,20 +683,58 @@
                                                   /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
+
+    const bool create_thread_pool = true;
+    std::unique_ptr<ThreadPool> thread_pool;
+    if (create_thread_pool) {
+      TimingLogger::ScopedTiming timing("CreateThreadPool", &logger);
+      ScopedThreadStateChange stsc(Thread::Current(), kNative);
+      constexpr size_t kStackSize = 64 * KB;
+      constexpr size_t kMaxRuntimeWorkers = 4u;
+      const size_t num_workers =
+          std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
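+      // E.g. this yields 4 workers on an 8-core device and 2 workers on a dual-core device.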
+      thread_pool.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize));
+      thread_pool->StartWorkers(Thread::Current());
+    }
+
     std::unique_ptr<ImageSpace> space = Init(image_filename,
                                              image_location,
                                              oat_file,
                                              &logger,
+                                             thread_pool.get(),
                                              image_reservation,
                                              error_msg);
+    if (thread_pool != nullptr) {
+      TimingLogger::ScopedTiming timing("DeleteThreadPool", &logger);
+      ScopedThreadStateChange stsc(Thread::Current(), kNative);
+      thread_pool.reset();
+    }
     if (space != nullptr) {
+      uint32_t expected_reservation_size =
+          RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
+      if (!CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
+          !CheckImageComponentCount(*space, /*expected_component_count=*/ 1u, error_msg)) {
+        return nullptr;
+      }
+
       TimingLogger::ScopedTiming timing("RelocateImage", &logger);
       ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
-      if (!RelocateInPlace(*image_header,
-                           space->GetMemMap()->Begin(),
-                           space->GetLiveBitmap(),
-                           oat_file,
-                           error_msg)) {
+      const PointerSize pointer_size = image_header->GetPointerSize();
+      bool result;
+      if (pointer_size == PointerSize::k64) {
+        result = RelocateInPlace<PointerSize::k64>(*image_header,
+                                                   space->GetMemMap()->Begin(),
+                                                   space->GetLiveBitmap(),
+                                                   oat_file,
+                                                   error_msg);
+      } else {
+        result = RelocateInPlace<PointerSize::k32>(*image_header,
+                                                   space->GetMemMap()->Begin(),
+                                                   space->GetLiveBitmap(),
+                                                   oat_file,
+                                                   error_msg);
+      }
+      if (!result) {
         return nullptr;
       }
       Runtime* runtime = Runtime::Current();
@@ -435,6 +769,7 @@
                                           const char* image_location,
                                           const OatFile* oat_file,
                                           TimingLogger* logger,
+                                          ThreadPool* thread_pool,
                                           /*inout*/MemMap* image_reservation,
                                           /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -523,6 +858,7 @@
         *image_header,
         file->Fd(),
         logger,
+        thread_pool,
         image_reservation,
         error_msg);
     if (!map.IsValid()) {
@@ -579,12 +915,41 @@
     return space;
   }
 
+  static bool CheckImageComponentCount(const ImageSpace& space,
+                                       uint32_t expected_component_count,
+                                       /*out*/std::string* error_msg) {
+    const ImageHeader& header = space.GetImageHeader();
+    if (header.GetComponentCount() != expected_component_count) {
+      *error_msg = StringPrintf("Unexpected component count in %s, received %u, expected %u",
+                                space.GetImageFilename().c_str(),
+                                header.GetComponentCount(),
+                                expected_component_count);
+      return false;
+    }
+    return true;
+  }
+
+  static bool CheckImageReservationSize(const ImageSpace& space,
+                                        uint32_t expected_reservation_size,
+                                        /*out*/std::string* error_msg) {
+    const ImageHeader& header = space.GetImageHeader();
+    if (header.GetImageReservationSize() != expected_reservation_size) {
+      *error_msg = StringPrintf("Unexpected reservation size in %s, received %u, expected %u",
+                                space.GetImageFilename().c_str(),
+                                header.GetImageReservationSize(),
+                                expected_reservation_size);
+      return false;
+    }
+    return true;
+  }
+
  private:
   static MemMap LoadImageFile(const char* image_filename,
                               const char* image_location,
                               const ImageHeader& image_header,
                               int fd,
                               TimingLogger* logger,
+                              ThreadPool* pool,
                               /*inout*/MemMap* image_reservation,
                               /*out*/std::string* error_msg) {
     TimingLogger::ScopedTiming timing("MapImageFile", logger);
@@ -627,16 +992,35 @@
         return MemMap::Invalid();
       }
       memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
+
       const uint64_t start = NanoTime();
+      Thread* const self = Thread::Current();
+      const size_t kMinBlocks = 2;
+      const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
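+      // With fewer than kMinBlocks blocks, task dispatch overhead outweighs any parallelism,
+      // so the blocks are decompressed inline on the current thread.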
       for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
-        TimingLogger::ScopedTiming timing2("LZ4 decompress image", logger);
-        if (!block.Decompress(/*out_ptr=*/map.Begin(), /*in_ptr=*/temp_map.Begin(), error_msg)) {
-          if (error_msg != nullptr) {
-            *error_msg = "Failed to decompress image block " + *error_msg;
+        auto function = [&](Thread*) {
+          const uint64_t start2 = NanoTime();
+          ScopedTrace trace("LZ4 decompress block");
+          if (!block.Decompress(/*out_ptr=*/map.Begin(),
+                                /*in_ptr=*/temp_map.Begin(),
+                                error_msg)) {
+            if (error_msg != nullptr) {
+              *error_msg = "Failed to decompress image block " + *error_msg;
+            }
           }
-          return MemMap::Invalid();
+          VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
+                      << block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
+        };
+        if (use_parallel) {
+          pool->AddTask(self, new FunctionTask(std::move(function)));
+        } else {
+          function(self);
         }
       }
+      if (use_parallel) {
+        ScopedTrace trace("Waiting for workers");
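+        // Wait(self, /*do_work=*/ true, /*may_hold_locks=*/ false) lets the current thread
+        // help execute queued decompression tasks while waiting for the workers to finish.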
+        pool->Wait(self, true, false);
+      }
       const uint64_t time = NanoTime() - start;
      // Add 1 ns to prevent a possible divide by 0.
       VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
@@ -734,11 +1118,8 @@
   class FixupObjectVisitor : public FixupVisitor {
    public:
     template<typename... Args>
-    explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
-                                const PointerSize pointer_size,
-                                Args... args)
+    explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited, Args... args)
         : FixupVisitor(args...),
-          pointer_size_(pointer_size),
           visited_(visited) {}
 
     // Fix up separately since we also need to fix up method entrypoints.
@@ -750,39 +1131,14 @@
 
     ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
                                   MemberOffset offset,
-
                                   bool is_static ATTRIBUTE_UNUSED) const
         NO_THREAD_SAFETY_ANALYSIS {
-      // There could be overlap between ranges, we must avoid visiting the same reference twice.
-      // Avoid the class field since we already fixed it up in FixupClassVisitor.
-      if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
-        // Space is not yet added to the heap, don't do a read barrier.
-        mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
-            offset);
-        // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
-        // image.
-        obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
-      }
-    }
-
-    // Visit a pointer array and forward corresponding native data. Ignores pointer arrays in the
-    // boot image. Uses the bitmap to ensure the same array is not visited multiple times.
-    template <typename Visitor>
-    void UpdatePointerArrayContents(mirror::PointerArray* array, const Visitor& visitor) const
-        NO_THREAD_SAFETY_ANALYSIS {
-      DCHECK(array != nullptr);
-      DCHECK(visitor.IsInAppImage(array));
-      // The bit for the array contents is different than the bit for the array. Since we may have
-      // already visited the array as a long / int array from walking the bitmap without knowing it
-      // was a pointer array.
-      static_assert(kObjectAlignment == 8u, "array bit may be in another object");
-      mirror::Object* const contents_bit = reinterpret_cast<mirror::Object*>(
-          reinterpret_cast<uintptr_t>(array) + kObjectAlignment);
-      // If the bit is not set then the contents have not yet been updated.
-      if (!visited_->Test(contents_bit)) {
-        array->Fixup<kVerifyNone>(array, pointer_size_, visitor);
-        visited_->Set(contents_bit);
-      }
+      // Space is not yet added to the heap, don't do a read barrier.
+      mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
+          offset);
+      // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
+      // image.
+      obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
     }
 
     // java.lang.ref.Reference visitor.
@@ -797,81 +1153,16 @@
 
     void operator()(mirror::Object* obj) const
         NO_THREAD_SAFETY_ANALYSIS {
-      if (visited_->Test(obj)) {
-        // Already visited.
-        return;
-      }
-      visited_->Set(obj);
-
-      // Handle class specially first since we need it to be updated to properly visit the rest of
-      // the instance fields.
-      {
-        mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
-        DCHECK(klass != nullptr) << "Null class in image";
-        // No AsClass since our fields aren't quite fixed up yet.
-        mirror::Class* new_klass = down_cast<mirror::Class*>(ForwardObject(klass));
-        if (klass != new_klass) {
-          obj->SetClass<kVerifyNone>(new_klass);
-        }
-        if (new_klass != klass && IsInAppImage(new_klass)) {
-          // Make sure the klass contents are fixed up since we depend on it to walk the fields.
-          operator()(new_klass);
-        }
-      }
-
-      if (obj->IsClass()) {
-        mirror::Class* klass = obj->AsClass<kVerifyNone>();
-        // Fixup super class before visiting instance fields which require
-        // information from their super class to calculate offsets.
-        mirror::Class* super_class =
-            klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>().Ptr();
-        if (super_class != nullptr) {
-          mirror::Class* new_super_class = down_cast<mirror::Class*>(ForwardObject(super_class));
-          if (new_super_class != super_class && IsInAppImage(new_super_class)) {
-            // Recursively fix all dependencies.
-            operator()(new_super_class);
-          }
-        }
-      }
-
-      obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
-          *this,
-          *this);
-      // Note that this code relies on no circular dependencies.
-      // We want to use our own class loader and not the one in the image.
-      if (obj->IsClass<kVerifyNone>()) {
-        mirror::Class* as_klass = obj->AsClass<kVerifyNone>();
-        FixupObjectAdapter visitor(boot_image_, app_image_, app_oat_);
-        as_klass->FixupNativePointers<kVerifyNone>(as_klass, pointer_size_, visitor);
-        // Deal with the pointer arrays. Use the helper function since multiple classes can reference
-        // the same arrays.
-        mirror::PointerArray* const vtable = as_klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
-        if (vtable != nullptr && IsInAppImage(vtable)) {
-          operator()(vtable);
-          UpdatePointerArrayContents(vtable, visitor);
-        }
-        mirror::IfTable* iftable = as_klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
-        // Ensure iftable arrays are fixed up since we need GetMethodArray to return the valid
-        // contents.
-        if (IsInAppImage(iftable)) {
-          operator()(iftable);
-          for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
-            if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) {
-              mirror::PointerArray* methods =
-                  iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i);
-              if (visitor.IsInAppImage(methods)) {
-                operator()(methods);
-                DCHECK(methods != nullptr);
-                UpdatePointerArrayContents(methods, visitor);
-              }
-            }
-          }
-        }
+      if (!visited_->Set(obj)) {
+        // Not already visited.
+        obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
+            *this,
+            *this);
+        CHECK(!obj->IsClass());
       }
     }
 
    private:
-    const PointerSize pointer_size_;
     gc::accounting::ContinuousSpaceBitmap* const visited_;
   };
 
@@ -951,6 +1242,7 @@
   // Relocate an image space mapped at target_base which possibly used to be at a different base
   // address. In place means modifying a single ImageSpace in place rather than relocating from
   // one ImageSpace to another.
+  template <PointerSize kPointerSize>
   static bool RelocateInPlace(ImageHeader& image_header,
                               uint8_t* target_base,
                               accounting::ContinuousSpaceBitmap* bitmap,
@@ -962,7 +1254,6 @@
     uint32_t boot_image_end = 0;
     uint32_t boot_oat_begin = 0;
     uint32_t boot_oat_end = 0;
-    const PointerSize pointer_size = image_header.GetPointerSize();
     gc::Heap* const heap = Runtime::Current()->GetHeap();
     heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
     if (boot_image_begin == boot_image_end) {
@@ -1004,11 +1295,8 @@
       return true;
     }
     ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
-    // Need to update the image to be at the target base.
-    const ImageSection& objects_section = image_header.GetObjectsSection();
-    uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
-    uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
     FixupObjectAdapter fixup_adapter(boot_image, app_image, app_oat);
+    PatchObjectVisitor<kPointerSize, FixupObjectAdapter> patch_object_visitor(fixup_adapter);
     if (fixup_image) {
       // Two pass approach, fix up all classes first, then fix up non class-objects.
       // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1016,16 +1304,64 @@
           gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
                                                         target_base,
                                                         image_header.GetImageSize()));
-      FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
-                                              pointer_size,
-                                              boot_image,
-                                              app_image,
-                                              app_oat);
-      TimingLogger::ScopedTiming timing("Fixup classes", &logger);
-      // Fixup objects may read fields in the boot image, use the mutator lock here for sanity. Though
-      // its probably not required.
+      FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(), boot_image, app_image, app_oat);
+      {
+        TimingLogger::ScopedTiming timing("Fixup classes", &logger);
+        const auto& class_table_section = image_header.GetClassTableSection();
+        if (class_table_section.Size() > 0u) {
+          ScopedObjectAccess soa(Thread::Current());
+          ClassTableVisitor class_table_visitor(fixup_adapter);
+          size_t read_count = 0u;
+          const uint8_t* data = target_base + class_table_section.Offset();
+          // We avoid making a copy of the data since we want modifications to be propagated to the
+          // memory map.
+          ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+          for (ClassTable::TableSlot& slot : temp_set) {
+            slot.VisitRoot(class_table_visitor);
+            mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
+            if (!fixup_adapter.IsInAppImage(klass)) {
+              continue;
+            }
+            const bool already_marked = visited_bitmap->Set(klass);
+            CHECK(!already_marked) << "App image class already visited";
+            patch_object_visitor.VisitClass(klass);
+            // Then patch the non-embedded vtable and iftable.
+            mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+            if (vtable != nullptr &&
+                fixup_object_visitor.IsInAppImage(vtable) &&
+                !visited_bitmap->Set(vtable)) {
+              patch_object_visitor.VisitPointerArray(vtable);
+            }
+            auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+            if (iftable != nullptr && fixup_object_visitor.IsInAppImage(iftable)) {
+              // Avoid processing the fields of the iftable here since we process them later
+              // below anyway.
+              int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
+              for (int32_t i = 0; i != ifcount; ++i) {
+                mirror::PointerArray* unpatched_ifarray =
+                    iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
+                if (unpatched_ifarray != nullptr) {
+                  // The iftable has not been patched, so we need to explicitly adjust the pointer.
+                  mirror::PointerArray* ifarray = fixup_adapter(unpatched_ifarray);
+                  if (fixup_object_visitor.IsInAppImage(ifarray) &&
+                      !visited_bitmap->Set(ifarray)) {
+                    patch_object_visitor.VisitPointerArray(ifarray);
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+
+      // Fixing up objects may read fields in the boot image; use the mutator lock here for
+      // sanity, though it's probably not required.
+      TimingLogger::ScopedTiming timing("Fixup objects", &logger);
       ScopedObjectAccess soa(Thread::Current());
-      timing.NewTiming("Fixup objects");
+      // Need to update the image to be at the target base.
+      const ImageSection& objects_section = image_header.GetObjectsSection();
+      uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
+      uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
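+      // VisitMarkedRange walks the objects whose bits are set in the live bitmap within
+      // [objects_begin, objects_end) and applies fixup_object_visitor to each of them.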
       bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
       // Fixup image roots.
       CHECK(app_image.InSource(reinterpret_cast<uintptr_t>(
@@ -1037,96 +1373,19 @@
           AsObjectArray<mirror::DexCache, kVerifyNone>();
       for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
         mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
-        // Fix up dex cache pointers.
-        mirror::StringDexCacheType* strings = dex_cache->GetStrings();
-        if (strings != nullptr) {
-          mirror::StringDexCacheType* new_strings = fixup_adapter.ForwardObject(strings);
-          if (strings != new_strings) {
-            dex_cache->SetStrings(new_strings);
-          }
-          dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter);
-        }
-        mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes();
-        if (types != nullptr) {
-          mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types);
-          if (types != new_types) {
-            dex_cache->SetResolvedTypes(new_types);
-          }
-          dex_cache->FixupResolvedTypes<kWithoutReadBarrier>(new_types, fixup_adapter);
-        }
-        mirror::MethodDexCacheType* methods = dex_cache->GetResolvedMethods();
-        if (methods != nullptr) {
-          mirror::MethodDexCacheType* new_methods = fixup_adapter.ForwardObject(methods);
-          if (methods != new_methods) {
-            dex_cache->SetResolvedMethods(new_methods);
-          }
-          for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) {
-            auto pair = mirror::DexCache::GetNativePairPtrSize(new_methods, j, pointer_size);
-            ArtMethod* orig = pair.object;
-            ArtMethod* copy = fixup_adapter.ForwardObject(orig);
-            if (orig != copy) {
-              pair.object = copy;
-              mirror::DexCache::SetNativePairPtrSize(new_methods, j, pair, pointer_size);
-            }
-          }
-        }
-        mirror::FieldDexCacheType* fields = dex_cache->GetResolvedFields();
-        if (fields != nullptr) {
-          mirror::FieldDexCacheType* new_fields = fixup_adapter.ForwardObject(fields);
-          if (fields != new_fields) {
-            dex_cache->SetResolvedFields(new_fields);
-          }
-          for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
-            mirror::FieldDexCachePair orig =
-                mirror::DexCache::GetNativePairPtrSize(new_fields, j, pointer_size);
-            mirror::FieldDexCachePair copy(fixup_adapter.ForwardObject(orig.object), orig.index);
-            if (orig.object != copy.object) {
-              mirror::DexCache::SetNativePairPtrSize(new_fields, j, copy, pointer_size);
-            }
-          }
-        }
-
-        mirror::MethodTypeDexCacheType* method_types = dex_cache->GetResolvedMethodTypes();
-        if (method_types != nullptr) {
-          mirror::MethodTypeDexCacheType* new_method_types =
-              fixup_adapter.ForwardObject(method_types);
-          if (method_types != new_method_types) {
-            dex_cache->SetResolvedMethodTypes(new_method_types);
-          }
-          dex_cache->FixupResolvedMethodTypes<kWithoutReadBarrier>(new_method_types, fixup_adapter);
-        }
-        GcRoot<mirror::CallSite>* call_sites = dex_cache->GetResolvedCallSites();
-        if (call_sites != nullptr) {
-          GcRoot<mirror::CallSite>* new_call_sites = fixup_adapter.ForwardObject(call_sites);
-          if (call_sites != new_call_sites) {
-            dex_cache->SetResolvedCallSites(new_call_sites);
-          }
-          dex_cache->FixupResolvedCallSites<kWithoutReadBarrier>(new_call_sites, fixup_adapter);
-        }
-
-        GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
-        if (preresolved_strings != nullptr) {
-          GcRoot<mirror::String>* new_array = fixup_adapter.ForwardObject(preresolved_strings);
-          if (preresolved_strings != new_array) {
-            dex_cache->SetPreResolvedStrings(new_array);
-          }
-          const size_t num_preresolved_strings = dex_cache->NumPreResolvedStrings();
-          for (size_t j = 0; j < num_preresolved_strings; ++j) {
-            new_array[j] = GcRoot<mirror::String>(
-                fixup_adapter(new_array[j].Read<kWithoutReadBarrier>()));
-          }
-        }
+        CHECK(dex_cache != nullptr);
+        patch_object_visitor.VisitDexCacheArrays(dex_cache);
       }
     }
     {
       // Only touches objects in the app image, no need for mutator lock.
       TimingLogger::ScopedTiming timing("Fixup methods", &logger);
       FixupArtMethodVisitor method_visitor(fixup_image,
-                                           pointer_size,
+                                           kPointerSize,
                                            boot_image,
                                            app_image,
                                            app_oat);
-      image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size);
+      image_header.VisitPackedArtMethods(&method_visitor, target_base, kPointerSize);
     }
     if (fixup_image) {
       {
@@ -1137,26 +1396,14 @@
       }
       {
         TimingLogger::ScopedTiming timing("Fixup imt", &logger);
-        image_header.VisitPackedImTables(fixup_adapter, target_base, pointer_size);
+        image_header.VisitPackedImTables(fixup_adapter, target_base, kPointerSize);
       }
       {
         TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
-        image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
+        image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, kPointerSize);
       }
       // In the app image case, the image methods are actually in the boot image.
       image_header.RelocateImageMethods(boot_image.Delta());
-      const auto& class_table_section = image_header.GetClassTableSection();
-      if (class_table_section.Size() > 0u) {
-        // Note that we require that ReadFromMemory does not make an internal copy of the elements.
-        // This also relies on visit roots not doing any verification which could fail after we update
-        // the roots to be the image addresses.
-        ScopedObjectAccess soa(Thread::Current());
-        WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-        ClassTable temp_table;
-        temp_table.ReadFromMemory(target_base + class_table_section.Offset());
-        FixupRootVisitor root_visitor(boot_image, app_image, app_oat);
-        temp_table.VisitRoots(root_visitor);
-      }
       // Fix up the intern table.
       const auto& intern_table_section = image_header.GetInternedStringsSection();
       if (intern_table_section.Size() > 0u) {
@@ -1185,8 +1432,13 @@
 
 class ImageSpace::BootImageLoader {
  public:
-  BootImageLoader(const std::string& image_location, InstructionSet image_isa)
-      : image_location_(image_location),
+  BootImageLoader(const std::vector<std::string>& boot_class_path,
+                  const std::vector<std::string>& boot_class_path_locations,
+                  const std::string& image_location,
+                  InstructionSet image_isa)
+      : boot_class_path_(boot_class_path),
+        boot_class_path_locations_(boot_class_path_locations),
+        image_location_(image_location),
         image_isa_(image_isa),
         is_zygote_(Runtime::Current()->IsZygote()),
         has_system_(false),
@@ -1234,57 +1486,17 @@
                       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
     std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
-    std::vector<std::string> locations;
-    if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
-      return false;
-    }
-    uint32_t image_start;
-    uint32_t image_end;
-    if (!GetBootImageAddressRange(filename, &image_start, &image_end, error_msg)) {
-      return false;
-    }
-    if (locations.size() > 1u) {
-      std::string last_filename = GetSystemImageFilename(locations.back().c_str(), image_isa_);
-      uint32_t dummy;
-      if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, error_msg)) {
-        return false;
-      }
-    }
-    MemMap image_reservation;
-    MemMap local_extra_reservation;
-    if (!ReserveBootImageMemory(/*reservation_size=*/ image_end - image_start,
-                                image_start,
-                                extra_reservation_size,
-                                &image_reservation,
-                                &local_extra_reservation,
-                                error_msg)) {
-      return false;
-    }
 
-    std::vector<std::unique_ptr<ImageSpace>> spaces;
-    spaces.reserve(locations.size());
-    for (const std::string& location : locations) {
-      filename = GetSystemImageFilename(location.c_str(), image_isa_);
-      spaces.push_back(Load(location, filename, &logger, &image_reservation, error_msg));
-      if (spaces.back() == nullptr) {
-        return false;
-      }
-    }
-    for (std::unique_ptr<ImageSpace>& space : spaces) {
-      static constexpr bool kValidateOatFile = false;
-      if (!OpenOatFile(space.get(), kValidateOatFile, &logger, &image_reservation, error_msg)) {
-        return false;
-      }
-    }
-    if (!CheckReservationExhausted(image_reservation, error_msg)) {
+    if (!LoadFromFile(filename,
+                      /*validate_oat_file=*/ false,
+                      extra_reservation_size,
+                      &logger,
+                      boot_image_spaces,
+                      extra_reservation,
+                      error_msg)) {
       return false;
     }
 
-    MaybeRelocateSpaces(spaces, &logger);
-    InitRuntimeMethods(spaces);
-    boot_image_spaces->swap(spaces);
-    *extra_reservation = std::move(local_extra_reservation);
-
     if (VLOG_IS_ON(image)) {
       LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromSystem exiting "
           << boot_image_spaces->front();
@@ -1301,65 +1513,17 @@
       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
     DCHECK(DalvikCacheExists());
-    std::vector<std::string> locations;
-    if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
-      return false;
-    }
-    uint32_t image_start;
-    uint32_t image_end;
-    if (!GetBootImageAddressRange(cache_filename_, &image_start, &image_end, error_msg)) {
-      return false;
-    }
-    if (locations.size() > 1u) {
-      std::string last_filename;
-      if (!GetDalvikCacheFilename(locations.back().c_str(),
-                                  dalvik_cache_.c_str(),
-                                  &last_filename,
-                                  error_msg)) {
-        return false;
-      }
-      uint32_t dummy;
-      if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, error_msg)) {
-        return false;
-      }
-    }
-    MemMap image_reservation;
-    MemMap local_extra_reservation;
-    if (!ReserveBootImageMemory(/*reservation_size=*/ image_end - image_start,
-                                image_start,
-                                extra_reservation_size,
-                                &image_reservation,
-                                &local_extra_reservation,
-                                error_msg)) {
-      return false;
-    }
 
-    std::vector<std::unique_ptr<ImageSpace>> spaces;
-    spaces.reserve(locations.size());
-    for (const std::string& location : locations) {
-      std::string filename;
-      if (!GetDalvikCacheFilename(location.c_str(), dalvik_cache_.c_str(), &filename, error_msg)) {
-        return false;
-      }
-      spaces.push_back(Load(location, filename, &logger, &image_reservation, error_msg));
-      if (spaces.back() == nullptr) {
-        return false;
-      }
-    }
-    for (std::unique_ptr<ImageSpace>& space : spaces) {
-      if (!OpenOatFile(space.get(), validate_oat_file, &logger, &image_reservation, error_msg)) {
-        return false;
-      }
-    }
-    if (!CheckReservationExhausted(image_reservation, error_msg)) {
+    if (!LoadFromFile(cache_filename_,
+                      validate_oat_file,
+                      extra_reservation_size,
+                      &logger,
+                      boot_image_spaces,
+                      extra_reservation,
+                      error_msg)) {
       return false;
     }
 
-    MaybeRelocateSpaces(spaces, &logger);
-    InitRuntimeMethods(spaces);
-    boot_image_spaces->swap(spaces);
-    *extra_reservation = std::move(local_extra_reservation);
-
     if (VLOG_IS_ON(image)) {
       LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromDalvikCache exiting "
           << boot_image_spaces->front();
@@ -1369,44 +1533,100 @@
   }
 
  private:
-  template <typename T>
-  ALWAYS_INLINE static T* RelocatedAddress(T* src, uint32_t diff) {
-    DCHECK(src != nullptr);
-    return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff);
-  }
-
-  template <bool kMayBeNull = true, typename T>
-  ALWAYS_INLINE static void PatchGcRoot(uint32_t diff, /*inout*/GcRoot<T>* root)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
-    T* old_value = root->template Read<kWithoutReadBarrier>();
-    DCHECK(kMayBeNull || old_value != nullptr);
-    if (!kMayBeNull || old_value != nullptr) {
-      *root = GcRoot<T>(RelocatedAddress(old_value, diff));
+  bool LoadFromFile(
+      const std::string& filename,
+      bool validate_oat_file,
+      size_t extra_reservation_size,
+      TimingLogger* logger,
+      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/MemMap* extra_reservation,
+      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+    ImageHeader system_hdr;
+    if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
+      *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
+      return false;
     }
-  }
+    if (system_hdr.GetComponentCount() == 0u ||
+        system_hdr.GetComponentCount() > boot_class_path_.size()) {
+      *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+                                    "expected non-zero and <= %zu",
+                                filename.c_str(),
+                                system_hdr.GetComponentCount(),
+                                boot_class_path_.size());
+      return false;
+    }
+    MemMap image_reservation;
+    MemMap local_extra_reservation;
+    if (!ReserveBootImageMemory(system_hdr.GetImageReservationSize(),
+                                reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin()),
+                                extra_reservation_size,
+                                &image_reservation,
+                                &local_extra_reservation,
+                                error_msg)) {
+      return false;
+    }
 
-  template <PointerSize kPointerSize, bool kMayBeNull = true, typename T>
-  ALWAYS_INLINE static void PatchNativePointer(uint32_t diff, /*inout*/T** entry) {
-    if (kPointerSize == PointerSize::k64) {
-      uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
-      T* old_value = reinterpret_cast64<T*>(*raw_entry);
-      DCHECK(kMayBeNull || old_value != nullptr);
-      if (!kMayBeNull || old_value != nullptr) {
-        T* new_value = RelocatedAddress(old_value, diff);
-        *raw_entry = reinterpret_cast64<uint64_t>(new_value);
+    ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(),
+                                                   system_hdr.GetComponentCount());
+    std::vector<std::string> locations =
+        ExpandMultiImageLocations(provided_locations, image_location_);
+    std::vector<std::string> filenames =
+        ExpandMultiImageLocations(provided_locations, filename);
+    DCHECK_EQ(locations.size(), filenames.size());
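+    // Illustrative expansion (names assumed): an image location "/system/framework/boot.art"
+    // with locations "core-oj.jar:core-libart.jar" expands to image locations
+    // "/system/framework/boot.art" and "/system/framework/boot-core-libart.art".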
+    std::vector<std::unique_ptr<ImageSpace>> spaces;
+    spaces.reserve(locations.size());
+    for (std::size_t i = 0u, size = locations.size(); i != size; ++i) {
+      spaces.push_back(Load(locations[i], filenames[i], logger, &image_reservation, error_msg));
+      const ImageSpace* space = spaces.back().get();
+      if (space == nullptr) {
+        return false;
       }
-    } else {
-      uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
-      T* old_value = reinterpret_cast32<T*>(*raw_entry);
-      DCHECK(kMayBeNull || old_value != nullptr);
-      if (!kMayBeNull || old_value != nullptr) {
-        T* new_value = RelocatedAddress(old_value, diff);
-        *raw_entry = reinterpret_cast32<uint32_t>(new_value);
+      uint32_t expected_component_count = (i == 0u) ? system_hdr.GetComponentCount() : 0u;
+      uint32_t expected_reservation_size = (i == 0u) ? system_hdr.GetImageReservationSize() : 0u;
+      if (!Loader::CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
+          !Loader::CheckImageComponentCount(*space, expected_component_count, error_msg)) {
+        return false;
       }
     }
+    for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
+      std::string expected_boot_class_path =
+          (i == 0u) ? android::base::Join(provided_locations, ':') : std::string();
+      if (!OpenOatFile(spaces[i].get(),
+                       boot_class_path_[i],
+                       expected_boot_class_path,
+                       validate_oat_file,
+                       logger,
+                       &image_reservation,
+                       error_msg)) {
+        return false;
+      }
+    }
+    if (!CheckReservationExhausted(image_reservation, error_msg)) {
+      return false;
+    }
+
+    MaybeRelocateSpaces(spaces, logger);
+    InitRuntimeMethods(spaces);
+    boot_image_spaces->swap(spaces);
+    *extra_reservation = std::move(local_extra_reservation);
+    return true;
   }
 
+ private:
+  class RelocateVisitor {
+   public:
+    explicit RelocateVisitor(uint32_t diff) : diff_(diff) {}
+
+    template <typename T>
+    ALWAYS_INLINE T* operator()(T* src) const {
+      DCHECK(src != nullptr);
+      return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff_);
+    }
+
+   private:
+    const uint32_t diff_;
+  };
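The RelocateVisitor above replaces the per-call-site RelocatedAddress/PatchNativePointer helpers with one reusable functor: relocation is a single unsigned 32-bit addition. A minimal standalone sketch of the same arithmetic, assuming the image is mapped in the low 4 GiB (as ART requires for boot images) and using plain casts in place of the reinterpret_cast32 helpers:

    #include <cstdint>

    // Shift a pointer by a fixed 32-bit delta. Unsigned wraparound makes a
    // "negative" delta (new_base - old_base computed in uint32_t) work as well.
    template <typename T>
    T* Relocate(T* src, uint32_t diff) {
      uint32_t old_addr = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(src));
      return reinterpret_cast<T*>(static_cast<uintptr_t>(old_addr + diff));
    }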
+
   class PatchedObjectsMap {
    public:
     PatchedObjectsMap(uint8_t* image_space_begin, size_t size)
@@ -1442,266 +1662,14 @@
     BitMemoryRegion visited_objects_;
   };
 
-  class PatchArtFieldVisitor final : public ArtFieldVisitor {
-   public:
-    explicit PatchArtFieldVisitor(uint32_t diff)
-        : diff_(diff) {}
-
-    void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
-      PatchGcRoot</*kMayBeNull=*/ false>(diff_, &field->DeclaringClassRoot());
-    }
-
-   private:
-    const uint32_t diff_;
-  };
-
-  template <PointerSize kPointerSize>
-  class PatchArtMethodVisitor final : public ArtMethodVisitor {
-   public:
-    explicit PatchArtMethodVisitor(uint32_t diff)
-        : diff_(diff) {}
-
-    void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
-      PatchGcRoot(diff_, &method->DeclaringClassRoot());
-      void** data_address = PointerAddress(method, ArtMethod::DataOffset(kPointerSize));
-      PatchNativePointer<kPointerSize>(diff_, data_address);
-      void** entrypoint_address =
-          PointerAddress(method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
-      PatchNativePointer<kPointerSize>(diff_, entrypoint_address);
-    }
-
-   private:
-    void** PointerAddress(ArtMethod* method, MemberOffset offset) {
-      return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
-    }
-
-    const uint32_t diff_;
-  };
-
-  class ClassTableVisitor final {
-   public:
-    explicit ClassTableVisitor(uint32_t diff) : diff_(diff) {}
-
-    void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      DCHECK(root->AsMirrorPtr() != nullptr);
-      root->Assign(RelocatedAddress(root->AsMirrorPtr(), diff_));
-    }
-
-   private:
-    const uint32_t diff_;
-  };
-
-  template <PointerSize kPointerSize>
-  class PatchObjectVisitor final {
-   public:
-    explicit PatchObjectVisitor(uint32_t diff)
-        : diff_(diff) {}
-
-    void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
-      // A mirror::Class object consists of
-      //  - instance fields inherited from j.l.Object,
-      //  - instance fields inherited from j.l.Class,
-      //  - embedded tables (vtable, interface method table),
-      //  - static fields of the class itself.
-      // The reference fields are at the start of each field section (this is how the
-      // ClassLinker orders fields; except when that would create a gap between superclass
-      // fields and the first reference of the subclass due to alignment, it can be filled
-      // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
-
-      DCHECK_ALIGNED(klass, kObjectAlignment);
-      static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
-      // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
-      // This should be the only reference field in j.l.Object and we assert that below.
-      PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
-      // Then patch the reference instance fields described by j.l.Class.class.
-      // Use the sizeof(Object) to determine where these reference fields start;
-      // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
-      // after patching but the j.l.Class may not have been patched yet.
-      mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
-      size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
-      DCHECK_NE(num_reference_instance_fields, 0u);
-      static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
-      MemberOffset instance_field_offset(sizeof(mirror::Object));
-      for (size_t i = 0; i != num_reference_instance_fields; ++i) {
-        PatchReferenceField(klass, instance_field_offset);
-        static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
-                      "Heap reference sizes equality check.");
-        instance_field_offset =
-            MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
-      }
-      // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
-      // we can get a reference to j.l.Object.class and assert that it has only one
-      // reference instance field (the `klass_` patched above).
-      if (kIsDebugBuild && klass == class_class) {
-        ObjPtr<mirror::Class> object_class =
-            klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
-        CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
-      }
-      // Then patch static fields.
-      size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
-      if (num_reference_static_fields != 0u) {
-        MemberOffset static_field_offset =
-            klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
-        for (size_t i = 0; i != num_reference_static_fields; ++i) {
-          PatchReferenceField(klass, static_field_offset);
-          static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
-                        "Heap reference sizes equality check.");
-          static_field_offset =
-              MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
-        }
-      }
-      // Then patch native pointers.
-      klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
-    }
-
-    template <typename T>
-    T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (ptr != nullptr) {
-        ptr = RelocatedAddress(ptr, diff_);
-      }
-      return ptr;
-    }
-
-    void VisitPointerArray(mirror::PointerArray* pointer_array)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      // Fully patch the pointer array, including the `klass_` field.
-      PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
-
-      int32_t length = pointer_array->GetLength<kVerifyNone>();
-      for (int32_t i = 0; i != length; ++i) {
-        ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
-            pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
-        PatchNativePointer<kPointerSize, /*kMayBeNull=*/ false>(diff_, method_entry);
-      }
-    }
-
-    void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
-      // Visit all reference fields.
-      object->VisitReferences</*kVisitNativeRoots=*/ false,
-                              kVerifyNone,
-                              kWithoutReadBarrier>(*this, *this);
-      // This function should not be called for classes.
-      DCHECK(!object->IsClass<kVerifyNone>());
-    }
-
-    // Visitor for VisitReferences().
-    ALWAYS_INLINE void operator()(mirror::Object* object, MemberOffset field_offset, bool is_static)
-        const REQUIRES_SHARED(Locks::mutator_lock_) {
-      DCHECK(!is_static);
-      PatchReferenceField(object, field_offset);
-    }
-    // Visitor for VisitReferences(), java.lang.ref.Reference case.
-    ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      DCHECK(klass->IsTypeOfReferenceClass());
-      this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
-    }
-    // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
-    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-        const {}
-    void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
-
-    void VisitDexCacheArrays(mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) {
-      FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
-                                                     mirror::DexCache::StringsOffset(),
-                                                     dex_cache->NumStrings<kVerifyNone>());
-      FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
-                                                   mirror::DexCache::ResolvedTypesOffset(),
-                                                   dex_cache->NumResolvedTypes<kVerifyNone>());
-      FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
-                                                     mirror::DexCache::ResolvedMethodsOffset(),
-                                                     dex_cache->NumResolvedMethods<kVerifyNone>());
-      FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
-                                                    mirror::DexCache::ResolvedFieldsOffset(),
-                                                    dex_cache->NumResolvedFields<kVerifyNone>());
-      FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
-          dex_cache,
-          mirror::DexCache::ResolvedMethodTypesOffset(),
-          dex_cache->NumResolvedMethodTypes<kVerifyNone>());
-      FixupDexCacheArray<GcRoot<mirror::CallSite>>(
-          dex_cache,
-          mirror::DexCache::ResolvedCallSitesOffset(),
-          dex_cache->NumResolvedCallSites<kVerifyNone>());
-      FixupDexCacheArray<GcRoot<mirror::String>>(
-          dex_cache,
-          mirror::DexCache::PreResolvedStringsOffset(),
-          dex_cache->NumPreResolvedStrings<kVerifyNone>());
-    }
-
-   private:
-    template <bool kMayBeNull = true>
-    ALWAYS_INLINE void PatchReferenceField(mirror::Object* object, MemberOffset offset) const
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      mirror::Object* old_value =
-          object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
-      DCHECK(kMayBeNull || old_value != nullptr);
-      if (!kMayBeNull || old_value != nullptr) {
-        mirror::Object* new_value = RelocatedAddress(old_value, diff_);
-        object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
-                                                  /*kCheckTransaction=*/ true,
-                                                  kVerifyNone>(offset, new_value);
-      }
-    }
-
-    template <typename T>
-    void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
-                    "Size check for removing std::atomic<>.");
-      PatchGcRoot(diff_, &(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
-    }
-
-    template <typename T>
-    void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
-                        sizeof(mirror::NativeDexCachePair<T>),
-                    "Size check for removing std::atomic<>.");
-      mirror::NativeDexCachePair<T> pair =
-          mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
-      if (pair.object != nullptr) {
-        pair.object = RelocatedAddress(pair.object, diff_);
-        mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
-      }
-    }
-
-    void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      PatchGcRoot(diff_, &array[index]);
-    }
-
-    void FixupDexCacheArrayEntry(GcRoot<mirror::String>* array, uint32_t index)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      PatchGcRoot(diff_, &array[index]);
-    }
-
-    template <typename EntryType>
-    void FixupDexCacheArray(mirror::DexCache* dex_cache,
-                            MemberOffset array_offset,
-                            uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
-      EntryType* old_array =
-          reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
-      DCHECK_EQ(old_array != nullptr, size != 0u);
-      if (old_array != nullptr) {
-        EntryType* new_array = RelocatedAddress(old_array, diff_);
-        dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
-        for (uint32_t i = 0; i != size; ++i) {
-          FixupDexCacheArrayEntry(new_array, i);
-        }
-      }
-    }
-
-    const uint32_t diff_;
-  };
-
   template <PointerSize kPointerSize>
   static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
                                uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
     PatchedObjectsMap patched_objects(spaces.front()->Begin(),
                                       spaces.back()->End() - spaces.front()->Begin());
-    PatchObjectVisitor<kPointerSize> patch_object_visitor(diff);
+    using PatchRelocateVisitor = PatchObjectVisitor<kPointerSize, RelocateVisitor>;
+    RelocateVisitor relocate_visitor(diff);
+    PatchRelocateVisitor patch_object_visitor(relocate_visitor);
 
     mirror::Class* dcheck_class_class = nullptr;  // Used only for a DCHECK().
     for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
@@ -1715,13 +1683,14 @@
 
       // Patch fields and methods.
       const ImageHeader& image_header = space->GetImageHeader();
-      PatchArtFieldVisitor field_visitor(diff);
+      PatchArtFieldVisitor<PatchRelocateVisitor> field_visitor(patch_object_visitor);
       image_header.VisitPackedArtFields(&field_visitor, space->Begin());
-      PatchArtMethodVisitor<kPointerSize> method_visitor(diff);
+      PatchArtMethodVisitor<kPointerSize, PatchRelocateVisitor, PatchRelocateVisitor>
+          method_visitor(patch_object_visitor, patch_object_visitor);
       image_header.VisitPackedArtMethods(&method_visitor, space->Begin(), kPointerSize);
-      auto method_table_visitor = [diff](ArtMethod* method) {
+      auto method_table_visitor = [&](ArtMethod* method) {
         DCHECK(method != nullptr);
-        return RelocatedAddress(method, diff);
+        return relocate_visitor(method);
       };
       image_header.VisitPackedImTables(method_table_visitor, space->Begin(), kPointerSize);
       image_header.VisitPackedImtConflictTables(method_table_visitor, space->Begin(), kPointerSize);
@@ -1732,7 +1701,7 @@
         size_t read_count;
         InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
         for (GcRoot<mirror::String>& slot : temp_set) {
-          PatchGcRoot</*kMayBeNull=*/ false>(diff, &slot);
+          patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(&slot);
         }
       }
 
@@ -1743,7 +1712,7 @@
         size_t read_count;
         ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
         DCHECK(!temp_set.empty());
-        ClassTableVisitor class_table_visitor(diff);
+        ClassTableVisitor class_table_visitor(relocate_visitor);
         for (ClassTable::TableSlot& slot : temp_set) {
           slot.VisitRoot(class_table_visitor);
           mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
@@ -1772,7 +1741,7 @@
                   iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
               if (unpatched_ifarray != nullptr) {
                 // The iftable has not been patched, so we need to explicitly adjust the pointer.
-                mirror::PointerArray* ifarray = RelocatedAddress(unpatched_ifarray, diff);
+                mirror::PointerArray* ifarray = relocate_visitor(unpatched_ifarray);
                 if (!patched_objects.IsVisited(ifarray)) {
                   patched_objects.MarkVisited(ifarray);
                   patch_object_visitor.VisitPointerArray(ifarray);
@@ -1828,7 +1797,7 @@
             ObjPtr<mirror::Executable> as_executable =
                 ObjPtr<mirror::Executable>::DownCast(MakeObjPtr(object));
             ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
-            ArtMethod* patched_method = RelocatedAddress(unpatched_method, diff);
+            ArtMethod* patched_method = relocate_visitor(unpatched_method);
             as_executable->SetArtMethod</*kTransactionActive=*/ false,
                                         /*kCheckTransaction=*/ true,
                                         kVerifyNone>(patched_method);
@@ -1867,8 +1836,6 @@
     DCHECK(!spaces.empty());
     ImageSpace* space = spaces[0].get();
     const ImageHeader& image_header = space->GetImageHeader();
-    // Use oat_file_non_owned_ from the `space` to set the runtime methods.
-    runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
     runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod));
     runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod));
     runtime->SetImtUnimplementedMethod(
@@ -1927,11 +1894,14 @@
                         image_location.c_str(),
                         /*oat_file=*/ nullptr,
                         logger,
+                        /*thread_pool=*/ nullptr,
                         image_reservation,
                         error_msg);
   }
 
   bool OpenOatFile(ImageSpace* space,
+                   const std::string& dex_filename,
+                   const std::string& expected_boot_class_path,
                    bool validate_oat_file,
                    TimingLogger* logger,
                    /*inout*/MemMap* image_reservation,
@@ -1947,13 +1917,15 @@
       TimingLogger::ScopedTiming timing("OpenOatFile", logger);
       std::string oat_filename =
           ImageHeader::GetOatLocationFromImageLocation(space->GetImageFilename());
+      std::string oat_location =
+          ImageHeader::GetOatLocationFromImageLocation(space->GetImageLocation());
 
       oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
                                    oat_filename,
-                                   oat_filename,
+                                   oat_location,
                                    !Runtime::Current()->IsAotCompiler(),
                                    /*low_4gb=*/ false,
-                                   /*abs_dex_location=*/ nullptr,
+                                   /*abs_dex_location=*/ dex_filename.c_str(),
                                    image_reservation,
                                    error_msg));
       if (oat_file == nullptr) {
@@ -1974,6 +1946,17 @@
                                   space->GetName());
         return false;
       }
+      const char* oat_boot_class_path =
+          oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
+      oat_boot_class_path = (oat_boot_class_path != nullptr) ? oat_boot_class_path : "";
+      if (expected_boot_class_path != oat_boot_class_path) {
+        *error_msg = StringPrintf("Failed to match oat boot class path %s to expected "
+                                  "boot class path %s in image %s",
+                                  oat_boot_class_path,
+                                  expected_boot_class_path.c_str(),
+                                  space->GetName());
+        return false;
+      }
       ptrdiff_t relocation_diff = space->Begin() - image_header.GetImageBegin();
       CHECK(image_header.GetOatDataBegin() != nullptr);
       uint8_t* oat_data_begin = image_header.GetOatDataBegin() + relocation_diff;
@@ -1999,58 +1982,14 @@
     return true;
   }
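Only the primary oat file is expected to record the boot class path: in the loading loop earlier in this change, expected_boot_class_path is the ':'-joined location list for the first space and the empty string for every secondary oat file. For illustration only, with hypothetical locations:

    // i == 0: expected_boot_class_path = "/dir/foo.jar:/dir/bar.jar"
    // i  > 0: expected_boot_class_path = ""  (no OatHeader::kBootClassPathKey value)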
 
-  // Extract boot class path from oat file associated with `image_filename`
-  // and list all associated image locations.
-  static bool GetBootClassPathImageLocations(const std::string& image_location,
-                                             const std::string& image_filename,
-                                             /*out*/ std::vector<std::string>* all_locations,
-                                             /*out*/ std::string* error_msg) {
-    std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_filename);
-    std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
-                                                    oat_filename,
-                                                    oat_filename,
-                                                    /*executable=*/ false,
-                                                    /*low_4gb=*/ false,
-                                                    /*abs_dex_location=*/ nullptr,
-                                                    /*reservation=*/ nullptr,
-                                                    error_msg));
-    if (oat_file == nullptr) {
-      *error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
-                                oat_filename.c_str(),
-                                image_filename.c_str(),
-                                error_msg->c_str());
-      return false;
-    }
-    const OatHeader& oat_header = oat_file->GetOatHeader();
-    const char* boot_classpath = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
-    all_locations->push_back(image_location);
-    if (boot_classpath != nullptr && boot_classpath[0] != 0) {
-      ExtractMultiImageLocations(image_location, boot_classpath, all_locations);
-    }
-    return true;
-  }
-
-  bool GetBootImageAddressRange(const std::string& filename,
-                                /*out*/uint32_t* start,
-                                /*out*/uint32_t* end,
-                                /*out*/std::string* error_msg) {
-    ImageHeader system_hdr;
-    if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
-      *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
-      return false;
-    }
-    *start = reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin());
-    CHECK_ALIGNED(*start, kPageSize);
-    *end = RoundUp(reinterpret_cast32<uint32_t>(system_hdr.GetOatFileEnd()), kPageSize);
-    return true;
-  }
-
   bool ReserveBootImageMemory(uint32_t reservation_size,
                               uint32_t image_start,
                               size_t extra_reservation_size,
                               /*out*/MemMap* image_reservation,
                               /*out*/MemMap* extra_reservation,
                               /*out*/std::string* error_msg) {
+    DCHECK_ALIGNED(reservation_size, kPageSize);
+    DCHECK_ALIGNED(image_start, kPageSize);
     DCHECK(!image_reservation->IsValid());
     DCHECK_LT(extra_reservation_size, std::numeric_limits<uint32_t>::max() - reservation_size);
     size_t total_size = reservation_size + extra_reservation_size;
@@ -2096,6 +2035,8 @@
     return true;
   }
 
+  const std::vector<std::string>& boot_class_path_;
+  const std::vector<std::string>& boot_class_path_locations_;
   const std::string& image_location_;
   InstructionSet image_isa_;
   bool is_zygote_;
@@ -2143,6 +2084,8 @@
 }
 
 bool ImageSpace::LoadBootImage(
+    const std::vector<std::string>& boot_class_path,
+    const std::vector<std::string>& boot_class_path_locations,
     const std::string& image_location,
     const InstructionSet image_isa,
     size_t extra_reservation_size,
@@ -2160,7 +2103,7 @@
     return false;
   }
 
-  BootImageLoader loader(image_location, image_isa);
+  BootImageLoader loader(boot_class_path, boot_class_path_locations, image_location, image_isa);
 
   // Step 0: Extra zygote work.
 
@@ -2321,57 +2264,6 @@
       << ",name=\"" << GetName() << "\"]";
 }
 
-std::string ImageSpace::GetMultiImageBootClassPath(
-    const std::vector<std::string>& dex_locations,
-    const std::vector<std::string>& oat_filenames,
-    const std::vector<std::string>& image_filenames) {
-  DCHECK_GT(oat_filenames.size(), 1u);
-  // If the image filename was adapted (e.g., for our tests), we need to change this here,
-  // too, but need to strip all path components (they will be re-established when loading).
-  // For example, dex location
-  //    /system/framework/core-libart.art
-  // with image name
-  //    out/target/product/taimen/dex_bootjars/system/framework/arm64/boot-core-libart.art
-  // yields boot class path component
-  //    /system/framework/boot-core-libart.art .
-  std::ostringstream bootcp_oss;
-  bool first_bootcp = true;
-  for (size_t i = 0; i < dex_locations.size(); ++i) {
-    if (!first_bootcp) {
-      bootcp_oss << ":";
-    }
-
-    std::string dex_loc = dex_locations[i];
-    std::string image_filename = image_filenames[i];
-
-    // Use the dex_loc path, but the image_filename name (without path elements).
-    size_t dex_last_slash = dex_loc.rfind('/');
-
-    // npos is max(size_t). That makes this a bit ugly.
-    size_t image_last_slash = image_filename.rfind('/');
-    size_t image_last_at = image_filename.rfind('@');
-    size_t image_last_sep = (image_last_slash == std::string::npos)
-                                ? image_last_at
-                                : (image_last_at == std::string::npos)
-                                      ? image_last_slash
-                                      : std::max(image_last_slash, image_last_at);
-    // Note: whenever image_last_sep == npos, +1 overflow means using the full string.
-
-    if (dex_last_slash == std::string::npos) {
-      dex_loc = image_filename.substr(image_last_sep + 1);
-    } else {
-      dex_loc = dex_loc.substr(0, dex_last_slash + 1) +
-          image_filename.substr(image_last_sep + 1);
-    }
-
-    // Image filenames already end with .art, no need to replace.
-
-    bootcp_oss << dex_loc;
-    first_bootcp = false;
-  }
-  return bootcp_oss.str();
-}
-
 bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
   const ArtDexFileLoader dex_file_loader;
   for (const OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
@@ -2432,46 +2324,159 @@
   return true;
 }
 
-void ImageSpace::ExtractMultiImageLocations(const std::string& input_image_file_name,
-                                            const std::string& boot_classpath,
-                                            std::vector<std::string>* image_file_names) {
-  DCHECK(image_file_names != nullptr);
-
-  std::vector<std::string> images;
-  Split(boot_classpath, ':', &images);
-
-  // Add the rest into the list. We have to adjust locations, possibly:
-  //
-  // For example, image_file_name is /a/b/c/d/e.art
-  //              images[0] is          f/c/d/e.art
-  // ----------------------------------------------
-  //              images[1] is          g/h/i/j.art  -> /a/b/h/i/j.art
-  const std::string& first_image = images[0];
-  // Length of common suffix.
-  size_t common = 0;
-  while (common < input_image_file_name.size() &&
-         common < first_image.size() &&
-         *(input_image_file_name.end() - common - 1) == *(first_image.end() - common - 1)) {
-    ++common;
+std::string ImageSpace::GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+                                                  const std::string& image_location,
+                                                  InstructionSet image_isa,
+                                                  /*out*/std::string* error_msg) {
+  std::string system_filename;
+  bool has_system = false;
+  std::string cache_filename;
+  bool has_cache = false;
+  bool dalvik_cache_exists = false;
+  bool is_global_cache = false;
+  if (!FindImageFilename(image_location.c_str(),
+                         image_isa,
+                         &system_filename,
+                         &has_system,
+                         &cache_filename,
+                         &dalvik_cache_exists,
+                         &has_cache,
+                         &is_global_cache)) {
+    *error_msg = StringPrintf("Unable to find image file for %s and %s",
+                              image_location.c_str(),
+                              GetInstructionSetString(image_isa));
+    return std::string();
   }
-  // We want to replace the prefix of the input image with the prefix of the boot class path.
-  // This handles the case where the image file contains @ separators.
-  // Example image_file_name is oats/system@framework@boot.art
-  // images[0] is .../arm/boot.art
-  // means that the image name prefix will be oats/system@framework@
-  // so that the other images are openable.
-  const size_t old_prefix_length = first_image.size() - common;
-  const std::string new_prefix = input_image_file_name.substr(
-      0,
-      input_image_file_name.size() - common);
 
-  // Apply pattern to images[1] .. images[n].
-  for (size_t i = 1; i < images.size(); ++i) {
-    const std::string& image = images[i];
-    CHECK_GT(image.length(), old_prefix_length);
-    std::string suffix = image.substr(old_prefix_length);
-    image_file_names->push_back(new_prefix + suffix);
+  DCHECK(has_system || has_cache);
+  const std::string& filename = has_system ? system_filename : cache_filename;
+  std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg);
+  if (header == nullptr) {
+    return std::string();
   }
+  if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) {
+    *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+                                  "expected non-zero and <= %zu",
+                              filename.c_str(),
+                              header->GetComponentCount(),
+                              boot_class_path.size());
+    return std::string();
+  }
+
+  std::string boot_image_checksum =
+      StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum());
+  ArrayRef<const std::string> boot_class_path_tail =
+      ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount());
+  for (const std::string& bcp_filename : boot_class_path_tail) {
+    std::vector<std::unique_ptr<const DexFile>> dex_files;
+    const ArtDexFileLoader dex_file_loader;
+    if (!dex_file_loader.Open(bcp_filename.c_str(),
+                              bcp_filename,  // The location does not matter here.
+                              /*verify=*/ false,
+                              /*verify_checksum=*/ false,
+                              error_msg,
+                              &dex_files)) {
+      return std::string();
+    }
+    DCHECK(!dex_files.empty());
+    StringAppendF(&boot_image_checksum, ":d");
+    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+      StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+    }
+  }
+  return boot_image_checksum;
+}
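This overload, and the in-memory one below, emit the same grammar: "i;<component count>/<image checksum>", followed by one ":d" group per boot class path jar outside the image, with one "/<checksum>" per dex file inside that jar. A hypothetical result for a two-component image plus one extra single-dex jar (checksum values invented for illustration):

    i;2/8e2a7a5c:d/1f4b9c3d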
+
+std::string ImageSpace::GetBootClassPathChecksums(
+    const std::vector<ImageSpace*>& image_spaces,
+    const std::vector<const DexFile*>& boot_class_path) {
+  DCHECK(!image_spaces.empty());
+  const ImageHeader& primary_header = image_spaces.front()->GetImageHeader();
+  uint32_t component_count = primary_header.GetComponentCount();
+  DCHECK_EQ(component_count, image_spaces.size());
+  std::string boot_image_checksum =
+      StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum());
+  size_t pos = 0u;
+  for (const ImageSpace* space : image_spaces) {
+    size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size();
+    if (kIsDebugBuild) {
+      CHECK_NE(num_dex_files, 0u);
+      CHECK_LE(num_dex_files, boot_class_path.size() - pos);
+      for (size_t i = 0; i != num_dex_files; ++i) {
+        CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(),
+                 boot_class_path[pos + i]->GetLocation());
+      }
+    }
+    pos += num_dex_files;
+  }
+  ArrayRef<const DexFile* const> boot_class_path_tail =
+      ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos);
+  DCHECK(boot_class_path_tail.empty() ||
+         !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
+  for (const DexFile* dex_file : boot_class_path_tail) {
+    if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
+      StringAppendF(&boot_image_checksum, ":d");
+    }
+    StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+  }
+  return boot_image_checksum;
+}
+
+std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
+    const std::vector<std::string>& dex_locations,
+    const std::string& image_location) {
+  return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location);
+}
+
+std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
+    ArrayRef<const std::string> dex_locations,
+    const std::string& image_location) {
+  DCHECK(!dex_locations.empty());
+
+  // Find the path.
+  size_t last_slash = image_location.rfind('/');
+  CHECK_NE(last_slash, std::string::npos);
+
+  // We also need to honor path components that were encoded through '@'. Otherwise the loading
+  // code won't be able to find the images.
+  if (image_location.find('@', last_slash) != std::string::npos) {
+    last_slash = image_location.rfind('@');
+  }
+
+  // Find the dot separating the primary image name from the extension.
+  size_t last_dot = image_location.rfind('.');
+  // Extract the extension and base (the path and primary image name).
+  std::string extension;
+  std::string base = image_location;
+  if (last_dot != std::string::npos && last_dot > last_slash) {
+    extension = image_location.substr(last_dot);  // Including the dot.
+    base.resize(last_dot);
+  }
+  // For non-empty primary image name, add '-' to the `base`.
+  if (last_slash + 1u != base.size()) {
+    base += '-';
+  }
+
+  std::vector<std::string> locations;
+  locations.reserve(dex_locations.size());
+  locations.push_back(image_location);
+
+  // Now create the other names. Use a counted loop to skip the first one.
+  for (size_t i = 1u; i < dex_locations.size(); ++i) {
+    // Replace path with `base` (i.e. image path and prefix) and replace the original
+    // extension (if any) with `extension`.
+    std::string name = dex_locations[i];
+    size_t last_dex_slash = name.rfind('/');
+    if (last_dex_slash != std::string::npos) {
+      name = name.substr(last_dex_slash + 1);
+    }
+    size_t last_dex_dot = name.rfind('.');
+    if (last_dex_dot != std::string::npos) {
+      name.resize(last_dex_dot);
+    }
+    locations.push_back(base + name + extension);
+  }
+  return locations;
 }
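A worked example of the expansion, with hypothetical paths:

    image_location: /system/framework/boot.art
    dex_locations : /dir/a.jar  /dir/b.jar
    result        : /system/framework/boot.art  /system/framework/boot-b.art

With an '@'-encoded location such as /cache/system@framework@boot.art, the base is cut at the last '@' instead of the last '/', so the second entry becomes /cache/system@framework@boot-b.art.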
 
 void ImageSpace::DumpSections(std::ostream& os) const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index aa45ed3..14e364a 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -24,6 +24,8 @@
 
 namespace art {
 
+template <typename T> class ArrayRef;
+class DexFile;
 class OatFile;
 
 namespace gc {
@@ -39,9 +41,11 @@
   // Load boot image spaces from a primary image file for a specified instruction set.
   //
   // On successful return, the loaded spaces are added to boot_image_spaces (which must be
-  // empty on entry) and oat_file_end is updated with the (page-aligned) end of the last
-  // oat file.
+  // empty on entry) and `extra_reservation` is set to the requested reservation located
+  // after the end of the last loaded oat file.
   static bool LoadBootImage(
+      const std::vector<std::string>& boot_class_path,
+      const std::vector<std::string>& boot_class_path_locations,
       const std::string& image_location,
       const InstructionSet image_isa,
       size_t extra_reservation_size,
@@ -122,15 +126,23 @@
                                 bool* has_data,
                                 bool* is_global_cache);
 
-  // Use the input image filename to adapt the names in the given boot classpath to establish
-  // complete locations for secondary images.
-  static void ExtractMultiImageLocations(const std::string& input_image_file_name,
-                                        const std::string& boot_classpath,
-                                        std::vector<std::string>* image_filenames);
+  // Returns the checksums for the boot image and extra boot class path dex files,
+  // based on the boot class path, image location and ISA (which may differ from the
+  // ISA of an initialized Runtime). The boot image and dex files do not need to be
+  // loaded in memory.
+  static std::string GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+                                               const std::string& image_location,
+                                               InstructionSet image_isa,
+                                               /*out*/std::string* error_msg);
 
-  static std::string GetMultiImageBootClassPath(const std::vector<std::string>& dex_locations,
-                                                const std::vector<std::string>& oat_filenames,
-                                                const std::vector<std::string>& image_filenames);
+  // Returns the checksums for the boot image and extra boot class path dex files,
+  // based on the boot image and boot class path dex files loaded in memory.
+  static std::string GetBootClassPathChecksums(const std::vector<ImageSpace*>& image_spaces,
+                                               const std::vector<const DexFile*>& boot_class_path);
+
+  // Expand a single image location to multi-image locations based on the dex locations.
+  static std::vector<std::string> ExpandMultiImageLocations(
+      const std::vector<std::string>& dex_locations,
+      const std::string& image_location);
 
   // Returns true if the dex checksums in the given oat file match the
   // checksums of the original dex files on disk. This is intended to be used
@@ -191,8 +203,21 @@
   friend class Space;
 
  private:
-  class Loader;
+  // Internal overload that takes ArrayRef<> instead of vector<>.
+  static std::vector<std::string> ExpandMultiImageLocations(
+      ArrayRef<const std::string> dex_locations,
+      const std::string& image_location);
+
   class BootImageLoader;
+  template <typename ReferenceVisitor>
+  class ClassTableVisitor;
+  class Loader;
+  template <typename PatchObjectVisitor>
+  class PatchArtFieldVisitor;
+  template <PointerSize kPointerSize, typename PatchObjectVisitor, typename PatchCodeVisitor>
+  class PatchArtMethodVisitor;
+  template <PointerSize kPointerSize, typename ReferenceVisitor>
+  class PatchObjectVisitor;
 
   DISALLOW_COPY_AND_ASSIGN(ImageSpace);
 };
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 5dd8136..7d28516 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -21,6 +21,7 @@
 
 #include <ostream>
 #include "base/memory_tool.h"
+#include "base/mutex.h"
 
 namespace art {
 namespace gc {
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 545e3d8..dd5451b 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -22,9 +22,9 @@
 
 #include "base/atomic.h"
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc/accounting/space_bitmap.h"
 #include "gc/collector/object_byte_pair.h"
 
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0bd43f9..32af62d 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_GC_ROOT_H_
 #define ART_RUNTIME_GC_ROOT_H_
 
+#include "base/locks.h"       // For Locks::mutator_lock_.
 #include "base/macros.h"
-#include "base/mutex.h"       // For Locks::mutator_lock_.
 #include "mirror/object_reference.h"
 #include "read_barrier_option.h"
 
diff --git a/runtime/handle.h b/runtime/handle.h
index b13c43e..0c9c029 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -20,8 +20,8 @@
 #include <android-base/logging.h>
 
 #include "base/casts.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/value_object.h"
 #include "jni.h"
 #include "obj_ptr.h"
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 9eaf1ec..1a1c92f 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -22,8 +22,8 @@
 #include <android-base/logging.h>
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "handle.h"
 #include "stack_reference.h"
 #include "verify_object.h"
diff --git a/runtime/hidden_api.cc b/runtime/hidden_api.cc
index 6cdba73..e0939dd 100644
--- a/runtime/hidden_api.cc
+++ b/runtime/hidden_api.cc
@@ -21,7 +21,10 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/dumpable.h"
+#include "class_root.h"
 #include "dex/class_accessor-inl.h"
+#include "dex/dex_file_loader.h"
+#include "mirror/class_ext.h"
 #include "scoped_thread_state_change.h"
 #include "thread-inl.h"
 #include "well_known_classes.h"
@@ -93,6 +96,24 @@
   type_ = kMethod;
 }
 
+MemberSignature::MemberSignature(const ClassAccessor::Field& field) {
+  const DexFile& dex_file = field.GetDexFile();
+  const DexFile::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
+  class_name_ = dex_file.GetFieldDeclaringClassDescriptor(field_id);
+  member_name_ = dex_file.GetFieldName(field_id);
+  type_signature_ = dex_file.GetFieldTypeDescriptor(field_id);
+  type_ = kField;
+}
+
+MemberSignature::MemberSignature(const ClassAccessor::Method& method) {
+  const DexFile& dex_file = method.GetDexFile();
+  const DexFile::MethodId& method_id = dex_file.GetMethodId(method.GetIndex());
+  class_name_ = dex_file.GetMethodDeclaringClassDescriptor(method_id);
+  member_name_ = dex_file.GetMethodName(method_id);
+  type_signature_ = dex_file.GetMethodSignature(method_id).ToString();
+  type_ = kMethod;
+}
+
 inline std::vector<const char*> MemberSignature::GetSignatureParts() const {
   if (type_ == kField) {
     return { class_name_.c_str(), "->", member_name_.c_str(), ":", type_signature_.c_str() };
@@ -137,6 +158,17 @@
                << Dumpable<MemberSignature>(*this) << " (" << list << ", " << access_method << ")";
 }
 
+bool MemberSignature::Equals(const MemberSignature& other) {
+  return type_ == other.type_ &&
+         class_name_ == other.class_name_ &&
+         member_name_ == other.member_name_ &&
+         type_signature_ == other.type_signature_;
+}
+
+bool MemberSignature::MemberNameAndTypeMatch(const MemberSignature& other) {
+  return member_name_ == other.member_name_ && type_signature_ == other.type_signature_;
+}
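Equals compares all recorded parts, while MemberNameAndTypeMatch deliberately ignores the class name; that is sufficient for the redefinition lookup below, where both signatures are built for the same class def. A small usage sketch with a hypothetical field Lcom/example/Foo;->count:I:

    MemberSignature runtime_sig(art_field);  // Built from the live ArtField.
    MemberSignature dex_sig(dex_field);      // Built from the pre-redefine dex file.
    if (runtime_sig.MemberNameAndTypeMatch(dex_sig)) {
      DCHECK(runtime_sig.Equals(dex_sig));   // Same class def, so the full match holds.
    }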
+
 #ifdef ART_TARGET_ANDROID
 // Convert an AccessMethod enum to a value for logging from the proto enum.
 // This method may look odd (the enum values are currently the same), but it
@@ -238,63 +270,88 @@
 static constexpr uint32_t kNoDexFlags = 0u;
 static constexpr uint32_t kInvalidDexFlags = static_cast<uint32_t>(-1);
 
-uint32_t GetDexFlags(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::Class> declaring_class = field->GetDeclaringClass();
-  DCHECK(declaring_class != nullptr) << "Fields always have a declaring class";
-
-  const DexFile::ClassDef* class_def = declaring_class->GetClassDef();
-  if (class_def == nullptr) {
-    return kNoDexFlags;
-  }
-
-  uint32_t flags = kInvalidDexFlags;
-  DCHECK(!AreValidDexFlags(flags));
-
-  ClassAccessor accessor(declaring_class->GetDexFile(),
-                         *class_def,
-                         /* parse_hiddenapi_class_data= */ true);
-  auto fn_visit = [&](const ClassAccessor::Field& dex_field) {
-    if (dex_field.GetIndex() == field->GetDexFieldIndex()) {
-      flags = dex_field.GetHiddenapiFlags();
-    }
-  };
-  accessor.VisitFields(fn_visit, fn_visit);
-
-  CHECK_NE(flags, kInvalidDexFlags) << "Could not find flags for field " << field->PrettyField();
-  DCHECK(AreValidDexFlags(flags));
-  return flags;
+static ALWAYS_INLINE uint32_t GetMemberDexIndex(ArtField* field) {
+  return field->GetDexFieldIndex();
 }
 
-uint32_t GetDexFlags(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
-  if (declaring_class.IsNull()) {
-    DCHECK(method->IsRuntimeMethod());
-    return kNoDexFlags;
-  }
+static ALWAYS_INLINE uint32_t GetMemberDexIndex(ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Use the non-obsolete method to avoid DexFile mismatch between
+  // the method index and the declaring class.
+  return method->GetNonObsoleteMethod()->GetDexMethodIndex();
+}
 
-  const DexFile::ClassDef* class_def = declaring_class->GetClassDef();
-  if (class_def == nullptr) {
+static void VisitMembers(const DexFile& dex_file,
+                         const DexFile::ClassDef& class_def,
+                         const std::function<void(const ClassAccessor::Field&)>& fn_visit) {
+  ClassAccessor accessor(dex_file, class_def, /* parse_hiddenapi_class_data= */ true);
+  accessor.VisitFields(fn_visit, fn_visit);
+}
+
+static void VisitMembers(const DexFile& dex_file,
+                         const DexFile::ClassDef& class_def,
+                         const std::function<void(const ClassAccessor::Method&)>& fn_visit) {
+  ClassAccessor accessor(dex_file, class_def, /* parse_hiddenapi_class_data= */ true);
+  accessor.VisitMethods(fn_visit, fn_visit);
+}
+
+template<typename T>
+uint32_t GetDexFlags(T* member) REQUIRES_SHARED(Locks::mutator_lock_) {
+  static_assert(std::is_same<T, ArtField>::value || std::is_same<T, ArtMethod>::value);
+  using AccessorType = typename std::conditional<std::is_same<T, ArtField>::value,
+      ClassAccessor::Field, ClassAccessor::Method>::type;
+
+  ObjPtr<mirror::Class> declaring_class = member->GetDeclaringClass();
+  if (declaring_class.IsNull()) {
     return kNoDexFlags;
   }
 
   uint32_t flags = kInvalidDexFlags;
   DCHECK(!AreValidDexFlags(flags));
 
-  // Use the non-obsolete method to avoid DexFile mismatch between
-  // the method index and the declaring class.
-  uint32_t method_index = method->GetNonObsoleteMethod()->GetDexMethodIndex();
-
-  ClassAccessor accessor(declaring_class->GetDexFile(),
-                         *class_def,
-                         /* parse_hiddenapi_class_data= */ true);
-  auto fn_visit = [&](const ClassAccessor::Method& dex_method) {
-    if (dex_method.GetIndex() == method_index) {
-      flags = dex_method.GetHiddenapiFlags();
+  // Check if the declaring class has ClassExt allocated. If it does, check whether
+  // the pre-redefine dex file has been set; if it has, the declaring class has been
+  // redefined with JVMTI.
+  ObjPtr<mirror::ClassExt> ext(declaring_class->GetExtData());
+  const DexFile* original_dex = ext.IsNull() ? nullptr : ext->GetPreRedefineDexFile();
+  if (LIKELY(original_dex == nullptr)) {
+    // Class is not redefined. Find the class def, iterate over its members and
+    // find the entry corresponding to this `member`.
+    const DexFile::ClassDef* class_def = declaring_class->GetClassDef();
+    if (class_def == nullptr) {
+      flags = kNoDexFlags;
+    } else {
+      uint32_t member_index = GetMemberDexIndex(member);
+      auto fn_visit = [&](const AccessorType& dex_member) {
+        if (dex_member.GetIndex() == member_index) {
+          flags = dex_member.GetHiddenapiFlags();
+        }
+      };
+      VisitMembers(declaring_class->GetDexFile(), *class_def, fn_visit);
     }
-  };
-  accessor.VisitMethods(fn_visit, fn_visit);
+  } else {
+    // Class was redefined using JVMTI. We have a pointer to the original dex file
+    // and the class def index of this class in that dex file, but the field/method
+    // indices are lost. Iterate over all members of the class def and find the one
+    // corresponding to this `member` by name and type string comparison.
+    // This is obviously very slow, but it is only used when non-exempt code tries
+    // to access a hidden member of a JVMTI-redefined class.
+    uint16_t class_def_idx = ext->GetPreRedefineClassDefIndex();
+    DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
+    const DexFile::ClassDef& original_class_def = original_dex->GetClassDef(class_def_idx);
+    MemberSignature member_signature(member);
+    auto fn_visit = [&](const AccessorType& dex_member) {
+      MemberSignature cur_signature(dex_member);
+      if (member_signature.MemberNameAndTypeMatch(cur_signature)) {
+        DCHECK(member_signature.Equals(cur_signature));
+        flags = dex_member.GetHiddenapiFlags();
+      }
+    };
+    VisitMembers(*original_dex, original_class_def, fn_visit);
+  }
 
-  CHECK_NE(flags, kInvalidDexFlags) << "Could not find flags for method " << method->PrettyMethod();
+  CHECK_NE(flags, kInvalidDexFlags) << "Could not find hiddenapi flags for "
+      << Dumpable<MemberSignature>(MemberSignature(member));
   DCHECK(AreValidDexFlags(flags));
   return flags;
 }
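Field and method lookups now share this single template: std::conditional maps ArtField to ClassAccessor::Field and ArtMethod to ClassAccessor::Method, and overload resolution on the VisitMembers helpers picks the matching iteration. Hypothetical call sites, both resolving to the explicit instantiations added below:

    uint32_t field_flags  = hiddenapi::GetDexFlags(field);   // T = ArtField
    uint32_t method_flags = hiddenapi::GetDexFlags(method);  // T = ArtMethod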
@@ -356,6 +413,8 @@
 }
 
 // Need to instantiate this.
+template uint32_t GetDexFlags<ArtField>(ArtField* member);
+template uint32_t GetDexFlags<ArtMethod>(ArtMethod* member);
 template bool ShouldDenyAccessToMemberImpl<ArtField>(ArtField* member,
                                                      hiddenapi::ApiList api_list,
                                                      AccessMethod access_method);
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index eea58e9..13bead2 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -20,7 +20,7 @@
 #include "art_field.h"
 #include "art_method.h"
 #include "base/hiddenapi_flags.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "intrinsics_enum.h"
 #include "mirror/class-inl.h"
 #include "reflection.h"
@@ -137,9 +137,14 @@
  public:
   explicit MemberSignature(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
   explicit MemberSignature(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+  explicit MemberSignature(const ClassAccessor::Field& field);
+  explicit MemberSignature(const ClassAccessor::Method& method);
 
   void Dump(std::ostream& os) const;
 
+  bool Equals(const MemberSignature& other);
+  bool MemberNameAndTypeMatch(const MemberSignature& other);
+
   // Performs prefix match on this member. Since the full member signature is
   // composed of several parts, we match each part in turn (rather than
   // building the entire thing in memory and performing a simple prefix match)
@@ -160,11 +165,8 @@
 
-// Locates hiddenapi flags for `field` in the corresponding dex file.
+// Locates hiddenapi flags for `member` in the corresponding dex file.
 // NB: This is an O(N) operation, linear with the number of members in the class def.
-uint32_t GetDexFlags(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
-
-// Locates hiddenapi flags for `method` in the corresponding dex file.
-// NB: This is an O(N) operation, linear with the number of members in the class def.
-uint32_t GetDexFlags(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+template<typename T>
+uint32_t GetDexFlags(T* member) REQUIRES_SHARED(Locks::mutator_lock_);
 
 template<typename T>
 bool ShouldDenyAccessToMemberImpl(T* member, ApiList api_list, AccessMethod access_method)
@@ -233,6 +235,7 @@
       case Intrinsics::kUnsafeStoreFence:
       case Intrinsics::kUnsafeFullFence:
       case Intrinsics::kCRC32Update:
+      case Intrinsics::kCRC32UpdateBytes:
       case Intrinsics::kStringNewStringFromBytes:
       case Intrinsics::kStringNewStringFromChars:
       case Intrinsics::kStringNewStringFromString:
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index 520dc6d..595f077 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -103,12 +103,14 @@
   runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kDisabled);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), false);
 
   runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kJustWarn);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), false);
 
@@ -117,6 +119,7 @@
       static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()));
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
 
@@ -125,6 +128,16 @@
       static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()) + 1);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
+
+  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  runtime_->SetTargetSdkVersion(
+      static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxP().GetMaxAllowedSdkVersion()) + 1);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), true);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
 }
diff --git a/runtime/image.cc b/runtime/image.cc
index ae3d8e3..fe1d88f 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -29,9 +29,11 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '1', '\0' };  // Add image blocks.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '3', '\0' };  // Image reservation.
 
-ImageHeader::ImageHeader(uint32_t image_begin,
+ImageHeader::ImageHeader(uint32_t image_reservation_size,
+                         uint32_t component_count,
+                         uint32_t image_begin,
                          uint32_t image_size,
                          ImageSection* sections,
                          uint32_t image_roots,
@@ -43,7 +45,9 @@
                          uint32_t boot_image_begin,
                          uint32_t boot_image_size,
                          uint32_t pointer_size)
-  : image_begin_(image_begin),
+  : image_reservation_size_(image_reservation_size),
+    component_count_(component_count),
+    image_begin_(image_begin),
     image_size_(image_size),
     image_checksum_(0u),
     oat_checksum_(oat_checksum),
@@ -96,6 +100,9 @@
   if (memcmp(version_, kImageVersion, sizeof(kImageVersion)) != 0) {
     return false;
   }
+  if (!IsAligned<kPageSize>(image_reservation_size_)) {
+    return false;
+  }
   // Unsigned so wraparound is well defined.
   if (image_begin_ >= image_begin_ + image_size_) {
     return false;
diff --git a/runtime/image.h b/runtime/image.h
index 76fb3b7..140f504 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -116,6 +116,14 @@
       return storage_mode_;
     }
 
+    uint32_t GetDataSize() const {
+      return data_size_;
+    }
+
+    uint32_t GetImageSize() const {
+      return image_size_;
+    }
+
    private:
     // Storage method for the image, the image may be compressed.
     StorageMode storage_mode_ = kDefaultStorageMode;
@@ -130,7 +138,9 @@
   };
 
   ImageHeader() {}
-  ImageHeader(uint32_t image_begin,
+  ImageHeader(uint32_t image_reservation_size,
+              uint32_t component_count,
+              uint32_t image_begin,
               uint32_t image_size,
               ImageSection* sections,
               uint32_t image_roots,
@@ -146,6 +156,14 @@
   bool IsValid() const;
   const char* GetMagic() const;
 
+  uint32_t GetImageReservationSize() const {
+    return image_reservation_size_;
+  }
+
+  uint32_t GetComponentCount() const {
+    return component_count_;
+  }
+
   uint8_t* GetImageBegin() const {
     return reinterpret_cast<uint8_t*>(image_begin_);
   }
@@ -415,6 +433,19 @@
   uint8_t magic_[4];
   uint8_t version_[4];
 
+  // The total memory reservation size for the image.
+  // For boot image or boot image extension, the primary image includes the reservation
+  // for all image files and oat files; secondary images have the reservation set to 0.
+  // App images have a reservation equal to `image_size_` rounded up to page size because
+  // their oat files are mmapped independently.
+  uint32_t image_reservation_size_ = 0u;
+
+  // The number of components.
+  // For boot image or boot image extension, the primary image stores the total number
+  // of images; secondary images have this set to 0.
+  // App images have 1 component.
+  uint32_t component_count_ = 0u;
+
   // Required base address for mapping the image.
   uint32_t image_begin_ = 0u;
 
diff --git a/runtime/imtable.h b/runtime/imtable.h
index 3c52fb8..48a8643 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -23,8 +23,8 @@
 
 #include "base/casts.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 8c63c00..eb07035 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -26,9 +26,9 @@
 #include <android-base/logging.h>
 
 #include "base/bit_utils.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 #include "offsets.h"
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 12f1522..27c47bc 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -164,7 +164,8 @@
       have_watched_frame_pop_listeners_(false),
       have_branch_listeners_(false),
       have_exception_handled_listeners_(false),
-      deoptimized_methods_lock_("deoptimized methods lock", kGenericBottomLock),
+      deoptimized_methods_lock_(new ReaderWriterMutex("deoptimized methods lock",
+                                                      kGenericBottomLock)),
       deoptimization_enabled_(false),
       interpreter_handler_table_(kMainHandlerTable),
       quick_alloc_entry_points_instrumentation_counter_(0),
@@ -323,8 +324,9 @@
 
         const InstrumentationStackFrame& frame =
             (*instrumentation_stack_)[instrumentation_stack_depth_];
-        CHECK_EQ(m, frame.method_) << "Expected " << ArtMethod::PrettyMethod(m)
-                                   << ", Found " << ArtMethod::PrettyMethod(frame.method_);
+        CHECK_EQ(m->GetNonObsoleteMethod(), frame.method_->GetNonObsoleteMethod())
+            << "Expected " << ArtMethod::PrettyMethod(m)
+            << ", Found " << ArtMethod::PrettyMethod(frame.method_);
         return_pc = frame.return_pc_;
         if (kVerboseInstrumentation) {
           LOG(INFO) << "Ignoring already instrumented " << frame.Dump();
@@ -470,7 +472,9 @@
           if (instrumentation_frame.interpreter_entry_) {
             CHECK(m == Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
           } else {
-            CHECK(m == instrumentation_frame.method_) << ArtMethod::PrettyMethod(m);
+            CHECK_EQ(m->GetNonObsoleteMethod(),
+                     instrumentation_frame.method_->GetNonObsoleteMethod())
+                << ArtMethod::PrettyMethod(m);
           }
           SetReturnPc(instrumentation_frame.return_pc_);
           if (instrumentation_->ShouldNotifyMethodEnterExitEvents() &&
@@ -743,7 +747,7 @@
     // Restore stack only if there is no method currently deoptimized.
     bool empty;
     {
-      ReaderMutexLock mu(self, deoptimized_methods_lock_);
+      ReaderMutexLock mu(self, *GetDeoptimizedMethodsLock());
       empty = IsDeoptimizedMethodsEmpty();  // Avoid lock violation.
     }
     if (empty) {
@@ -931,7 +935,7 @@
 
   Thread* self = Thread::Current();
   {
-    WriterMutexLock mu(self, deoptimized_methods_lock_);
+    WriterMutexLock mu(self, *GetDeoptimizedMethodsLock());
     bool has_not_been_deoptimized = AddDeoptimizedMethod(method);
     CHECK(has_not_been_deoptimized) << "Method " << ArtMethod::PrettyMethod(method)
         << " is already deoptimized";
@@ -955,7 +959,7 @@
   Thread* self = Thread::Current();
   bool empty;
   {
-    WriterMutexLock mu(self, deoptimized_methods_lock_);
+    WriterMutexLock mu(self, *GetDeoptimizedMethodsLock());
     bool found_and_erased = RemoveDeoptimizedMethod(method);
     CHECK(found_and_erased) << "Method " << ArtMethod::PrettyMethod(method)
         << " is not deoptimized";
@@ -987,12 +991,12 @@
 
 bool Instrumentation::IsDeoptimized(ArtMethod* method) {
   DCHECK(method != nullptr);
-  ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+  ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
   return IsDeoptimizedMethod(method);
 }
 
 void Instrumentation::EnableDeoptimization() {
-  ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+  ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
   CHECK(IsDeoptimizedMethodsEmpty());
   CHECK_EQ(deoptimization_enabled_, false);
   deoptimization_enabled_ = true;
@@ -1009,7 +1013,7 @@
   while (true) {
     ArtMethod* method;
     {
-      ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+      ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
       if (IsDeoptimizedMethodsEmpty()) {
         break;
       }
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3bd4fb5..d4c3c29 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -19,12 +19,13 @@
 
 #include <stdint.h>
 #include <list>
+#include <memory>
 #include <unordered_set>
 
 #include "arch/instruction_set.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/safe_map.h"
 #include "gc_root.h"
 
@@ -39,6 +40,7 @@
 template <typename T> class Handle;
 template <typename T> class MutableHandle;
 union JValue;
+class SHARED_LOCKABLE ReaderWriterMutex;
 class ShadowFrame;
 class Thread;
 enum class DeoptimizationMethodType;
@@ -211,11 +213,11 @@
   // Deoptimization.
   void EnableDeoptimization()
       REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES(!GetDeoptimizedMethodsLock());
   // Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
   void DisableDeoptimization(const char* key)
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
-      REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES(!GetDeoptimizedMethodsLock());
 
   bool AreAllMethodsDeoptimized() const {
     return interpreter_stubs_installed_;
@@ -231,7 +233,7 @@
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
+               !GetDeoptimizedMethodsLock());
 
   // Executes everything with compiled code (or interpreter if there is no code). May visit class
   // linker classes through ConfigureStubs.
@@ -239,23 +241,23 @@
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
+               !GetDeoptimizedMethodsLock());
 
   // Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
   // method (except a class initializer) set to the resolution trampoline will be deoptimized only
   // once its declaring class is initialized.
   void Deoptimize(ArtMethod* method)
-      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
 
   // Undeoptimze the method by restoring its entrypoints. Nevertheless, a static method
   // (except a class initializer) set to the resolution trampoline will be updated only once its
   // declaring class is initialized.
   void Undeoptimize(ArtMethod* method)
-      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_);
+      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
 
   // Indicates whether the method has been deoptimized so it is executed with the interpreter.
   bool IsDeoptimized(ArtMethod* method)
-      REQUIRES(!deoptimized_methods_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(!GetDeoptimizedMethodsLock()) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
   void EnableMethodTracing(const char* key,
@@ -263,14 +265,14 @@
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
+               !GetDeoptimizedMethodsLock());
 
   // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
   void DisableMethodTracing(const char* key)
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
       REQUIRES(!Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_,
-               !deoptimized_methods_lock_);
+               !GetDeoptimizedMethodsLock());
 
   InterpreterHandlerTable GetInterpreterHandlerTable() const
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -289,19 +291,19 @@
 
   // Update the code of a method respecting any installed stubs.
   void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Update the code of a native method to a JITed stub.
   void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* quick_code)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Update the code of a method to the interpreter respecting any installed stubs from debugger.
   void UpdateMethodsCodeToInterpreterEntryPoint(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Update the code of a method respecting any installed stubs from debugger.
   void UpdateMethodsCodeForJavaDebuggable(ArtMethod* method, const void* quick_code)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Return the code that we can execute for an invoke including from the JIT.
   const void* GetCodeForInvoke(ArtMethod* method) const
@@ -483,7 +485,7 @@
   // being returned from.
   TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
                                              uint64_t* gpr_result, uint64_t* fpr_result)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Pops nframes instrumentation frames from the current thread. Returns the return pc for the last
   // instrumentation frame that's popped.
@@ -492,10 +494,10 @@
 
   // Call back for configure stubs.
   void InstallStubsForClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES(!GetDeoptimizedMethodsLock());
 
   void InstallStubsForMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Install instrumentation exit stub on every method of the stack of the given thread.
   // This is used by the debugger to cause a deoptimization of the thread's stack after updating
@@ -528,7 +530,7 @@
   // becomes the highest instrumentation level required by a client.
   void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
       REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
-      REQUIRES(!deoptimized_methods_lock_,
+      REQUIRES(!GetDeoptimizedMethodsLock(),
                !Locks::thread_list_lock_,
                !Locks::classlinker_classes_lock_);
 
@@ -583,18 +585,21 @@
 
   // Read barrier-aware utility functions for accessing deoptimized_methods_
   bool AddDeoptimizedMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
   bool IsDeoptimizedMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
   bool RemoveDeoptimizedMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
   ArtMethod* BeginDeoptimizedMethod()
-      REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
   bool IsDeoptimizedMethodsEmpty() const
-      REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
   void UpdateMethodsCodeImpl(ArtMethod* method, const void* quick_code)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
+  ReaderWriterMutex* GetDeoptimizedMethodsLock() const {
+    return deoptimized_methods_lock_.get();
+  }
 
   // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
   bool instrumentation_stubs_installed_;
@@ -676,8 +681,8 @@
 
   // The set of methods being deoptimized (by the debugger) which must be executed with interpreter
   // only.
-  mutable ReaderWriterMutex deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
-  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
+  mutable std::unique_ptr<ReaderWriterMutex> deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
+  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(GetDeoptimizedMethodsLock());
   bool deoptimization_enabled_;
 
   // Current interpreter handler table. This is updated each time the thread state flags are
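
The instrumentation change above shows the companion trick that lets headers drop base/mutex.h: the ReaderWriterMutex member becomes a unique_ptr behind an accessor, so the header only needs a forward declaration while GUARDED_BY and REQUIRES annotations keep working against GetDeoptimizedMethodsLock(). A self-contained sketch of the pattern (names illustrative):

    #include <memory>
    #include <unordered_set>

    class ReaderWriterMutex;  // forward declaration; the full type lives elsewhere

    class IllustrativeInstrumentation {
     public:
      IllustrativeInstrumentation();
      // Defined out of line, where ReaderWriterMutex is complete, so the
      // unique_ptr deleter can be instantiated there.
      ~IllustrativeInstrumentation();

      // Annotations such as GUARDED_BY(GetLock()) and REQUIRES(!GetLock())
      // attach to this accessor just as they did to the plain member.
      ReaderWriterMutex* GetLock() const { return lock_.get(); }

     private:
      mutable std::unique_ptr<ReaderWriterMutex> lock_;
      std::unordered_set<void*> deoptimized_;  // GUARDED_BY(GetLock()) in ART
    };
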
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index d7e69a6..e92d195 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index bf84227..a633a63 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -32,8 +32,8 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
 #include "common_dex_operations.h"
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 24a026a..16e118c 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -559,6 +559,7 @@
     UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */)
     UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
     UNIMPLEMENTED_CASE(CRC32Update /* (II)I */)
+    UNIMPLEMENTED_CASE(CRC32UpdateBytes /* (I[BII)I */)
     INTRINSIC_CASE(VarHandleFullFence)
     INTRINSIC_CASE(VarHandleAcquireFence)
     INTRINSIC_CASE(VarHandleReleaseFence)
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index d8a764f..177b0fd 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "jvalue.h"
 #include "obj_ptr.h"
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index 9fc4239..d4dca11 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "dex/code_item_accessors.h"
 #include "jvalue.h"
diff --git a/runtime/interpreter/lock_count_data.h b/runtime/interpreter/lock_count_data.h
index 3098d4f..efa14c5 100644
--- a/runtime/interpreter/lock_count_data.h
+++ b/runtime/interpreter/lock_count_data.h
@@ -20,7 +20,7 @@
 #include <memory>
 #include <vector>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 
 namespace art {
 
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 6609021..ca98999 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -21,8 +21,8 @@
 #include <cstring>
 #include <string>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "lock_count_data.h"
 #include "read_barrier.h"
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index e292a76..4fa7271 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -571,12 +571,9 @@
 
   Runtime* runtime = Runtime::Current();
 
-  std::vector<std::string> split;
-  Split(runtime->GetBootClassPathString(), ':', &split);
-  if (split.empty()) {
-    AbortTransactionOrFail(self,
-                           "Boot classpath not set or split error:: %s",
-                           runtime->GetBootClassPathString().c_str());
+  const std::vector<std::string>& boot_class_path = Runtime::Current()->GetBootClassPath();
+  if (boot_class_path.empty()) {
+    AbortTransactionOrFail(self, "Boot classpath not set");
     return;
   }
 
@@ -584,7 +581,7 @@
   size_t map_size;
   std::string last_error_msg;  // Only store the last message (we could concatenate).
 
-  for (const std::string& jar_file : split) {
+  for (const std::string& jar_file : boot_class_path) {
     mem_map = FindAndExtractEntry(jar_file, resource_cstr, &map_size, &last_error_msg);
     if (mem_map.IsValid()) {
       break;
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index 093dd7f..82ea476 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -220,6 +220,7 @@
   V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \
   V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \
   V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
+  V(CRC32UpdateBytes, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "updateBytes", "(I[BII)I") \
   SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
 
 #endif  // ART_RUNTIME_INTRINSICS_LIST_H_
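
The new CRC32UpdateBytes entry corresponds to java.util.zip.CRC32.updateBytes(crc, b, off, len): a standard CRC-32 over b[off, off+len) seeded with the running value. Assuming zlib is available, the same computation looks like:

    #include <zlib.h>

    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned char data[] = "hello world";
      uint32_t crc = crc32(0L, Z_NULL, 0);  // initial value, like new CRC32()
      crc = crc32(crc, data, 11);           // update(b, /*off=*/ 0, /*len=*/ 11)
      std::printf("%08x\n", crc);           // prints 0d4a1185
      return 0;
    }
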
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 452a76b..8141ea2 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -19,8 +19,8 @@
 
 #include <iosfwd>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 
 namespace art {
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index d31f166..37365ff 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -292,8 +292,7 @@
     expandBufAddUtf8String(pReply, str);
   }
 
-  std::vector<std::string> boot_class_path;
-  Split(Runtime::Current()->GetBootClassPathString(), ':', &boot_class_path);
+  std::vector<std::string> boot_class_path = Runtime::Current()->GetBootClassPath();
   expandBufAdd4BE(pReply, boot_class_path.size());
   for (const std::string& str : boot_class_path) {
     expandBufAddUtf8String(pReply, str);
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 6cd719a..853c0ca 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -21,12 +21,13 @@
 #include "base/array_ref.h"
 #include "base/mutex.h"
 #include "base/time_utils.h"
+#include "dex/dex_file.h"
 #include "thread-current-inl.h"
 #include "thread.h"
 
 #include <atomic>
-#include <unordered_map>
 #include <cstddef>
+#include <map>
 
 //
 // Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
@@ -126,14 +127,14 @@
   void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;
 
   // The root data structure describing all JITed methods.
-  JITDescriptor __jit_debug_descriptor {};
+  JITDescriptor __jit_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {};
 
   // The following globals mirror the ones above, but are used to register dex files.
   void __attribute__((noinline)) __dex_debug_register_code() {
     __asm__("");
   }
   void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
-  JITDescriptor __dex_debug_descriptor {};
+  JITDescriptor __dex_debug_descriptor GUARDED_BY(*Locks::native_debug_interface_lock_) {};
 }
 
 // Mark the descriptor as "locked", so native tools know the data is being modified.
@@ -155,8 +156,17 @@
 static JITCodeEntry* CreateJITCodeEntryInternal(
     JITDescriptor& descriptor,
     void (*register_code_ptr)(),
-    const ArrayRef<const uint8_t>& symfile)
+    ArrayRef<const uint8_t> symfile,
+    bool copy_symfile)
     REQUIRES(Locks::native_debug_interface_lock_) {
+  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
+  if (copy_symfile) {
+    uint8_t* copy = new uint8_t[symfile.size()];
+    CHECK(copy != nullptr);
+    memcpy(copy, symfile.data(), symfile.size());
+    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
+  }
+
   // Ensure the timestamp is monotonically increasing even in the presence of a
   // low-granularity system timer. This ensures each entry has a unique timestamp.
   uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());
@@ -188,9 +198,11 @@
 static void DeleteJITCodeEntryInternal(
     JITDescriptor& descriptor,
     void (*register_code_ptr)(),
-    JITCodeEntry* entry)
+    JITCodeEntry* entry,
+    bool free_symfile)
     REQUIRES(Locks::native_debug_interface_lock_) {
   CHECK(entry != nullptr);
+  const uint8_t* symfile = entry->symfile_addr_;
 
   // Ensure the timestamp is monotonically increasing even in the presence of a
   // low-granularity system timer. This ensures each entry has a unique timestamp.
@@ -221,83 +233,88 @@
   memset(entry, 0, sizeof(*entry));
 
   delete entry;
+  if (free_symfile) {
+    delete[] symfile;
+  }
 }
 
-static std::unordered_map<const void*, JITCodeEntry*> __dex_debug_entries
-    GUARDED_BY(Locks::native_debug_interface_lock_);
+static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries
+    GUARDED_BY(*Locks::native_debug_interface_lock_);
 
-void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
-  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
-  DCHECK(dexfile.data() != nullptr);
+void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
+  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+  DCHECK(dexfile != nullptr);
   // This is just a defensive check. The class linker should not register the dex file twice.
-  if (__dex_debug_entries.count(dexfile.data()) == 0) {
+  if (g_dex_debug_entries.count(dexfile) == 0) {
+    const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
     JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
                                                      __dex_debug_register_code_ptr,
-                                                     dexfile);
-    __dex_debug_entries.emplace(dexfile.data(), entry);
+                                                     symfile,
+                                                     /*copy_symfile=*/ false);
+    g_dex_debug_entries.emplace(dexfile, entry);
   }
 }
 
-void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
-  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
-  auto it = __dex_debug_entries.find(dexfile.data());
+void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
+  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+  auto it = g_dex_debug_entries.find(dexfile);
   // We register dex files in the class linker and free them in DexFile_closeDexFile, but
   // there might be cases where we load the dex file without using it in the class linker.
-  if (it != __dex_debug_entries.end()) {
+  if (it != g_dex_debug_entries.end()) {
     DeleteJITCodeEntryInternal(__dex_debug_descriptor,
                                __dex_debug_register_code_ptr,
-                               it->second);
-    __dex_debug_entries.erase(it);
+                               /*entry=*/ it->second,
+                               /*free_symfile=*/ false);
+    g_dex_debug_entries.erase(it);
   }
 }
 
-static size_t __jit_debug_mem_usage
-    GUARDED_BY(Locks::native_debug_interface_lock_) = 0;
-
 // Mapping from handle to entry. Used to manage life-time of the entries.
-static std::unordered_map<const void*, JITCodeEntry*> __jit_debug_entries
-    GUARDED_BY(Locks::native_debug_interface_lock_);
+static std::map<const void*, JITCodeEntry*> g_jit_debug_entries
+    GUARDED_BY(*Locks::native_debug_interface_lock_);
 
-void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile) {
+void AddNativeDebugInfoForJit(Thread* self,
+                              const void* code_ptr,
+                              const std::vector<uint8_t>& symfile) {
+  MutexLock mu(self, *Locks::native_debug_interface_lock_);
   DCHECK_NE(symfile.size(), 0u);
 
-  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
-  uint8_t* copy = new uint8_t[symfile.size()];
-  CHECK(copy != nullptr);
-  memcpy(copy, symfile.data(), symfile.size());
-
   JITCodeEntry* entry = CreateJITCodeEntryInternal(
       __jit_debug_descriptor,
       __jit_debug_register_code_ptr,
-      ArrayRef<const uint8_t>(copy, symfile.size()));
-  __jit_debug_mem_usage += sizeof(JITCodeEntry) + entry->symfile_size_;
+      ArrayRef<const uint8_t>(symfile),
+      /*copy_symfile=*/ true);
 
-  // We don't provide handle for type debug info, which means we cannot free it later.
+  // We don't provide code_ptr for type debug info, which means we cannot free it later.
   // (this only happens when --generate-debug-info flag is enabled for the purpose
   // of being debugged with gdb; it does not happen for debuggable apps by default).
-  bool ok = handle == nullptr || __jit_debug_entries.emplace(handle, entry).second;
-  DCHECK(ok) << "Native debug entry already exists for " << std::hex << handle;
-}
-
-void RemoveNativeDebugInfoForJit(const void* handle) {
-  auto it = __jit_debug_entries.find(handle);
-  // We generate JIT native debug info only if the right runtime flags are enabled,
-  // but we try to remove it unconditionally whenever code is freed from JIT cache.
-  if (it != __jit_debug_entries.end()) {
-    JITCodeEntry* entry = it->second;
-    const uint8_t* symfile_addr = entry->symfile_addr_;
-    uint64_t symfile_size = entry->symfile_size_;
-    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
-                               __jit_debug_register_code_ptr,
-                               entry);
-    __jit_debug_entries.erase(it);
-    __jit_debug_mem_usage -= sizeof(JITCodeEntry) + symfile_size;
-    delete[] symfile_addr;
+  if (code_ptr != nullptr) {
+    bool ok = g_jit_debug_entries.emplace(code_ptr, entry).second;
+    DCHECK(ok) << "Native debug entry already exists for " << std::hex << code_ptr;
   }
 }
 
-size_t GetJitNativeDebugInfoMemUsage() {
-  return __jit_debug_mem_usage + __jit_debug_entries.size() * 2 * sizeof(void*);
+void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) {
+  MutexLock mu(self, *Locks::native_debug_interface_lock_);
+  auto it = g_jit_debug_entries.find(code_ptr);
+  // We generate JIT native debug info only if the right runtime flags are enabled,
+  // but we try to remove it unconditionally whenever code is freed from JIT cache.
+  if (it != g_jit_debug_entries.end()) {
+    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
+                               __jit_debug_register_code_ptr,
+                               it->second,
+                               /*free_symfile=*/ true);
+    g_jit_debug_entries.erase(it);
+  }
+}
+
+size_t GetJitMiniDebugInfoMemUsage() {
+  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
+  size_t size = 0;
+  for (auto entry : g_jit_debug_entries) {
+    size += sizeof(JITCodeEntry) + entry.second->symfile_size_ + /*map entry*/ 4 * sizeof(void*);
+  }
+  return size;
 }
 
 }  // namespace art
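
For context on the descriptors manipulated above: they implement the GDB JIT compilation interface, where the debugger breakpoints __jit_debug_register_code() and walks a doubly linked list of in-memory symbol files. A minimal standalone model of the registration side (the struct layout and symbol names are fixed by that interface; everything else is illustrative):

    #include <cstdint>

    extern "C" {
      typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } jit_actions_t;

      struct jit_code_entry {
        jit_code_entry* next_entry;
        jit_code_entry* prev_entry;
        const char* symfile_addr;
        uint64_t symfile_size;
      };

      struct jit_descriptor {
        uint32_t version;
        uint32_t action_flag;
        jit_code_entry* relevant_entry;
        jit_code_entry* first_entry;
      };

      // The debugger sets a breakpoint here; it must survive optimization.
      void __attribute__((noinline)) __jit_debug_register_code() { asm(""); }

      jit_descriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
    }

    void RegisterSymfile(const char* addr, uint64_t size) {
      auto* entry = new jit_code_entry{__jit_debug_descriptor.first_entry, nullptr, addr, size};
      if (entry->next_entry != nullptr) {
        entry->next_entry->prev_entry = entry;
      }
      __jit_debug_descriptor.first_entry = entry;
      __jit_debug_descriptor.relevant_entry = entry;
      __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
      __jit_debug_register_code();  // the debugger reads the descriptor inside this call
    }
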
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 3d25910..4b0d011 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -18,35 +18,37 @@
 #define ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
 
 #include <inttypes.h>
-#include <memory>
 #include <vector>
 
-#include "base/array_ref.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 
 namespace art {
 
+class DexFile;
+class Thread;
+
 // Notify native tools (e.g. libunwind) that a DEX file has been opened.
-// It takes the lock itself. The parameter must point to dex data (not the DexFile* object).
-void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile);
+void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile)
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
 // Notify native tools (e.g. libunwind) that a DEX file has been closed.
-// It takes the lock itself. The parameter must point to dex data (not the DexFile* object).
-void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile);
+void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile)
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
-// Notify native tools about new JITed code by passing in-memory ELF.
-// The handle is the object that is being described (needed to be able to remove the entry).
+// Notify native tools (e.g. libunwind) that JIT has compiled a new method.
 // The method will make a copy of the passed ELF file (to shrink it to the minimum size).
-void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile)
-    REQUIRES(Locks::native_debug_interface_lock_);
+void AddNativeDebugInfoForJit(Thread* self,
+                              const void* code_ptr,
+                              const std::vector<uint8_t>& symfile)
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
-// Notify native debugger that JITed code has been removed and free the debug info.
-void RemoveNativeDebugInfoForJit(const void* handle)
-    REQUIRES(Locks::native_debug_interface_lock_);
+// Notify native tools (e.g. libunwind) that JIT code has been garbage collected.
+void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr)
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
-// Returns approximate memory used by all JITCodeEntries.
-size_t GetJitNativeDebugInfoMemUsage()
-    REQUIRES(Locks::native_debug_interface_lock_);
+// Returns approximate memory used by debug info for JIT code.
+size_t GetJitMiniDebugInfoMemUsage()
+    REQUIRES(!Locks::native_debug_interface_lock_);
 
 }  // namespace art
 
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 4a3ef07..43f32b9 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -58,7 +58,7 @@
 void* Jit::jit_compiler_handle_ = nullptr;
 void* (*Jit::jit_load_)(void) = nullptr;
 void (*Jit::jit_unload_)(void*) = nullptr;
-bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
+bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr;
 void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
 bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
 void (*Jit::jit_update_options_)(void*) = nullptr;
@@ -242,7 +242,7 @@
   return true;
 }
 
-bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
+bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr) {
   DCHECK(Runtime::Current()->UseJitCompilation());
   DCHECK(!method->IsRuntimeMethod());
 
@@ -272,7 +272,7 @@
   VLOG(jit) << "Compiling method "
             << ArtMethod::PrettyMethod(method_to_compile)
             << " osr=" << std::boolalpha << osr;
-  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
+  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr);
   code_cache_->DoneCompiling(method_to_compile, self, osr);
   if (!success) {
     VLOG(jit) << "Failed to compile method "
@@ -291,22 +291,6 @@
   return success;
 }
 
-void Jit::CreateThreadPool() {
-  if (Runtime::Current()->IsSafeMode()) {
-    // Never create the pool in safe mode.
-    return;
-  }
-  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
-  // is not null when we instrument.
-
-  // We need peers as we may report the JIT thread, e.g., in the debugger.
-  constexpr bool kJitPoolNeedsPeers = true;
-  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
-
-  thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
-  Start();
-}
-
 void Jit::DeleteThreadPool() {
   Thread* self = Thread::Current();
   DCHECK(Runtime::Current()->IsShuttingDown(self));
@@ -562,10 +546,11 @@
 
 class JitCompileTask final : public Task {
  public:
-  enum TaskKind {
+  enum class TaskKind {
     kAllocateProfile,
     kCompile,
-    kCompileOsr
+    kCompileBaseline,
+    kCompileOsr,
   };
 
   JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
@@ -582,14 +567,22 @@
 
   void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
-    if (kind_ == kCompile) {
-      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ false);
-    } else if (kind_ == kCompileOsr) {
-      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ true);
-    } else {
-      DCHECK(kind_ == kAllocateProfile);
-      if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
-        VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+    switch (kind_) {
+      case TaskKind::kCompile:
+      case TaskKind::kCompileBaseline:
+      case TaskKind::kCompileOsr: {
+        Runtime::Current()->GetJit()->CompileMethod(
+            method_,
+            self,
+            /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
+            /* osr= */ (kind_ == TaskKind::kCompileOsr));
+        break;
+      }
+      case TaskKind::kAllocateProfile: {
+        if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
+          VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+        }
+        break;
       }
     }
     ProfileSaver::NotifyJitActivity();
@@ -607,6 +600,18 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
 };
 
+void Jit::CreateThreadPool() {
+  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
+  // is not null when we instrument.
+
+  // We need peers as we may report the JIT thread, e.g., in the debugger.
+  constexpr bool kJitPoolNeedsPeers = true;
+  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
+
+  thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
+  Start();
+}
+
 static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (method->IsClassInitializer() || !method->IsCompilable()) {
     // We do not want to compile such methods.
@@ -630,11 +635,10 @@
 
 void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
   if (thread_pool_ == nullptr) {
-    // Should only see this when shutting down, starting up, or in zygote, which doesn't
-    // have a thread pool.
+    // Should only see this when shutting down, starting up, or in safe mode.
     DCHECK(Runtime::Current()->IsShuttingDown(self) ||
            !Runtime::Current()->IsFinishedStarting() ||
-           Runtime::Current()->IsZygote());
+           Runtime::Current()->IsSafeMode());
     return;
   }
   if (IgnoreSamplesForMethod(method)) {
@@ -675,7 +679,8 @@
       if (!success) {
         // We failed allocating. Instead of doing the collection on the Java thread, we push
         // an allocation to a compiler thread, that will do the collection.
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
+        thread_pool_->AddTask(
+            self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
       }
     }
     // Avoid jumping more than one state at a time.
@@ -685,7 +690,7 @@
       if ((new_count >= HotMethodThreshold()) &&
           !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
         DCHECK(thread_pool_ != nullptr);
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
+        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
       }
       // Avoid jumping more than one state at a time.
       new_count = std::min(new_count, static_cast<uint32_t>(OSRMethodThreshold() - 1));
@@ -697,7 +702,8 @@
       DCHECK(!method->IsNative());  // No back edges reported for native methods.
       if ((new_count >= OSRMethodThreshold()) &&  !code_cache_->IsOsrCompiled(method)) {
         DCHECK(thread_pool_ != nullptr);
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
+        thread_pool_->AddTask(
+            self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
       }
     }
   }
@@ -730,7 +736,7 @@
         // The compiler requires a ProfilingInfo object for non-native methods.
         ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
       }
-      JitCompileTask compile_task(method, JitCompileTask::kCompile);
+      JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile);
       // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
       ScopedSetRuntimeThread ssrt(thread);
       compile_task.Run(thread);
@@ -798,7 +804,16 @@
   }
 }
 
-void Jit::PostForkChildAction() {
+void Jit::PostForkChildAction(bool is_zygote) {
+  if (is_zygote) {
+    // Don't transition if this is for a child zygote.
+    return;
+  }
+  if (Runtime::Current()->IsSafeMode()) {
+    // Delete the thread pool, we are not going to JIT.
+    thread_pool_.reset(nullptr);
+    return;
+  }
   // At this point, the compiler options have been adjusted to the particular configuration
   // of the forked child. Parse them again.
   jit_update_options_(jit_compiler_handle_);
@@ -806,6 +821,28 @@
   // Adjust the status of code cache collection: the status from zygote was to not collect.
   code_cache_->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
       !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+
+  if (thread_pool_ != nullptr) {
+    // Remove potential tasks that have been inherited from the zygote.
+    thread_pool_->RemoveAllTasks(Thread::Current());
+
+    // Resume JIT compilation.
+    thread_pool_->CreateThreads();
+  }
+}
+
+void Jit::PreZygoteFork() {
+  if (thread_pool_ == nullptr) {
+    return;
+  }
+  thread_pool_->DeleteThreads();
+}
+
+void Jit::PostZygoteFork() {
+  if (thread_pool_ == nullptr) {
+    return;
+  }
+  thread_pool_->CreateThreads();
 }
 
 }  // namespace jit
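
The new PreZygoteFork/PostZygoteFork/PostForkChildAction trio above encodes a fork discipline: worker threads must not exist across fork(), so the pool's threads are torn down before the zygote forks and recreated on each side, while a forked app additionally drops any compile tasks it inherited. A stubbed-out sketch of the sequencing (all names illustrative, bodies empty on purpose):

    #include <memory>

    class IllustrativeThreadPool {
     public:
      void DeleteThreads() {}   // join and destroy workers; queued tasks remain
      void CreateThreads() {}   // spawn workers again
      void RemoveAllTasks() {}  // drop anything queued
    };

    class IllustrativeJit {
     public:
      void PreZygoteFork() {
        if (pool_ != nullptr) pool_->DeleteThreads();  // no live threads across fork()
      }
      void PostZygoteFork() {  // zygote side, after fork()
        if (pool_ != nullptr) pool_->CreateThreads();
      }
      void PostForkChildAction(bool is_zygote, bool safe_mode) {  // app side
        if (is_zygote) return;                     // child zygotes keep the zygote setup
        if (safe_mode) { pool_.reset(); return; }  // never JIT in safe mode
        if (pool_ != nullptr) {
          pool_->RemoveAllTasks();  // tasks inherited from the zygote are stale
          pool_->CreateThreads();   // resume compilation in the forked app
        }
      }
     private:
      std::unique_ptr<IllustrativeThreadPool> pool_ =
          std::make_unique<IllustrativeThreadPool>();
    };
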
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e12b032..485537c 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -161,7 +161,7 @@
   // Create JIT itself.
   static Jit* Create(JitCodeCache* code_cache, JitOptions* options);
 
-  bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
+  bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   const JitCodeCache* GetCodeCache() const {
@@ -285,8 +285,14 @@
   // Start JIT threads.
   void Start();
 
-  // Transition to a zygote child state.
-  void PostForkChildAction();
+  // Transition to a child state.
+  void PostForkChildAction(bool is_zygote);
+
+  // Prepare for forking.
+  void PreZygoteFork();
+
+  // Adjust state after forking.
+  void PostZygoteFork();
 
  private:
   Jit(JitCodeCache* code_cache, JitOptions* options);
@@ -298,7 +304,7 @@
   static void* jit_compiler_handle_;
   static void* (*jit_load_)(void);
   static void (*jit_unload_)(void*);
-  static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
+  static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool);
   static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
   static void (*jit_update_options_)(void*);
   static bool (*jit_generate_debug_info_)(void*);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 97887cc..d976fec 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -436,6 +436,12 @@
   initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
   max_capacity = RoundDown(max_capacity, 2 * kPageSize);
 
+  used_memory_for_data_ = 0;
+  used_memory_for_code_ = 0;
+  number_of_compilations_ = 0;
+  number_of_osr_compilations_ = 0;
+  number_of_collections_ = 0;
+
   data_pages_ = MemMap();
   exec_pages_ = MemMap();
   non_exec_pages_ = MemMap();
@@ -477,7 +483,7 @@
 JitCodeCache::~JitCodeCache() {}
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
+  return exec_pages_.HasAddress(ptr) || zygote_exec_pages_.HasAddress(ptr);
 }
 
 bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -752,11 +758,14 @@
 }
 
 void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
+  if (IsInZygoteExecSpace(code_ptr)) {
+    // No need to free, this is shared memory.
+    return;
+  }
   uintptr_t allocation = FromCodeToAllocation(code_ptr);
   // Notify native debugger that we are about to remove the code.
   // It does nothing if we are not using native debugger.
-  MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
-  RemoveNativeDebugInfoForJit(code_ptr);
+  RemoveNativeDebugInfoForJit(Thread::Current(), code_ptr);
   if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
     FreeData(GetRootTable(code_ptr));
   }  // else this is a JNI stub without any data.
@@ -1321,7 +1330,7 @@
             return true;
           }
           const void* code = method_header->GetCode();
-          if (code_cache_->ContainsPc(code)) {
+          if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) {
             // Use the atomic set version, as multiple threads are executing this code.
             bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
           }
@@ -1493,7 +1502,7 @@
         // interpreter will update its entry point to the compiled code and call it.
         for (ProfilingInfo* info : profiling_infos_) {
           const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-          if (ContainsPc(entry_point)) {
+          if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
             info->SetSavedEntryPoint(entry_point);
             // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
             // class of the method. We may be concurrently running a GC which makes accessing
@@ -1508,7 +1517,7 @@
         // Change entry points of native methods back to the GenericJNI entrypoint.
         for (const auto& entry : jni_stubs_map_) {
           const JniStubData& data = entry.second;
-          if (!data.IsCompiled()) {
+          if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
             continue;
           }
           // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
@@ -1540,7 +1549,9 @@
     // Iterate over all compiled code and remove entries that are not marked.
     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
       JniStubData* data = &it->second;
-      if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
+      if (IsInZygoteExecSpace(data->GetCode()) ||
+          !data->IsCompiled() ||
+          GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
         ++it;
       } else {
         method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
@@ -1550,7 +1561,7 @@
     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
       const void* code_ptr = it->first;
       uintptr_t allocation = FromCodeToAllocation(code_ptr);
-      if (GetLiveBitmap()->Test(allocation)) {
+      if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
         ++it;
       } else {
         OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
@@ -1571,7 +1582,7 @@
       // Also remove the saved entry point from the ProfilingInfo objects.
       for (ProfilingInfo* info : profiling_infos_) {
         const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-        if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
+        if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
           info->GetMethod()->SetProfilingInfo(nullptr);
         }
 
@@ -1596,6 +1607,9 @@
     for (const auto& entry : jni_stubs_map_) {
       const JniStubData& data = entry.second;
       const void* code_ptr = data.GetCode();
+      if (IsInZygoteExecSpace(code_ptr)) {
+        continue;
+      }
       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       for (ArtMethod* method : data.GetMethods()) {
         if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
@@ -1607,6 +1621,9 @@
     for (const auto& it : method_code_map_) {
       ArtMethod* method = it.second;
       const void* code_ptr = it.first;
+      if (IsInZygoteExecSpace(code_ptr)) {
+        continue;
+      }
       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
@@ -1953,6 +1970,7 @@
         instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
       }
       if (collection_in_progress_) {
+        CHECK(!IsInZygoteExecSpace(data->GetCode()));
         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
       }
     }
@@ -2057,6 +2075,10 @@
 }
 
 void JitCodeCache::FreeCode(uint8_t* code) {
+  if (IsInZygoteExecSpace(code)) {
+    // No need to free, this is shared memory.
+    return;
+  }
   used_memory_for_code_ -= mspace_usable_size(code);
   mspace_free(exec_mspace_, code);
 }
@@ -2068,16 +2090,19 @@
 }
 
 void JitCodeCache::FreeData(uint8_t* data) {
+  if (IsInZygoteDataSpace(data)) {
+    // No need to free, this is shared memory.
+    return;
+  }
   used_memory_for_data_ -= mspace_usable_size(data);
   mspace_free(data_mspace_, data);
 }
 
 void JitCodeCache::Dump(std::ostream& os) {
   MutexLock mu(Thread::Current(), lock_);
-  MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
   os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
      << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
-     << "Current JIT mini-debug-info size: " << PrettySize(GetJitNativeDebugInfoMemUsage()) << "\n"
+     << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
      << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
@@ -2091,13 +2116,11 @@
 }
 
 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
+  if (is_zygote) {
+    // Don't transition if this is for a child zygote.
+    return;
+  }
   MutexLock mu(Thread::Current(), lock_);
-  // Currently, we don't expect any compilations from zygote.
-  CHECK_EQ(number_of_compilations_, 0u);
-  CHECK_EQ(number_of_osr_compilations_, 0u);
-  CHECK(jni_stubs_map_.empty());
-  CHECK(method_code_map_.empty());
-  CHECK(osr_code_map_.empty());
 
   zygote_data_pages_ = std::move(data_pages_);
   zygote_exec_pages_ = std::move(exec_pages_);
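
ContainsPc() above now also consults the mapping inherited from the zygote, and the scattered IsInZygoteExecSpace/IsInZygoteDataSpace checks keep the GC and free paths away from that shared memory. A tiny standalone model of the range test (mirroring the MemMap::HasAddress idea; names illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    class IllustrativeMemMap {
     public:
      IllustrativeMemMap() : begin_(nullptr), size_(0) {}
      IllustrativeMemMap(uint8_t* begin, size_t size) : begin_(begin), size_(size) {}
      // An invalid (moved-from) mapping has size 0 and contains nothing.
      bool HasAddress(const void* addr) const {
        const uint8_t* p = static_cast<const uint8_t*>(addr);
        return begin_ <= p && p < begin_ + size_;
      }
     private:
      uint8_t* begin_;
      size_t size_;
    };

    int main() {
      static uint8_t pages[8192];
      IllustrativeMemMap exec(pages, 4096);
      IllustrativeMemMap zygote_exec(pages + 4096, 4096);
      // The cache "contains" a pc if either its own or the inherited zygote
      // mapping covers it -- the shape of the new ContainsPc().
      const void* pc = pages + 6000;
      assert(!exec.HasAddress(pc) && zygote_exec.HasAddress(pc));
      return 0;
    }
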
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 7a838fd..e2f3357 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,6 +71,7 @@
 
 namespace jit {
 
+class MarkCodeClosure;
 class ScopedCodeCacheWrite;
 
 // Alignment in bits that will suit all architectures.
@@ -387,6 +388,14 @@
 
   const MemMap* GetUpdatableCodeMapping() const;
 
+  bool IsInZygoteDataSpace(const void* ptr) const {
+    return zygote_data_pages_.HasAddress(ptr);
+  }
+
+  bool IsInZygoteExecSpace(const void* ptr) const {
+    return zygote_exec_pages_.HasAddress(ptr);
+  }
+
   bool IsWeakAccessEnabled(Thread* self) const;
   void WaitUntilInlineCacheAccessible(Thread* self)
       REQUIRES(!lock_)
@@ -487,6 +496,7 @@
 
   friend class art::JitJniStubTestHelper;
   friend class ScopedCodeCacheWrite;
+  friend class MarkCodeClosure;
 
   DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
 };
diff --git a/runtime/jni/jni_env_ext.cc b/runtime/jni/jni_env_ext.cc
index efe43ee..976f89b 100644
--- a/runtime/jni/jni_env_ext.cc
+++ b/runtime/jni/jni_env_ext.cc
@@ -21,6 +21,7 @@
 
 #include "android-base/stringprintf.h"
 
+#include "base/mutex.h"
 #include "base/to_str.h"
 #include "check_jni.h"
 #include "indirect_reference_table.h"
diff --git a/runtime/jni/jni_env_ext.h b/runtime/jni/jni_env_ext.h
index 3a007ad..61de074 100644
--- a/runtime/jni/jni_env_ext.h
+++ b/runtime/jni/jni_env_ext.h
@@ -19,8 +19,8 @@
 
 #include <jni.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "indirect_reference_table.h"
 #include "obj_ptr.h"
 #include "reference_table.h"
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index b42d995..d03749c 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_JVALUE_H_
 #define ART_RUNTIME_JVALUE_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 
 #include <stdint.h>
 
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index 6a0f075..3fb83ac 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -23,8 +23,8 @@
 
 #include <android-base/logging.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/bit_utils.h"
 
 namespace art {
diff --git a/runtime/mirror/call_site.h b/runtime/mirror/call_site.h
index 9b6afca..be5bdc9 100644
--- a/runtime/mirror/call_site.h
+++ b/runtime/mirror/call_site.h
@@ -17,7 +17,6 @@
 #ifndef ART_RUNTIME_MIRROR_CALL_SITE_H_
 #define ART_RUNTIME_MIRROR_CALL_SITE_H_
 
-#include "base/utils.h"
 #include "mirror/method_handle_impl.h"
 
 namespace art {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 4e551ad..66b1405 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -22,7 +22,6 @@
 #include "base/enums.h"
 #include "base/iteration_range.h"
 #include "base/stride_iterator.h"
-#include "base/utils.h"
 #include "class_flags.h"
 #include "class_status.h"
 #include "dex/dex_file.h"
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
index 6712630..146adc9 100644
--- a/runtime/mirror/class_ext.cc
+++ b/runtime/mirror/class_ext.cc
@@ -119,5 +119,17 @@
   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_), bytes);
 }
 
+void ClassExt::SetPreRedefineClassDefIndex(uint16_t index) {
+  DCHECK(!Runtime::Current()->IsActiveTransaction());
+  SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, pre_redefine_class_def_index_),
+      static_cast<int32_t>(index));
+}
+
+void ClassExt::SetPreRedefineDexFile(const DexFile* dex_file) {
+  DCHECK(!Runtime::Current()->IsActiveTransaction());
+  SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, pre_redefine_dex_file_ptr_),
+      static_cast<int64_t>(reinterpret_cast<uintptr_t>(dex_file)));
+}
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index 612fd0f..126f94a 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -64,6 +64,20 @@
 
   void SetOriginalDexFile(ObjPtr<Object> bytes) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  uint16_t GetPreRedefineClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return static_cast<uint16_t>(
+        GetField32(OFFSET_OF_OBJECT_MEMBER(ClassExt, pre_redefine_class_def_index_)));
+  }
+
+  void SetPreRedefineClassDefIndex(uint16_t index) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  const DexFile* GetPreRedefineDexFile() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
+        GetField64(OFFSET_OF_OBJECT_MEMBER(ClassExt, pre_redefine_dex_file_ptr_))));
+  }
+
+  void SetPreRedefineDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_);
+
   void SetObsoleteArrays(ObjPtr<PointerArray> methods, ObjPtr<ObjectArray<DexCache>> dex_caches)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -88,6 +102,10 @@
   // The saved verification error of this class.
   HeapReference<Object> verify_error_;
 
+  // Native pointer to DexFile and ClassDef index of this class before it was JVMTI-redefined.
+  int64_t pre_redefine_dex_file_ptr_;
+  int32_t pre_redefine_class_def_index_;
+
   friend struct art::ClassExtOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(ClassExt);
 };
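
The pre_redefine_dex_file_ptr_ field above stores a native pointer in a 64-bit field. The round-trip goes through uintptr_t so it is well defined on both 32- and 64-bit targets; a standalone sketch:

    #include <cassert>
    #include <cstdint>

    struct IllustrativeDexFile { int dummy; };

    int64_t PackPointer(const IllustrativeDexFile* p) {
      // Widen through uintptr_t, then reinterpret the bits as a signed field.
      return static_cast<int64_t>(reinterpret_cast<uintptr_t>(p));
    }

    const IllustrativeDexFile* UnpackPointer(int64_t field) {
      return reinterpret_cast<const IllustrativeDexFile*>(static_cast<uintptr_t>(field));
    }

    int main() {
      IllustrativeDexFile f{42};
      assert(UnpackPointer(PackPointer(&f)) == &f);  // lossless round-trip
      return 0;
    }
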
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index e3cb12f..783ba6a 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_CLASS_LOADER_H_
 #define ART_RUNTIME_MIRROR_CLASS_LOADER_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "obj_ptr.h"
 #include "object.h"
 #include "object_reference.h"
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 58b199d..c742928 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -19,7 +19,7 @@
 
 #include "array.h"
 #include "base/bit_utils.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file_types.h"
 #include "gc_root.h"  // Note: must not use -inl here to avoid circular dependency.
 #include "object.h"
diff --git a/runtime/mirror/method_type.h b/runtime/mirror/method_type.h
index 014b211..9cceff9 100644
--- a/runtime/mirror/method_type.h
+++ b/runtime/mirror/method_type.h
@@ -17,7 +17,6 @@
 #ifndef ART_RUNTIME_MIRROR_METHOD_TYPE_H_
 #define ART_RUNTIME_MIRROR_METHOD_TYPE_H_
 
-#include "base/utils.h"
 #include "object_array.h"
 #include "object.h"
 #include "string.h"
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 98cc4a8..b984474 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -24,7 +24,6 @@
 #include "android-base/stringprintf.h"
 
 #include "array-inl.h"
-#include "base/utils.h"
 #include "class.h"
 #include "obj_ptr-inl.h"
 #include "object-inl.h"
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index d6a39aa..8636928 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -19,7 +19,7 @@
 
 #include "base/atomic.h"
 #include "base/globals.h"
-#include "base/mutex.h"  // For Locks::mutator_lock_.
+#include "base/locks.h"  // For Locks::mutator_lock_.
 #include "heap_poisoning.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 63c5ae5..9ace4f7 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -18,8 +18,8 @@
 #define ART_RUNTIME_MIRROR_REFERENCE_H_
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "obj_ptr.h"
 #include "object.h"
 #include "read_barrier_option.h"
diff --git a/runtime/mirror/string-alloc-inl.h b/runtime/mirror/string-alloc-inl.h
index c026c67..c31eccf 100644
--- a/runtime/mirror/string-alloc-inl.h
+++ b/runtime/mirror/string-alloc-inl.h
@@ -23,7 +23,6 @@
 #include "array.h"
 #include "base/bit_utils.h"
 #include "base/globals.h"
-#include "base/utils.h"
 #include "class.h"
 #include "class_root.h"
 #include "gc/heap-inl.h"
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index d89ef1e..e11906a 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -21,7 +21,6 @@
 #include "android-base/stringprintf.h"
 
 #include "base/globals.h"
-#include "base/utils.h"
 #include "class-inl.h"
 #include "common_throws.h"
 #include "dex/utf.h"
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 085dcab..7c25529 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -1947,7 +1947,7 @@
 
   // Determine offset and limit for accesses.
   int32_t byte_buffer_offset;
-  if (native_address == 0l) {
+  if (native_address == 0L) {
     // Accessing a heap allocated byte buffer.
     byte_buffer_offset = byte_buffer->GetField32(
         GetMemberOffset(WellKnownClasses::java_nio_ByteBuffer_offset));
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
index c943402..3968239 100644
--- a/runtime/monitor_objects_stack_visitor.h
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -20,7 +20,7 @@
 #include <android-base/logging.h>
 
 #include "art_method.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "monitor.h"
 #include "stack.h"
 #include "thread.h"
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 203d200..1da91b0 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -337,8 +337,7 @@
     int32_t i = kDexFileIndexStart;  // Oat file is at index 0.
     for (const DexFile* dex_file : dex_files) {
       if (dex_file != nullptr) {
-        RemoveNativeDebugInfoForDex(soa.Self(), ArrayRef<const uint8_t>(dex_file->Begin(),
-                                                                        dex_file->Size()));
+        RemoveNativeDebugInfoForDex(soa.Self(), dex_file);
         // Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
         // are calls to DexFile.close while the ART DexFile is still in use.
         if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index e213dc7..892d4cc 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -24,7 +24,8 @@
 #include <limits.h>
 #include "nativehelper/scoped_utf_chars.h"
 
-#include "android-base/stringprintf.h"
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
 
 #include "arch/instruction_set.h"
 #include "art_method-inl.h"
@@ -222,7 +223,8 @@
 }
 
 static jstring VMRuntime_bootClassPath(JNIEnv* env, jobject) {
-  return env->NewStringUTF(DefaultToDot(Runtime::Current()->GetBootClassPathString()));
+  std::string boot_class_path = android::base::Join(Runtime::Current()->GetBootClassPath(), ':');
+  return env->NewStringUTF(DefaultToDot(boot_class_path));
 }
 
 static jstring VMRuntime_classPath(JNIEnv* env, jobject) {
@@ -269,7 +271,7 @@
 #endif
 }
 
-static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) {
+static void VMRuntime_registerNativeAllocationInternal(JNIEnv* env, jobject, jint bytes) {
   if (UNLIKELY(bytes < 0)) {
     ScopedObjectAccess soa(env);
     ThrowRuntimeException("allocation size negative %d", bytes);
@@ -278,11 +280,7 @@
   Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast<size_t>(bytes));
 }
 
-static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
-  Runtime::Current()->RegisterSensitiveThread();
-}
-
-static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
+static void VMRuntime_registerNativeFreeInternal(JNIEnv* env, jobject, jint bytes) {
   if (UNLIKELY(bytes < 0)) {
     ScopedObjectAccess soa(env);
     ThrowRuntimeException("allocation size negative %d", bytes);
@@ -291,6 +289,18 @@
   Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes));
 }
 
+static jint VMRuntime_getNotifyNativeInterval(JNIEnv*, jclass) {
+  return Runtime::Current()->GetHeap()->GetNotifyNativeInterval();
+}
+
+static void VMRuntime_notifyNativeAllocationsInternal(JNIEnv* env, jobject) {
+  Runtime::Current()->GetHeap()->NotifyNativeAllocations(env);
+}
+
+static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
+  Runtime::Current()->RegisterSensitiveThread();
+}
+
 static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
   Runtime* runtime = Runtime::Current();
   runtime->UpdateProcessState(static_cast<ProcessState>(process_state));
@@ -708,9 +718,11 @@
   FAST_NATIVE_METHOD(VMRuntime, newUnpaddedArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
   NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
-  NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
+  NATIVE_METHOD(VMRuntime, registerNativeAllocationInternal, "(I)V"),
+  NATIVE_METHOD(VMRuntime, registerNativeFreeInternal, "(I)V"),
+  NATIVE_METHOD(VMRuntime, getNotifyNativeInterval, "()I"),
+  NATIVE_METHOD(VMRuntime, notifyNativeAllocationsInternal, "()V"),
   NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"),
-  NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
   NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"),
   NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"),
   NATIVE_METHOD(VMRuntime, runHeapTasks, "()V"),
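
VMRuntime_bootClassPath now rebuilds the single-string form at the JNI boundary by joining the runtime's boot class path vector with ':'. For readers unfamiliar with android::base::Join, a standard-library-only sketch of an equivalent join (hypothetical helper name):

    #include <string>
    #include <vector>

    // Join boot class path components with ':', matching what the
    // android::base::Join call produces for this element type and separator.
    std::string JoinBootClassPath(const std::vector<std::string>& bcp) {
      std::string joined;
      for (size_t i = 0; i < bcp.size(); ++i) {
        if (i != 0) {
          joined += ':';
        }
        joined += bcp[i];
      }
      return joined;
    }
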
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 530371d..b7ac1e8 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -44,10 +44,6 @@
 #include "thread_list.h"
 #include "trace.h"
 
-#if defined(__linux__)
-#include <sys/prctl.h>
-#endif
-
 #include <sys/resource.h>
 
 namespace art {
@@ -59,37 +55,6 @@
 
 using android::base::StringPrintf;
 
-static void EnableDebugger() {
-#if defined(__linux__)
-  // To let a non-privileged gdbserver attach to this
-  // process, we must set our dumpable flag.
-  if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) == -1) {
-    PLOG(ERROR) << "prctl(PR_SET_DUMPABLE) failed for pid " << getpid();
-  }
-
-  // Even if Yama is on a non-privileged native debugger should
-  // be able to attach to the debuggable app.
-  if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == -1) {
-    // if Yama is off prctl(PR_SET_PTRACER) returns EINVAL - don't log in this
-    // case since it's expected behaviour.
-    if (errno != EINVAL) {
-      PLOG(ERROR) << "prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) failed for pid " << getpid();
-    }
-  }
-#endif
-  // We don't want core dumps, though, so set the soft limit on core dump size
-  // to 0 without changing the hard limit.
-  rlimit rl;
-  if (getrlimit(RLIMIT_CORE, &rl) == -1) {
-    PLOG(ERROR) << "getrlimit(RLIMIT_CORE) failed for pid " << getpid();
-  } else {
-    rl.rlim_cur = 0;
-    if (setrlimit(RLIMIT_CORE, &rl) == -1) {
-      PLOG(ERROR) << "setrlimit(RLIMIT_CORE) failed for pid " << getpid();
-    }
-  }
-}
-
 class ClassSet {
  public:
   // The number of classes we reasonably expect to have to look at. Realistically the number is more
@@ -211,9 +176,6 @@
   }
 
   Dbg::SetJdwpAllowed((runtime_flags & DEBUG_ENABLE_JDWP) != 0);
-  if ((runtime_flags & DEBUG_ENABLE_JDWP) != 0) {
-    EnableDebugger();
-  }
   runtime_flags &= ~DEBUG_ENABLE_JDWP;
 
   const bool safe_mode = (runtime_flags & DEBUG_ENABLE_SAFEMODE) != 0;
@@ -287,6 +249,13 @@
   return reinterpret_cast<jlong>(ThreadForEnv(env));
 }
 
+static void ZygoteHooks_nativePostZygoteFork(JNIEnv*, jclass) {
+  Runtime* runtime = Runtime::Current();
+  if (runtime->IsZygote()) {
+    runtime->PostZygoteFork();
+  }
+}
+
 static void ZygoteHooks_nativePostForkSystemServer(JNIEnv* env ATTRIBUTE_UNUSED,
                                                    jclass klass ATTRIBUTE_UNUSED) {
   // This JIT code cache for system server is created whilst the runtime is still single threaded.
@@ -343,7 +312,7 @@
           /* is_system_server= */ false, is_zygote);
     }
     // This must be called after EnableDebugFeatures.
-    Runtime::Current()->GetJit()->PostForkChildAction();
+    Runtime::Current()->GetJit()->PostForkChildAction(is_zygote);
   }
 
   // Update tracing.
@@ -441,6 +410,7 @@
 
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(ZygoteHooks, nativePreFork, "()J"),
+  NATIVE_METHOD(ZygoteHooks, nativePostZygoteFork, "()V"),
   NATIVE_METHOD(ZygoteHooks, nativePostForkSystemServer, "()V"),
   NATIVE_METHOD(ZygoteHooks, nativePostForkChild, "(JIZZLjava/lang/String;)V"),
   NATIVE_METHOD(ZygoteHooks, startZygoteNoThreadCreation, "()V"),
diff --git a/runtime/non_debuggable_classes.h b/runtime/non_debuggable_classes.h
index e1b5633..e2c51e6 100644
--- a/runtime/non_debuggable_classes.h
+++ b/runtime/non_debuggable_classes.h
@@ -19,7 +19,7 @@
 
 #include <vector>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 
 namespace art {
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index 71c6a82..ffec179 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_NTH_CALLER_VISITOR_H_
 
 #include "art_method.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "stack.h"
 
 namespace art {
diff --git a/runtime/oat.cc b/runtime/oat.cc
index e931b28..d7c968f 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -79,8 +79,7 @@
       quick_generic_jni_trampoline_offset_(0),
       quick_imt_conflict_trampoline_offset_(0),
       quick_resolution_trampoline_offset_(0),
-      quick_to_interpreter_bridge_offset_(0),
-      boot_image_checksum_(0) {
+      quick_to_interpreter_bridge_offset_(0) {
   // Don't want asserts in header as they would be checked in each file that includes it. But the
   // fields are private, so we check inside a method.
   static_assert(sizeof(magic_) == sizeof(kOatMagic),
@@ -316,16 +315,6 @@
   quick_to_interpreter_bridge_offset_ = offset;
 }
 
-uint32_t OatHeader::GetBootImageChecksum() const {
-  CHECK(IsValid());
-  return boot_image_checksum_;
-}
-
-void OatHeader::SetBootImageChecksum(uint32_t boot_image_checksum) {
-  CHECK(IsValid());
-  boot_image_checksum_ = boot_image_checksum;
-}
-
 uint32_t OatHeader::GetKeyValueStoreSize() const {
   CHECK(IsValid());
   return key_value_store_size_;
diff --git a/runtime/oat.h b/runtime/oat.h
index ee46f42..ded1489 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,8 +31,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: Image checksums.
-  static constexpr uint8_t kOatVersion[] = { '1', '6', '4', '\0' };
+  // Last oat version changed reason: Partial boot image.
+  static constexpr uint8_t kOatVersion[] = { '1', '6', '6', '\0' };
 
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
   static constexpr const char* kDebuggableKey = "debuggable";
@@ -40,6 +40,7 @@
   static constexpr const char* kCompilerFilter = "compiler-filter";
   static constexpr const char* kClassPathKey = "classpath";
   static constexpr const char* kBootClassPathKey = "bootclasspath";
+  static constexpr const char* kBootClassPathChecksumsKey = "bootclasspath-checksums";
   static constexpr const char* kConcurrentCopying = "concurrent-copying";
   static constexpr const char* kCompilationReasonKey = "compilation-reason";
 
@@ -93,9 +94,6 @@
   InstructionSet GetInstructionSet() const;
   uint32_t GetInstructionSetFeaturesBitmap() const;
 
-  uint32_t GetBootImageChecksum() const;
-  void SetBootImageChecksum(uint32_t boot_image_checksum);
-
   uint32_t GetKeyValueStoreSize() const;
   const uint8_t* GetKeyValueStore() const;
   const char* GetStoreValueByKey(const char* key) const;
@@ -137,8 +135,6 @@
   uint32_t quick_resolution_trampoline_offset_;
   uint32_t quick_to_interpreter_bridge_offset_;
 
-  uint32_t boot_image_checksum_;
-
   uint32_t key_value_store_size_;
   uint8_t key_value_store_[0];  // note variable width data at end
 
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 721fab9..b71c4e8 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -18,6 +18,8 @@
 #define ART_RUNTIME_OAT_FILE_INL_H_
 
 #include "oat_file.h"
+
+#include "base/utils.h"
 #include "oat_quick_method_header.h"
 
 namespace art {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 4294baf..ab6e62d 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -27,7 +27,6 @@
 #include "base/safe_map.h"
 #include "base/stringpiece.h"
 #include "base/tracking_safe_map.h"
-#include "base/utils.h"
 #include "class_status.h"
 #include "compiler_filter.h"
 #include "dex/dex_file.h"
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 6f32b98..8b81bb9 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -419,7 +419,7 @@
       // starts up.
       LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
         << "Allow oat file use. This is potentially dangerous.";
-    } else if (file.GetOatHeader().GetBootImageChecksum() != image_info->boot_image_checksum) {
+    } else if (!image_info->ValidateBootClassPathChecksums(file)) {
       VLOG(oat) << "Oat image checksum does not match image checksum.";
       return kOatBootImageOutOfDate;
     }
@@ -560,6 +560,13 @@
   return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
 }
 
+bool OatFileAssistant::ImageInfo::ValidateBootClassPathChecksums(const OatFile& oat_file) const {
+  const char* oat_boot_class_path_checksums =
+      oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+  return oat_boot_class_path_checksums != nullptr &&
+         oat_boot_class_path_checksums == boot_class_path_checksums;
+}
+
 std::unique_ptr<OatFileAssistant::ImageInfo>
 OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
   CHECK(error_msg != nullptr);
@@ -567,14 +574,11 @@
   Runtime* runtime = Runtime::Current();
   std::unique_ptr<ImageInfo> info(new ImageInfo());
   info->location = runtime->GetImageLocation();
-
-  std::unique_ptr<ImageHeader> image_header(
-      gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
-  if (image_header == nullptr) {
+  info->boot_class_path_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+      runtime->GetBootClassPath(), info->location, isa, error_msg);
+  if (info->boot_class_path_checksums.empty()) {
     return nullptr;
   }
-
-  info->boot_image_checksum = image_header->GetImageChecksum();
   return info;
 }
 
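Boot image validation switches from comparing a single uint32 image checksum to comparing a checksum string recorded in the oat file's key-value store. A compressed sketch of the new check, assuming only that the store lookup returns a C string or nullptr when the key is absent:

    #include <string>

    // Mirrors ImageInfo::ValidateBootClassPathChecksums: a missing key or a
    // mismatched value both mean the oat file's boot image is out of date.
    bool BootClassPathChecksumsMatch(const char* oat_value,          // from key-value store
                                     const std::string& expected) {  // runtime-computed
      return oat_value != nullptr && expected == oat_value;
    }

A null lookup result covers oat files written before the key existed; as the hunk above shows, the caller reports either failure mode as kOatBootImageOutOfDate.
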
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 09c9d3b..def55b8 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -246,8 +246,10 @@
 
  private:
   struct ImageInfo {
-    uint32_t boot_image_checksum = 0;
+    bool ValidateBootClassPathChecksums(const OatFile& oat_file) const;
+
     std::string location;
+    std::string boot_class_path_checksums;
 
     static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa,
                                                           std::string* error_msg);
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 2c882ec..9552ca3 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -27,6 +27,7 @@
 #include "base/bit_vector-inl.h"
 #include "base/file_utils.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "class_linker.h"
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
index 7d96a7a..99e1b73 100644
--- a/runtime/oat_file_manager.h
+++ b/runtime/oat_file_manager.h
@@ -23,8 +23,8 @@
 #include <unordered_map>
 #include <vector>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "jni.h"
 
 namespace art {
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index efbb66f..9e2ee29 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -21,8 +21,8 @@
 #include <type_traits>
 
 #include "base/globals.h"
+#include "base/locks.h"  // For Locks::mutator_lock_.
 #include "base/macros.h"
-#include "base/mutex.h"  // For Locks::mutator_lock_.
 
 // Always inline ObjPtr methods even in debug builds.
 #define OBJPTR_INLINE __attribute__ ((always_inline))
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index 5916f90..15b763a 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_OBJECT_LOCK_H_
 #define ART_RUNTIME_OBJECT_LOCK_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "handle.h"
 
 namespace art {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 29b5690..17ff3a2 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -20,6 +20,7 @@
 #include <sstream>
 
 #include <android-base/logging.h>
+#include <android-base/strings.h>
 
 #include "base/file_utils.h"
 #include "base/macros.h"
@@ -78,7 +79,7 @@
       .Define("-showversion")
           .IntoKey(M::ShowVersion)
       .Define("-Xbootclasspath:_")
-          .WithType<std::string>()
+          .WithType<ParseStringList<':'>>()  // std::vector<std::string>, split by :
           .IntoKey(M::BootClassPath)
       .Define("-Xbootclasspath-locations:_")
           .WithType<ParseStringList<':'>>()  // std::vector<std::string>, split by :
@@ -513,7 +514,7 @@
                  GetInstructionSetString(kRuntimeISA));
     Exit(0);
   } else if (args.Exists(M::BootClassPath)) {
-    LOG(INFO) << "setting boot class path to " << *args.Get(M::BootClassPath);
+    LOG(INFO) << "setting boot class path to " << args.Get(M::BootClassPath)->Join();
   }
 
   if (args.GetOrDefault(M::Interpret)) {
@@ -525,8 +526,9 @@
   }
 
   // Set a default boot class path if we didn't get an explicit one via command line.
-  if (getenv("BOOTCLASSPATH") != nullptr) {
-    args.SetIfMissing(M::BootClassPath, std::string(getenv("BOOTCLASSPATH")));
+  const char* env_bcp = getenv("BOOTCLASSPATH");
+  if (env_bcp != nullptr) {
+    args.SetIfMissing(M::BootClassPath, ParseStringList<':'>::Split(env_bcp));
   }
 
   // Set a default class path if we didn't get an explicit one via command line.
@@ -586,22 +588,20 @@
     args.Set(M::BackgroundGc, BackgroundGcOption { background_collector_type_ });
   }
 
-  auto boot_class_path_string = args.GetOrDefault(M::BootClassPath);
-  {
-    auto&& boot_class_path = args.GetOrDefault(M::BootClassPath);
-    auto&& boot_class_path_locations = args.GetOrDefault(M::BootClassPathLocations);
-    if (args.Exists(M::BootClassPathLocations)) {
-      size_t boot_class_path_count = ParseStringList<':'>::Split(boot_class_path).Size();
-
-      if (boot_class_path_count != boot_class_path_locations.Size()) {
-        Usage("The number of boot class path files does not match"
-            " the number of boot class path locations given\n"
-            "  boot class path files     (%zu): %s\n"
-            "  boot class path locations (%zu): %s\n",
-            boot_class_path.size(), boot_class_path_string.c_str(),
-            boot_class_path_locations.Size(), boot_class_path_locations.Join().c_str());
-        return false;
-      }
+  const ParseStringList<':'>* boot_class_path_locations = args.Get(M::BootClassPathLocations);
+  if (boot_class_path_locations != nullptr && boot_class_path_locations->Size() != 0u) {
+    const ParseStringList<':'>* boot_class_path = args.Get(M::BootClassPath);
+    if (boot_class_path == nullptr ||
+        boot_class_path_locations->Size() != boot_class_path->Size()) {
+      Usage("The number of boot class path files does not match"
+          " the number of boot class path locations given\n"
+          "  boot class path files     (%zu): %s\n"
+          "  boot class path locations (%zu): %s\n",
+          (boot_class_path != nullptr) ? boot_class_path->Size() : 0u,
+          (boot_class_path != nullptr) ? boot_class_path->Join().c_str() : "<nil>",
+          boot_class_path_locations->Size(),
+          boot_class_path_locations->Join().c_str());
+      return false;
     }
   }
 
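-Xbootclasspath now parses directly into a ':'-separated list, the same type as -Xbootclasspath-locations, so the size check above can compare the two lists without a late re-split. A sketch of such a split using only the standard library (ART's real implementation is ParseStringList<':'>::Split; note this simple version drops a trailing empty element):

    #include <sstream>
    #include <string>
    #include <vector>

    // Split a ':'-separated class path into its components.
    std::vector<std::string> SplitClassPath(const std::string& class_path) {
      std::vector<std::string> parts;
      std::stringstream stream(class_path);
      std::string item;
      while (std::getline(stream, item, ':')) {
        parts.push_back(item);
      }
      return parts;
    }
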
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 9e5d9ab..cbb7b82 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -26,7 +26,7 @@
 class ParsedOptionsTest : public ::testing::Test {
  public:
   static void SetUpTestCase() {
-    CommonRuntimeTest::SetUpAndroidRoot();
+    CommonRuntimeTest::SetUpAndroidRootEnvVars();
   }
 };
 
@@ -40,8 +40,7 @@
   boot_class_path += "-Xbootclasspath:";
 
   bool first_dex_file = true;
-  for (const std::string &dex_file_name :
-           CommonRuntimeTest::GetLibCoreDexFileNames()) {
+  for (const std::string &dex_file_name : CommonRuntimeTest::GetLibCoreDexFileNames()) {
     if (!first_dex_file) {
       class_path += ":";
     } else {
@@ -50,6 +49,8 @@
     class_path += dex_file_name;
   }
   boot_class_path += class_path;
+  std::vector<std::string> expected_boot_class_path;
+  Split(class_path, ':', &expected_boot_class_path);
 
   RuntimeOptions options;
   options.push_back(std::make_pair(boot_class_path.c_str(), nullptr));
@@ -78,9 +79,11 @@
   using Opt = RuntimeArgumentMap;
 
 #define EXPECT_PARSED_EQ(expected, actual_key) EXPECT_EQ(expected, map.GetOrDefault(actual_key))
+#define EXPECT_PARSED_EQ_AS_STRING_VECTOR(expected, actual_key) \
+  EXPECT_EQ(expected, static_cast<std::vector<std::string>>(map.GetOrDefault(actual_key)))
 #define EXPECT_PARSED_EXISTS(actual_key) EXPECT_TRUE(map.Exists(actual_key))
 
-  EXPECT_PARSED_EQ(class_path, Opt::BootClassPath);
+  EXPECT_PARSED_EQ_AS_STRING_VECTOR(expected_boot_class_path, Opt::BootClassPath);
   EXPECT_PARSED_EQ(class_path, Opt::ClassPath);
   EXPECT_PARSED_EQ(std::string("boot_image"), Opt::Image);
   EXPECT_PARSED_EXISTS(Opt::CheckJni);
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 672303a..1bcbcff 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -19,7 +19,6 @@
 
 #include "read_barrier.h"
 
-#include "base/utils.h"
 #include "gc/accounting/read_barrier_table.h"
 #include "gc/collector/concurrent_copying-inl.h"
 #include "gc/heap.h"
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 0741da6..3b89377 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -19,8 +19,8 @@
 
 #include <android-base/logging.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/runtime_debug.h"
 #include "gc_root.h"
 #include "jni.h"
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 6af5ca5..6388944 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -23,7 +23,7 @@
 #include <vector>
 
 #include "base/allocator.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/reflection-inl.h b/runtime/reflection-inl.h
index 9fe4bca..8ad61f0 100644
--- a/runtime/reflection-inl.h
+++ b/runtime/reflection-inl.h
@@ -21,7 +21,6 @@
 
 #include "android-base/stringprintf.h"
 
-#include "base/utils.h"
 #include "common_throws.h"
 #include "dex/descriptors_names.h"
 #include "dex/primitive.h"
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 74580a2..574e302 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_REFLECTION_H_
 #define ART_RUNTIME_REFLECTION_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/primitive.h"
 #include "jni.h"
 #include "obj_ptr.h"
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index e6cc471..2ffaf98 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -23,6 +23,7 @@
 #include "art_method.h"
 #include "base/callee_save_type.h"
 #include "base/casts.h"
+#include "base/mutex.h"
 #include "entrypoints/quick/callee_save_frame.h"
 #include "gc_root-inl.h"
 #include "interpreter/mterp/mterp.h"
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index ab79b9e..c5a8a23 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -233,8 +233,7 @@
       class_linker_(nullptr),
       signal_catcher_(nullptr),
       java_vm_(nullptr),
-      fault_message_lock_("Fault message lock"),
-      fault_message_(""),
+      fault_message_(nullptr),
       threads_being_born_(0),
       shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
       shutting_down_(false),
@@ -278,7 +277,6 @@
       // Initially assume we perceive jank in case the process state is never updated.
       process_state_(kProcessStateJankPerceptible),
       zygote_no_threads_(false),
-      process_cpu_start_time_(ProcessCpuNanoTime()),
       verifier_logging_threshold_ms_(100) {
   static_assert(Runtime::kCalleeSaveSize ==
                     static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
@@ -322,20 +320,32 @@
   }
 
   if (dump_gc_performance_on_shutdown_) {
-    process_cpu_end_time_ = ProcessCpuNanoTime();
+    heap_->CalculatePreGcWeightedAllocatedBytes();
+    heap_->CalculatePostGcWeightedAllocatedBytes();
+    uint64_t process_cpu_end_time = ProcessCpuNanoTime();
     ScopedLogSeverity sls(LogSeverity::INFO);
     // This can't be called from the Heap destructor below because it
     // could call RosAlloc::InspectAll() which needs the thread_list
     // to be still alive.
     heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
 
-    uint64_t process_cpu_time = process_cpu_end_time_ - process_cpu_start_time_;
+    uint64_t process_cpu_time = process_cpu_end_time - heap_->GetProcessCpuStartTime();
     uint64_t gc_cpu_time = heap_->GetTotalGcCpuTime();
     float ratio = static_cast<float>(gc_cpu_time) / process_cpu_time;
     LOG_STREAM(INFO) << "GC CPU time " << PrettyDuration(gc_cpu_time)
         << " out of process CPU time " << PrettyDuration(process_cpu_time)
         << " (" << ratio << ")"
         << "\n";
+    double pre_gc_weighted_allocated_bytes =
+        heap_->GetPreGcWeightedAllocatedBytes() / process_cpu_time;
+    double post_gc_weighted_allocated_bytes =
+        heap_->GetPostGcWeightedAllocatedBytes() / process_cpu_time;
+
+    LOG_STREAM(INFO) << "Pre GC weighted bytes allocated over CPU time: "
+        << " (" <<  PrettySize(pre_gc_weighted_allocated_bytes)  << ")";
+    LOG_STREAM(INFO) << "Post GC weighted bytes allocated over CPU time: "
+        << " (" <<  PrettySize(post_gc_weighted_allocated_bytes)  << ")"
+        << "\n";
   }
 
   if (jit_ != nullptr) {
@@ -538,18 +548,18 @@
 void Runtime::Abort(const char* msg) {
   auto old_value = gAborting.fetch_add(1);  // set before taking any locks
 
-#ifdef ART_TARGET_ANDROID
+  // Only set the first abort message.
   if (old_value == 0) {
-    // Only set the first abort message.
-    android_set_abort_message(msg);
-  }
-#else
-  UNUSED(old_value);
-#endif
-
 #ifdef ART_TARGET_ANDROID
-  android_set_abort_message(msg);
+    android_set_abort_message(msg);
+#else
+    // Set the runtime fault message in case the unexpected-signal handler runs.
+    Runtime* current = Runtime::Current();
+    if (current != nullptr) {
+      current->SetFaultMessage(msg);
+    }
 #endif
+  }
 
   // Ensure that we don't have multiple threads trying to abort at once,
   // which would result in significantly worse diagnostics.
@@ -590,9 +600,18 @@
 }
 
 void Runtime::PreZygoteFork() {
+  if (GetJit() != nullptr) {
+    GetJit()->PreZygoteFork();
+  }
   heap_->PreZygoteFork();
 }
 
+void Runtime::PostZygoteFork() {
+  if (GetJit() != nullptr) {
+    GetJit()->PostZygoteFork();
+  }
+}
+
 void Runtime::CallExitHook(jint status) {
   if (exit_ != nullptr) {
     ScopedThreadStateChange tsc(Thread::Current(), kNative);
@@ -906,10 +925,6 @@
     }
   }
 
-  if (jit_ != nullptr) {
-    jit_->CreateThreadPool();
-  }
-
   // Create the thread pools.
   heap_->CreateThreadPool();
   // Reset the gc performance data at zygote fork so that the GCs
@@ -956,8 +971,8 @@
   VLOG(startup) << "Runtime::StartDaemonThreads exiting";
 }
 
-static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
-                           const std::vector<std::string>& dex_locations,
+static size_t OpenDexFiles(ArrayRef<const std::string> dex_filenames,
+                           ArrayRef<const std::string> dex_locations,
                            std::vector<std::unique_ptr<const DexFile>>* dex_files) {
   DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
   size_t failure_count = 0;
@@ -1073,7 +1088,45 @@
   Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
                 runtime_options.GetOrDefault(Opt::StackDumpLockProfThreshold));
 
-  boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
+  image_location_ = runtime_options.GetOrDefault(Opt::Image);
+  SetInstructionSet(runtime_options.GetOrDefault(Opt::ImageInstructionSet));
+  boot_class_path_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
+  boot_class_path_locations_ = runtime_options.ReleaseOrDefault(Opt::BootClassPathLocations);
+  DCHECK(boot_class_path_locations_.empty() ||
+         boot_class_path_locations_.size() == boot_class_path_.size());
+  if (boot_class_path_.empty()) {
+    // Try to extract the boot class path from the system boot image.
+    if (image_location_.empty()) {
+      LOG(ERROR) << "Empty boot class path, cannot continue without image.";
+      return false;
+    }
+    std::string system_oat_filename = ImageHeader::GetOatLocationFromImageLocation(
+        GetSystemImageFilename(image_location_.c_str(), instruction_set_));
+    std::string system_oat_location = ImageHeader::GetOatLocationFromImageLocation(image_location_);
+    std::string error_msg;
+    std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                    system_oat_filename,
+                                                    system_oat_location,
+                                                    /*executable=*/ false,
+                                                    /*low_4gb=*/ false,
+                                                    /*abs_dex_location=*/ nullptr,
+                                                    /*reservation=*/ nullptr,
+                                                    &error_msg));
+    if (oat_file == nullptr) {
+      LOG(ERROR) << "Could not open boot oat file for extracting boot class path: " << error_msg;
+      return false;
+    }
+    const OatHeader& oat_header = oat_file->GetOatHeader();
+    const char* oat_boot_class_path = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
+    if (oat_boot_class_path != nullptr) {
+      Split(oat_boot_class_path, ':', &boot_class_path_);
+    }
+    if (boot_class_path_.empty()) {
+      LOG(ERROR) << "Boot class path missing from boot image oat file " << oat_file->GetLocation();
+      return false;
+    }
+  }
+
   class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
   properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
 
@@ -1099,7 +1152,6 @@
     }
   }
   image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
-  image_location_ = runtime_options.GetOrDefault(Opt::Image);
 
   max_spins_before_thin_lock_inflation_ =
       runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);
@@ -1168,8 +1220,10 @@
                        foreground_heap_growth_multiplier,
                        runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
                        runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
-                       runtime_options.GetOrDefault(Opt::Image),
-                       runtime_options.GetOrDefault(Opt::ImageInstructionSet),
+                       GetBootClassPath(),
+                       GetBootClassPathLocations(),
+                       image_location_,
+                       instruction_set_,
                        // Override the collector type to CC if the read barrier config.
                        kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
                        kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
@@ -1369,46 +1423,41 @@
         image_space->VerifyImageAllocations();
       }
     }
-    if (boot_class_path_string_.empty()) {
-      // The bootclasspath is not explicitly specified: construct it from the loaded dex files.
-      const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
-      std::vector<std::string> dex_locations;
-      dex_locations.reserve(boot_class_path.size());
-      for (const DexFile* dex_file : boot_class_path) {
-        dex_locations.push_back(dex_file->GetLocation());
-      }
-      boot_class_path_string_ = android::base::Join(dex_locations, ':');
-    }
     {
       ScopedTrace trace2("AddImageStringsToTable");
       for (gc::space::ImageSpace* image_space : heap_->GetBootImageSpaces()) {
         GetInternTable()->AddImageStringsToTable(image_space, VoidFunctor());
       }
     }
+    if (heap_->GetBootImageSpaces().size() != GetBootClassPath().size()) {
+      // The boot image did not contain all boot class path components. Load the rest.
+      DCHECK_LT(heap_->GetBootImageSpaces().size(), GetBootClassPath().size());
+      size_t start = heap_->GetBootImageSpaces().size();
+      DCHECK_LT(start, GetBootClassPath().size());
+      std::vector<std::unique_ptr<const DexFile>> extra_boot_class_path;
+      if (runtime_options.Exists(Opt::BootClassPathDexList)) {
+        extra_boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
+      } else {
+        OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()).SubArray(start),
+                     ArrayRef<const std::string>(GetBootClassPathLocations()).SubArray(start),
+                     &extra_boot_class_path);
+      }
+      class_linker_->AddExtraBootDexFiles(self, std::move(extra_boot_class_path));
+    }
     if (IsJavaDebuggable()) {
       // Now that we have loaded the boot image, deoptimize its methods if we are running
       // debuggable, as the code may have been compiled non-debuggable.
       DeoptimizeBootImage();
     }
   } else {
-    std::vector<std::string> dex_filenames;
-    Split(boot_class_path_string_, ':', &dex_filenames);
-
-    std::vector<std::string> dex_locations;
-    if (!runtime_options.Exists(Opt::BootClassPathLocations)) {
-      dex_locations = dex_filenames;
-    } else {
-      dex_locations = runtime_options.GetOrDefault(Opt::BootClassPathLocations);
-      CHECK_EQ(dex_filenames.size(), dex_locations.size());
-    }
-
     std::vector<std::unique_ptr<const DexFile>> boot_class_path;
     if (runtime_options.Exists(Opt::BootClassPathDexList)) {
       boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
     } else {
-      OpenDexFiles(dex_filenames, dex_locations, &boot_class_path);
+      OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()),
+                   ArrayRef<const std::string>(GetBootClassPathLocations()),
+                   &boot_class_path);
     }
-    instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
     if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
       LOG(ERROR) << "Could not initialize without image: " << error_msg;
       return false;
@@ -2347,8 +2396,27 @@
 }
 
 void Runtime::SetFaultMessage(const std::string& message) {
-  MutexLock mu(Thread::Current(), fault_message_lock_);
-  fault_message_ = message;
+  std::string* new_msg = new std::string(message);
+  std::string* cur_msg = fault_message_.exchange(new_msg);
+  delete cur_msg;
+}
+
+std::string Runtime::GetFaultMessage() {
+  // Retrieve the message. Temporarily replace with null so that SetFaultMessage will not delete
+  // the string in parallel.
+  std::string* cur_msg = fault_message_.exchange(nullptr);
+
+  // Make a copy of the string.
+  std::string ret = cur_msg == nullptr ? "" : *cur_msg;
+
+  // Put the message back if it hasn't been updated.
+  std::string* null_str = nullptr;
+  if (!fault_message_.compare_exchange_strong(null_str, cur_msg)) {
+    // Already replaced.
+    delete cur_msg;
+  }
+
+  return ret;
 }
 
 void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
@@ -2410,6 +2478,8 @@
     LOG(WARNING) << "Failed to allocate JIT";
     // Release JIT code cache resources (several MB of memory).
     jit_code_cache_.reset();
+  } else {
+    jit->CreateThreadPool();
   }
 }
 
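The fault message moves from a mutex-guarded string to an atomic pointer so that neither the setter nor the abort-time reader ever blocks. The protocol: Set() publishes a fresh heap string and frees the one it displaced; Get() takes exclusive ownership by swapping in null, copies, then restores the pointer only if no newer message arrived. A self-contained sketch of that exchange/compare-exchange dance (class name is illustrative):

    #include <atomic>
    #include <string>

    class FaultMessage {
     public:
      ~FaultMessage() { delete message_.exchange(nullptr); }

      void Set(const std::string& message) {
        std::string* fresh = new std::string(message);
        delete message_.exchange(fresh);  // publish new value, free old one
      }

      std::string Get() {
        // Swap in null so a concurrent Set() cannot free the string under us.
        std::string* current = message_.exchange(nullptr);
        std::string copy = (current == nullptr) ? "" : *current;
        // Restore the message unless a newer one was published meanwhile.
        std::string* expected = nullptr;
        if (!message_.compare_exchange_strong(expected, current)) {
          delete current;  // a newer message won the race; drop ours
        }
        return copy;
      }

     private:
      std::atomic<std::string*> message_{nullptr};
    };

As the new Runtime comment notes, a Get() racing a Set() can observe the empty-string window, which is acceptable for abort logging.
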
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 0ccc7b7..76cfcd1 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -28,9 +28,9 @@
 #include <vector>
 
 #include "arch/instruction_set.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "deoptimization_kind.h"
 #include "dex/dex_file_types.h"
 #include "experimental_flags.h"
@@ -99,6 +99,7 @@
 class StackOverflowHandler;
 class SuspensionHandler;
 class ThreadList;
+class ThreadPool;
 class Trace;
 struct TraceConfig;
 class Transaction;
@@ -242,8 +243,14 @@
 
   ~Runtime();
 
-  const std::string& GetBootClassPathString() const {
-    return boot_class_path_string_;
+  const std::vector<std::string>& GetBootClassPath() const {
+    return boot_class_path_;
+  }
+
+  const std::vector<std::string>& GetBootClassPathLocations() const {
+    DCHECK(boot_class_path_locations_.empty() ||
+           boot_class_path_locations_.size() == boot_class_path_.size());
+    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
   }
 
   const std::string& GetClassPathString() const {
@@ -444,6 +451,7 @@
   bool UseJitCompilation() const;
 
   void PreZygoteFork();
+  void PostZygoteFork();
   void InitNonZygoteOrPostFork(
       JNIEnv* env,
       bool is_system_server,
@@ -510,12 +518,7 @@
   void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
-  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
-  // with the unexpected_signal_lock_.
-  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
-    return fault_message_;
-  }
+  void SetFaultMessage(const std::string& message);
 
   void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
 
@@ -819,6 +822,12 @@
   void VisitConstantRoots(RootVisitor* visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Note: To be lock-free, GetFaultMessage temporarily replaces the fault message with null.
+  //       As such, there is a window where a call will return an empty string. In general,
+  //       only aborting code should retrieve this data (via GetFaultMessageForAbortLogging
+  //       friend).
+  std::string GetFaultMessage();
+
   // A pointer to the active runtime or null.
   static Runtime* instance_;
 
@@ -859,7 +868,8 @@
   std::vector<std::string> image_compiler_options_;
   std::string image_location_;
 
-  std::string boot_class_path_string_;
+  std::vector<std::string> boot_class_path_;
+  std::vector<std::string> boot_class_path_locations_;
   std::string class_path_string_;
   std::vector<std::string> properties_;
 
@@ -901,9 +911,9 @@
   std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
   std::unique_ptr<jit::JitOptions> jit_options_;
 
-  // Fault message, printed when we get a SIGSEGV.
-  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::string fault_message_ GUARDED_BY(fault_message_lock_);
+  // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
+  // lock-free, so needs to be atomic.
+  std::atomic<std::string*> fault_message_;
 
   // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
   // the shutdown lock so that threads aren't born while we're shutting down.
@@ -1101,11 +1111,11 @@
 
   MemMap protected_fault_page_;
 
-  uint64_t process_cpu_start_time_;
-  uint64_t process_cpu_end_time_;
-
   uint32_t verifier_logging_threshold_ms_;
 
+  // Note: See comments on GetFaultMessage.
+  friend std::string GetFaultMessageForAbortLogging();
+
   DISALLOW_COPY_AND_ASSIGN(Runtime);
 };
 
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index 4cce15e..32ee3aa3 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -20,8 +20,8 @@
 #include <vector>
 
 #include "base/array_ref.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "handle.h"
 
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index eae2505..5676577 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -371,6 +371,11 @@
 #pragma GCC diagnostic ignored "-Wframe-larger-than="
 #endif
 
+std::string GetFaultMessageForAbortLogging() {
+  Runtime* runtime = Runtime::Current();
+  return (runtime != nullptr) ? runtime->GetFaultMessage() : "";
+}
+
 static void HandleUnexpectedSignalCommonDump(int signal_number,
                                              siginfo_t* info,
                                              void* raw_context,
@@ -427,9 +432,9 @@
     }
 
     if (dump_on_stderr) {
-      std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
+      std::cerr << "Fault message: " << GetFaultMessageForAbortLogging() << std::endl;
     } else {
-      LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
+      LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << GetFaultMessageForAbortLogging();
     }
   }
 }
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 5cec309..2b2919e 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -37,7 +37,7 @@
 RUNTIME_OPTIONS_KEY (Unit,                Zygote)
 RUNTIME_OPTIONS_KEY (Unit,                Help)
 RUNTIME_OPTIONS_KEY (Unit,                ShowVersion)
-RUNTIME_OPTIONS_KEY (std::string,         BootClassPath)
+RUNTIME_OPTIONS_KEY (ParseStringList<':'>,BootClassPath)           // std::vector<std::string>
 RUNTIME_OPTIONS_KEY (ParseStringList<':'>,BootClassPathLocations)  // std::vector<std::string>
 RUNTIME_OPTIONS_KEY (std::string,         ClassPath)
 RUNTIME_OPTIONS_KEY (std::string,         Image)
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index 3089c24..2541ab5 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 
 #include "base/casts.h"
+#include "base/mutex.h"
 #include "jni/jni_env_ext-inl.h"
 #include "obj_ptr-inl.h"
 #include "runtime.h"
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 0c42c5a..b2ad90a 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -19,8 +19,8 @@
 
 #include "jni.h"
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/value_object.h"
 #include "thread_state.h"
 
diff --git a/runtime/stack.h b/runtime/stack.h
index 9d30115..0edf4f5 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,8 +20,8 @@
 #include <stdint.h>
 #include <string>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "quick/quick_method_frame_info.h"
 #include "stack_map.h"
 
diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h
index 106c7f1..493ea85 100644
--- a/runtime/subtype_check.h
+++ b/runtime/subtype_check.h
@@ -20,7 +20,7 @@
 #include "subtype_check_bits_and_status.h"
 #include "subtype_check_info.h"
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "mirror/class.h"
 #include "runtime.h"
 
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6c41ae4..8bec2d9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -39,6 +39,7 @@
 #include <sstream>
 
 #include "android-base/stringprintf.h"
+#include "android-base/strings.h"
 
 #include "arch/context-inl.h"
 #include "arch/context.h"
@@ -1875,8 +1876,9 @@
 
   // Grab the scheduler stats for this thread.
   std::string scheduler_stats;
-  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
-    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
+  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)
+      && !scheduler_stats.empty()) {
+    scheduler_stats = android::base::Trim(scheduler_stats);  // Lose the trailing '\n'.
   } else {
     scheduler_stats = "0 0 0";
   }
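
The old schedstat code blindly dropped the last character, which misbehaves when the read yields an empty string (size() - 1 wraps around on an unsigned size). Guarding on !empty() and trimming handles that; a minimal illustration of the safe variant, assuming android::base::Trim strips leading and trailing whitespace:

    #include <string>

    // Strip trailing line endings only when present; on "" the old
    // resize(size() - 1) would have wrapped around to a huge length.
    std::string ChompTrailingNewline(std::string s) {
      while (!s.empty() && (s.back() == '\n' || s.back() == '\r')) {
        s.pop_back();
      }
      return s;
    }
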
diff --git a/runtime/thread.h b/runtime/thread.h
index ccde236..6db1943 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -30,8 +30,8 @@
 #include "base/atomic.h"
 #include "base/enums.h"
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/safe_map.h"
 #include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
@@ -563,11 +563,11 @@
   bool Interrupted();
   // Implements java.lang.Thread.isInterrupted.
   bool IsInterrupted();
-  void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
+  void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
   void SetInterrupted(bool i) {
     tls32_.interrupted.store(i, std::memory_order_seq_cst);
   }
-  void Notify() REQUIRES(!*wait_mutex_);
+  void Notify() REQUIRES(!wait_mutex_);
 
   ALWAYS_INLINE void PoisonObjectPointers() {
     ++poison_object_cookie_;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 8723c99..de698c2 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -123,7 +123,10 @@
   tasks_.clear();
 }
 
-ThreadPool::ThreadPool(const char* name, size_t num_threads, bool create_peers)
+ThreadPool::ThreadPool(const char* name,
+                       size_t num_threads,
+                       bool create_peers,
+                       size_t worker_stack_size)
   : name_(name),
     task_queue_lock_("task queue lock"),
     task_queue_condition_("task queue condition", task_queue_lock_),
@@ -133,28 +136,33 @@
     waiting_count_(0),
     start_time_(0),
     total_wait_time_(0),
-    // Add one since the caller of constructor waits on the barrier too.
-    creation_barier_(num_threads + 1),
+    creation_barier_(0),
     max_active_workers_(num_threads),
-    create_peers_(create_peers) {
+    create_peers_(create_peers),
+    worker_stack_size_(worker_stack_size) {
+  CreateThreads();
+}
+
+void ThreadPool::CreateThreads() {
+  CHECK(threads_.empty());
   Thread* self = Thread::Current();
-  while (GetThreadCount() < num_threads) {
-    const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
-                                                 GetThreadCount());
-    threads_.push_back(
-        new ThreadPoolWorker(this, worker_name, ThreadPoolWorker::kDefaultStackSize));
+  {
+    MutexLock mu(self, task_queue_lock_);
+    shutting_down_ = false;
+    // Add one since the caller of the constructor waits on the barrier too.
+    creation_barier_.Init(self, max_active_workers_ + 1);
+    while (GetThreadCount() < max_active_workers_) {
+      const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
+                                                   GetThreadCount());
+      threads_.push_back(
+          new ThreadPoolWorker(this, worker_name, worker_stack_size_));
+    }
   }
   // Wait for all of the threads to attach.
-  creation_barier_.Wait(self);
+  creation_barier_.Wait(Thread::Current());
 }
 
-void ThreadPool::SetMaxActiveWorkers(size_t threads) {
-  MutexLock mu(Thread::Current(), task_queue_lock_);
-  CHECK_LE(threads, GetThreadCount());
-  max_active_workers_ = threads;
-}
-
-ThreadPool::~ThreadPool() {
+void ThreadPool::DeleteThreads() {
   {
     Thread* self = Thread::Current();
     MutexLock mu(self, task_queue_lock_);
@@ -164,10 +172,22 @@
     task_queue_condition_.Broadcast(self);
     completion_condition_.Broadcast(self);
   }
-  // Wait for the threads to finish.
+  // Wait for the threads to finish. We expect the user of the pool not to
+  // call `CreateThreads` and `DeleteThreads` concurrently, so we don't
+  // guard the field here.
   STLDeleteElements(&threads_);
 }
 
+void ThreadPool::SetMaxActiveWorkers(size_t max_workers) {
+  MutexLock mu(Thread::Current(), task_queue_lock_);
+  CHECK_LE(max_workers, GetThreadCount());
+  max_active_workers_ = max_workers;
+}
+
+ThreadPool::~ThreadPool() {
+  DeleteThreads();
+}
+
 void ThreadPool::StartWorkers(Thread* self) {
   MutexLock mu(self, task_queue_lock_);
   started_ = true;
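
Hoisting thread creation out of the constructor into CreateThreads()/DeleteThreads() lets the zygote drop its JIT pool threads before fork() and rebuild them afterwards; the creation barrier is re-initialized to the worker count plus one so the creating thread can block until every worker has attached. A small sketch of that rendezvous using standard primitives (ART's Barrier fills this role):

    #include <condition_variable>
    #include <mutex>

    // Initialize with num_workers + 1; each worker and the creator call
    // Arrive(), then the creator Wait()s until everyone has checked in.
    class CreationBarrier {
     public:
      explicit CreationBarrier(int count) : remaining_(count) {}

      void Arrive() {
        std::lock_guard<std::mutex> guard(mutex_);
        if (--remaining_ == 0) {
          done_.notify_all();
        }
      }

      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        done_.wait(lock, [this] { return remaining_ == 0; });
      }

     private:
      std::mutex mutex_;
      std::condition_variable done_;
      int remaining_;
    };
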
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 98a1193..f55d72e 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_THREAD_POOL_H_
 
 #include <deque>
+#include <functional>
 #include <vector>
 
 #include "barrier.h"
@@ -48,6 +49,18 @@
   }
 };
 
+class FunctionTask : public SelfDeletingTask {
+ public:
+  explicit FunctionTask(std::function<void(Thread*)>&& func) : func_(std::move(func)) {}
+
+  void Run(Thread* self) override {
+    func_(self);
+  }
+
+ private:
+  std::function<void(Thread*)> func_;
+};
+
 class ThreadPoolWorker {
  public:
   static const size_t kDefaultStackSize = 1 * MB;
@@ -110,9 +123,18 @@
   // If create_peers is true, all worker threads will have a Java peer object. Note that if the
   // pool is asked to do work on the current thread (see Wait), a peer may not be available. Wait
   // will conservatively abort if create_peers and do_work are true.
-  ThreadPool(const char* name, size_t num_threads, bool create_peers = false);
+  ThreadPool(const char* name,
+             size_t num_threads,
+             bool create_peers = false,
+             size_t worker_stack_size = ThreadPoolWorker::kDefaultStackSize);
   virtual ~ThreadPool();
 
+  // Create the threads of this pool.
+  void CreateThreads();
+
+  // Stops and deletes all threads in this pool.
+  void DeleteThreads();
+
   // Wait for all tasks currently on queue to get completed. If the pool has been stopped, only
   // wait till all already running tasks are done.
   // When the pool was created with peers for workers, do_work must not be true (see ThreadPool()).
@@ -158,7 +180,6 @@
   // How many worker threads are waiting on the condition.
   volatile size_t waiting_count_ GUARDED_BY(task_queue_lock_);
   std::deque<Task*> tasks_ GUARDED_BY(task_queue_lock_);
-  // TODO: make this immutable/const?
   std::vector<ThreadPoolWorker*> threads_;
   // Work balance detection.
   uint64_t start_time_ GUARDED_BY(task_queue_lock_);
@@ -166,6 +187,7 @@
   Barrier creation_barier_;
   size_t max_active_workers_ GUARDED_BY(task_queue_lock_);
   const bool create_peers_;
+  const size_t worker_stack_size_;
 
  private:
   friend class ThreadPoolWorker;
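
FunctionTask makes one-off work cheap to queue: it wraps a lambda and, because it derives from SelfDeletingTask, frees itself after Run(). A hypothetical usage sketch against this header's interface (function name is illustrative):

    // Queue a one-shot lambda on an existing pool; FunctionTask deletes
    // itself after Run() completes on a worker thread.
    void QueueWarmUp(ThreadPool* pool, Thread* self) {
      pool->AddTask(self, new FunctionTask([](Thread* worker_self) {
        // ... work executed on a pool worker ...
      }));
      pool->StartWorkers(self);
    }
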
diff --git a/runtime/trace.h b/runtime/trace.h
index 926a34f..1089962 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -28,6 +28,7 @@
 
 #include "base/atomic.h"
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/os.h"
 #include "base/safe_map.h"
@@ -42,6 +43,7 @@
 class ArtField;
 class ArtMethod;
 class DexFile;
+class LOCKABLE Mutex;
 class ShadowFrame;
 class Thread;
 
@@ -173,57 +175,57 @@
   uint32_t GetClockOverheadNanoSeconds();
 
   void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   // InstrumentationListener implementation.
   void MethodEntered(Thread* thread,
                      Handle<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void MethodExited(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     const JValue& return_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void MethodUnwind(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void DexPcMoved(Thread* thread,
                   Handle<mirror::Object> this_object,
                   ArtMethod* method,
                   uint32_t new_dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void FieldRead(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t dex_pc,
                  ArtField* field)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void FieldWritten(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     ArtField* field,
                     const JValue& field_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void ExceptionThrown(Thread* thread,
                        Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void Branch(Thread* thread,
               ArtMethod* method,
               uint32_t dex_pc,
               int32_t dex_pc_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
       REQUIRES_SHARED(Locks::mutator_lock_) override;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
@@ -258,20 +260,20 @@
       // how to annotate this.
       NO_THREAD_SAFETY_ANALYSIS;
   void FinishTracing()
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
 
   void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
                            instrumentation::Instrumentation::InstrumentationEvent event,
                            uint32_t thread_clock_diff, uint32_t wall_clock_diff)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   // Methods to output traced methods and threads.
   void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
-      REQUIRES(!*unique_methods_lock_);
+      REQUIRES(!unique_methods_lock_);
   void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_);
   void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);
 
   // Methods to register seen entitites in streaming mode. The methods return true if the entity
@@ -289,15 +291,15 @@
   void FlushBuf()
       REQUIRES(streaming_lock_);
 
-  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_);
+  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!unique_methods_lock_);
   uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
-      REQUIRES(!*unique_methods_lock_);
-  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_);
-  std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_)
+      REQUIRES(!unique_methods_lock_);
+  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!unique_methods_lock_);
+  std::string GetMethodLine(ArtMethod* method) REQUIRES(!unique_methods_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_);
 
   // Singleton instance of the Trace or null when no method tracing is active.
   static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 3099b23..3369784 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -24,8 +24,8 @@
 
 #include "base/arena_object.h"
 #include "base/bit_vector.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/stringpiece.h"
 #include "dex/primitive.h"
 #include "gc_root.h"
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 9bb60bb..de66bf5 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -23,7 +23,7 @@
 
 #include <android-base/logging.h>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/safe_map.h"
 #include "base/scoped_arena_containers.h"
 
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index b666c15..d346a95 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -22,6 +22,7 @@
 #include "art_method-inl.h"
 #include "base/indenter.h"
 #include "base/leb128.h"
+#include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "compiler_callbacks.h"
 #include "dex/dex_file-inl.h"
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index 0146b17..dfd4a5c 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -22,7 +22,7 @@
 #include <vector>
 
 #include "base/array_ref.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file_types.h"
 #include "handle.h"
 #include "obj_ptr.h"
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index cde885c..a19cc92 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -48,6 +48,7 @@
 jclass WellKnownClasses::dalvik_system_DexPathList;
 jclass WellKnownClasses::dalvik_system_DexPathList__Element;
 jclass WellKnownClasses::dalvik_system_EmulatedStackFrame;
+jclass WellKnownClasses::dalvik_system_InMemoryDexClassLoader;
 jclass WellKnownClasses::dalvik_system_PathClassLoader;
 jclass WellKnownClasses::dalvik_system_VMRuntime;
 jclass WellKnownClasses::java_lang_annotation_Annotation__array;
@@ -306,6 +307,7 @@
   dalvik_system_DexPathList = CacheClass(env, "dalvik/system/DexPathList");
   dalvik_system_DexPathList__Element = CacheClass(env, "dalvik/system/DexPathList$Element");
   dalvik_system_EmulatedStackFrame = CacheClass(env, "dalvik/system/EmulatedStackFrame");
+  dalvik_system_InMemoryDexClassLoader = CacheClass(env, "dalvik/system/InMemoryDexClassLoader");
   dalvik_system_PathClassLoader = CacheClass(env, "dalvik/system/PathClassLoader");
   dalvik_system_VMRuntime = CacheClass(env, "dalvik/system/VMRuntime");
 
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 0b7ed09..f0e98a8 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_WELL_KNOWN_CLASSES_H_
 #define ART_RUNTIME_WELL_KNOWN_CLASSES_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 #include "obj_ptr.h"
 
@@ -57,6 +57,7 @@
   static jclass dalvik_system_DexPathList;
   static jclass dalvik_system_DexPathList__Element;
   static jclass dalvik_system_EmulatedStackFrame;
+  static jclass dalvik_system_InMemoryDexClassLoader;
   static jclass dalvik_system_PathClassLoader;
   static jclass dalvik_system_VMRuntime;
   static jclass java_lang_annotation_Annotation__array;
diff --git a/sigchainlib/Android.bp b/sigchainlib/Android.bp
index a151d7a..1d5ec2b 100644
--- a/sigchainlib/Android.bp
+++ b/sigchainlib/Android.bp
@@ -16,7 +16,6 @@
 
 cc_library {
     name: "libsigchain",
-    cpp_std: "gnu++17",
 
     host_supported: true,
     defaults: ["art_defaults"],
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index cbc3ff8..08ee690 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -100,7 +100,7 @@
 
 template<typename T>
 static void lookup_next_symbol(T* output, T wrapper, const char* name) {
-  void* sym = dlsym(RTLD_NEXT, name);
+  void* sym = dlsym(RTLD_NEXT, name);  // NOLINT glibc triggers cert-dcl16-c with RTLD_NEXT.
   if (sym == nullptr) {
     sym = dlsym(RTLD_DEFAULT, name);
     if (sym == wrapper || sym == sigaction) {
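(Background for the NOLINT above, quoted from the glibc headers rather than from this tree: <dlfcn.h> defines RTLD_NEXT as ((void *) -1l), and that lowercase 'l' literal suffix is exactly what cert-dcl16-c flags.)
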
diff --git a/test/175-alloc-big-bignums/expected.txt b/test/175-alloc-big-bignums/expected.txt
new file mode 100644
index 0000000..f75da10
--- /dev/null
+++ b/test/175-alloc-big-bignums/expected.txt
@@ -0,0 +1 @@
+Test complete
diff --git a/test/175-alloc-big-bignums/info.txt b/test/175-alloc-big-bignums/info.txt
new file mode 100644
index 0000000..8f6bcc3
--- /dev/null
+++ b/test/175-alloc-big-bignums/info.txt
@@ -0,0 +1,11 @@
+Allocate a large number of huge BigIntegers in rapid succession. Most of the
+associated memory lives in the C++ heap. This checks that the garbage collector
+is triggered often enough to keep us from running out of native memory.
+
+The test allocates roughly 10GB of native memory, approximately 1MB of which
+will be live at any point. Basically all native memory deallocation is
+triggered by Java garbage collection.
+
+This test is a lot nastier than it looks: failure on target tends to exhaust
+device memory and kill off all processes on the device, including the adb
+daemon :-(.
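
A quick sanity check of the sizing above, using the constants from the test source that follows: each 4,000,000-bit value needs about 4,000,000 / 8 = 500,000 bytes of magnitude storage, so 20,000 iterations allocate roughly 20,000 x 0.5 MB = 10 GB in total, while only the most recent result (plus a temporary) stays reachable.
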
diff --git a/test/175-alloc-big-bignums/src/Main.java b/test/175-alloc-big-bignums/src/Main.java
new file mode 100644
index 0000000..5fbeb46
--- /dev/null
+++ b/test/175-alloc-big-bignums/src/Main.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.math.BigInteger;
+
+// This is motivated by the assumption that BigInteger allocates malloc memory
+// underneath. That's true (in 2018) on Android.
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    final int nIters = 20_000;  // Presumed < 1_000_000.
+    final BigInteger big2_20 = BigInteger.valueOf(1024*1024); // 2^20
+    BigInteger huge = BigInteger.valueOf(1).shiftLeft(4_000_000);  // ~0.5MB
+    for (int i = 0; i < nIters; ++i) {  // 10 GB total
+      huge = huge.add(BigInteger.ONE);
+    }
+    if (huge.bitLength() != 4_000_001) {
+      System.out.println("Wrong answer length: " + huge.bitLength());
+    } else if (huge.mod(big2_20).compareTo(BigInteger.valueOf(nIters)) != 0) {
+      System.out.println("Wrong answer: ..." + huge.mod(big2_20));
+    } else {
+      System.out.println("Test complete");
+    }
+  }
+}
diff --git a/test/1934-jvmti-signal-thread/signal_threads.cc b/test/1934-jvmti-signal-thread/signal_threads.cc
index 726a7a86..dfb08c1 100644
--- a/test/1934-jvmti-signal-thread/signal_threads.cc
+++ b/test/1934-jvmti-signal-thread/signal_threads.cc
@@ -47,19 +47,19 @@
                             jvmti_env,
                             jvmti_env->Allocate(sizeof(NativeMonitor),
                                                 reinterpret_cast<unsigned char**>(&mon)))) {
-    return -1l;
+    return -1L;
   }
   if (JvmtiErrorToException(env,
                             jvmti_env,
                             jvmti_env->CreateRawMonitor("test-1934 start",
                                                         &mon->start_monitor))) {
-    return -1l;
+    return -1L;
   }
   if (JvmtiErrorToException(env,
                             jvmti_env,
                             jvmti_env->CreateRawMonitor("test-1934 continue",
                                                         &mon->continue_monitor))) {
-    return -1l;
+    return -1L;
   }
   mon->should_continue = false;
   mon->should_start = false;
@@ -92,7 +92,7 @@
   while (!mon->should_continue) {
     if (JvmtiErrorToException(env,
                               jvmti_env,
-                              jvmti_env->RawMonitorWait(mon->continue_monitor, -1l))) {
+                              jvmti_env->RawMonitorWait(mon->continue_monitor, -1L))) {
       JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor));
       return;
     }
@@ -112,7 +112,7 @@
   while (!mon->should_start) {
     if (JvmtiErrorToException(env,
                               jvmti_env,
-                              jvmti_env->RawMonitorWait(mon->start_monitor, -1l))) {
+                              jvmti_env->RawMonitorWait(mon->start_monitor, -1L))) {
       return;
     }
   }
diff --git a/test/1959-redefine-object-instrument/expected.txt b/test/1959-redefine-object-instrument/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/1959-redefine-object-instrument/expected.txt
diff --git a/test/1959-redefine-object-instrument/fake_redef_object.cc b/test/1959-redefine-object-instrument/fake_redef_object.cc
new file mode 100644
index 0000000..b1201ab
--- /dev/null
+++ b/test/1959-redefine-object-instrument/fake_redef_object.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstring>  // For strcmp below.
+#include <limits>
+#include <memory>
+
+#include "jni.h"
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
+
+// Slicer's headers have code that triggers these warnings. b/65298177
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wsign-compare"
+#pragma clang diagnostic ignored "-Wunused-parameter"
+#include "slicer/instrumentation.h"
+#include "slicer/reader.h"
+#include "slicer/writer.h"
+#pragma clang diagnostic pop
+
+namespace art {
+namespace Test1959RedefineObjectInstrument {
+
+// Just pull it out of the dex file but don't bother changing anything.
+static void JNICALL RedefineObjectHook(jvmtiEnv *jvmti_env,
+                                       JNIEnv* env,
+                                       jclass class_being_redefined ATTRIBUTE_UNUSED,
+                                       jobject loader ATTRIBUTE_UNUSED,
+                                       const char* name,
+                                       jobject protection_domain ATTRIBUTE_UNUSED,
+                                       jint class_data_len,
+                                       const unsigned char* class_data,
+                                       jint* new_class_data_len,
+                                       unsigned char** new_class_data) {
+  if (strcmp(name, "java/lang/Object") != 0) {
+    return;
+  }
+
+  dex::Reader reader(class_data, class_data_len);
+  dex::u4 class_index = reader.FindClassIndex("Ljava/lang/Object;");
+  if (class_index == dex::kNoIndex) {
+    env->ThrowNew(env->FindClass("java/lang/RuntimeException"),
+                  "Failed to find object in dex file!");
+    return;
+  }
+
+  reader.CreateClassIr(class_index);
+  auto dex_ir = reader.GetIr();
+  dex::Writer writer(dex_ir);
+
+  class JvmtiAllocator : public dex::Writer::Allocator {
+   public:
+    explicit JvmtiAllocator(jvmtiEnv* jvmti) : jvmti_(jvmti) {}
+
+    void* Allocate(size_t size) override {
+      unsigned char* res = nullptr;
+      jvmti_->Allocate(size, &res);
+      return res;
+    }
+
+    void Free(void* ptr) override {
+      jvmti_->Deallocate(reinterpret_cast<unsigned char*>(ptr));
+    }
+
+   private:
+    jvmtiEnv* jvmti_;
+  };
+  JvmtiAllocator allocator(jvmti_env);
+  size_t new_size;
+  *new_class_data = writer.CreateImage(&allocator, &new_size);
+  if (new_size > std::numeric_limits<jint>::max()) {
+    *new_class_data = nullptr;
+    env->ThrowNew(env->FindClass("java/lang/RuntimeException"),
+                  "transform result is too large!");
+    return;
+  }
+  *new_class_data_len = static_cast<jint>(new_size);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_forceRedefine(JNIEnv* env,
+                                                          jclass klass ATTRIBUTE_UNUSED,
+                                                          jclass obj_class,
+                                                          jthread thr) {
+  if (IsJVM()) {
+    // RI so don't do anything.
+    return;
+  }
+  jvmtiCapabilities caps {.can_retransform_classes = 1};
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->AddCapabilities(&caps))) {
+    return;
+  }
+  jvmtiEventCallbacks cb {.ClassFileLoadHook = RedefineObjectHook };
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+                                                                JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
+                                                                thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->RetransformClasses(1, &obj_class))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+                                                                JVMTI_EVENT_CLASS_FILE_LOAD_HOOK,
+                                                                thr))) {
+    return;
+  }
+}
+
+}  // namespace Test1959RedefineObjectInstrument
+}  // namespace art
+
diff --git a/test/1959-redefine-object-instrument/info.txt b/test/1959-redefine-object-instrument/info.txt
new file mode 100644
index 0000000..d15c0e0
--- /dev/null
+++ b/test/1959-redefine-object-instrument/info.txt
@@ -0,0 +1,9 @@
+Regression test for a bug in the interaction between instrumentation
+installation and class redefinition.
+
+Redefining a class does not update the instrumentation stack of a thread.
+This is generally fine because the method pointer in the instrumentation
+stack is only used for some sanity checks, logging, and method-exit events
+(where having the non-obsolete version is advantageous). Unfortunately, some
+of those sanity checks fail to account for obsolete methods and can therefore
+fail spuriously.
\ No newline at end of file
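
In outline, the reproduction in the Main.java below is three steps (a condensed sketch reusing that test's own names, not additional test logic):

    // 1. Install instrumentation by setting a breakpoint.
    man.setBreakpoint(Main.class.getDeclaredMethod("doNothing"), 0L);
    // 2. Redefine java.lang.Object while a paused thread still has
    //    instrumentation frames; their stacked method pointers become obsolete.
    forceRedefine(Object.class, Thread.currentThread());
    // 3. Exercise the breakpoint machinery again; this must not trip the
    //    obsolete-method sanity checks described above.
    man.clearAllBreakpoints();
    man.setBreakpoint(Main.class.getDeclaredMethod("doNothing"), 0L);
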
diff --git a/test/1959-redefine-object-instrument/run b/test/1959-redefine-object-instrument/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1959-redefine-object-instrument/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1959-redefine-object-instrument/src/Main.java b/test/1959-redefine-object-instrument/src/Main.java
new file mode 100644
index 0000000..b3201f6
--- /dev/null
+++ b/test/1959-redefine-object-instrument/src/Main.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.LinkedList;
+import java.lang.reflect.Executable;
+
+import art.*;
+
+public class Main {
+  /**
+   * NB This test cannot be run on the RI.
+   * TODO We should make this run on the RI.
+   */
+
+  public static void main(String[] args) throws Exception {
+    doTest();
+  }
+
+  public static volatile boolean started = false;
+
+  public static void doNothing() {}
+  public static void notifyBreakpointReached(Thread thr, Executable e, long l) {}
+
+  public static void doTest() throws Exception {
+    final Object lock = new Object();
+    Breakpoint.Manager man = new Breakpoint.Manager();
+    Breakpoint.startBreakpointWatch(
+        Main.class,
+        Main.class.getDeclaredMethod("notifyBreakpointReached", Thread.class, Executable.class, Long.TYPE),
+        null);
+    Thread thr = new Thread(() -> {
+      synchronized (lock) {
+        started = true;
+        // Wait basically forever.
+        try {
+          lock.wait(Integer.MAX_VALUE - 1);
+        } catch (Exception e) {
+          throw new Error("WAIT EXCEPTION", e);
+        }
+      }
+    });
+    // Set the breakpoint.
+    man.setBreakpoint(Main.class.getDeclaredMethod("doNothing"), 0L);
+    thr.start();
+    while (!started || thr.getState() != Thread.State.TIMED_WAITING) {}
+    // Redefine while thread is paused.
+    forceRedefine(Object.class, Thread.currentThread());
+    // Clear breakpoints.
+    man.clearAllBreakpoints();
+    // Set the breakpoint again.
+    man.setBreakpoint(Main.class.getDeclaredMethod("doNothing"), 0L);
+    // Wakeup
+    synchronized(lock) {
+      lock.notifyAll();
+    }
+    thr.join();
+  }
+
+  private static native void forceRedefine(Class c, Thread thr);
+}
diff --git a/test/1959-redefine-object-instrument/src/art/Breakpoint.java b/test/1959-redefine-object-instrument/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1959-redefine-object-instrument/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+  public static class Manager {
+    public static class BP {
+      public final Executable method;
+      public final long location;
+
+      public BP(Executable method) {
+        this(method, getStartLocation(method));
+      }
+
+      public BP(Executable method, long location) {
+        this.method = method;
+        this.location = location;
+      }
+
+      @Override
+      public boolean equals(Object other) {
+        return (other instanceof BP) &&
+            method.equals(((BP)other).method) &&
+            location == ((BP)other).location;
+      }
+
+      @Override
+      public String toString() {
+        return method.toString() + " @ " + getLine();
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(method, location);
+      }
+
+      public int getLine() {
+        try {
+          LineNumber[] lines = getLineNumberTable(method);
+          int best = -1;
+          for (LineNumber l : lines) {
+            if (l.location > location) {
+              break;
+            } else {
+              best = l.line;
+            }
+          }
+          return best;
+        } catch (Exception e) {
+          return -1;
+        }
+      }
+    }
+
+    private Set<BP> breaks = new HashSet<>();
+
+    public void setBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.add(b)) {
+          Breakpoint.setBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void setBreakpoint(Executable method, long location) {
+      setBreakpoints(new BP(method, location));
+    }
+
+    public void clearBreakpoints(BP... bs) {
+      for (BP b : bs) {
+        if (breaks.remove(b)) {
+          Breakpoint.clearBreakpoint(b.method, b.location);
+        }
+      }
+    }
+    public void clearBreakpoint(Executable method, long location) {
+      clearBreakpoints(new BP(method, location));
+    }
+
+    public void clearAllBreakpoints() {
+      clearBreakpoints(breaks.toArray(new BP[0]));
+    }
+  }
+
+  public static void startBreakpointWatch(Class<?> methodClass,
+                                          Executable breakpointReached,
+                                          Thread thr) {
+    startBreakpointWatch(methodClass, breakpointReached, false, thr);
+  }
+
+  /**
+   * Enables the trapping of breakpoint events.
+   *
+   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+   */
+  public static native void startBreakpointWatch(Class<?> methodClass,
+                                                 Executable breakpointReached,
+                                                 boolean allowRecursive,
+                                                 Thread thr);
+  public static native void stopBreakpointWatch(Thread thr);
+
+  public static final class LineNumber implements Comparable<LineNumber> {
+    public final long location;
+    public final int line;
+
+    private LineNumber(long loc, int line) {
+      this.location = loc;
+      this.line = line;
+    }
+
+    public boolean equals(Object other) {
+      return other instanceof LineNumber && ((LineNumber)other).line == line &&
+          ((LineNumber)other).location == location;
+    }
+
+    public int compareTo(LineNumber other) {
+      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+      if (v != 0) {
+        return v;
+      } else {
+        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+      }
+    }
+  }
+
+  public static native void setBreakpoint(Executable m, long loc);
+  public static void setBreakpoint(Executable m, LineNumber l) {
+    setBreakpoint(m, l.location);
+  }
+
+  public static native void clearBreakpoint(Executable m, long loc);
+  public static void clearBreakpoint(Executable m, LineNumber l) {
+    clearBreakpoint(m, l.location);
+  }
+
+  private static native Object[] getLineNumberTableNative(Executable m);
+  public static LineNumber[] getLineNumberTable(Executable m) {
+    Object[] nativeTable = getLineNumberTableNative(m);
+    long[] location = (long[])(nativeTable[0]);
+    int[] lines = (int[])(nativeTable[1]);
+    if (lines.length != location.length) {
+      throw new Error("Lines and locations have different lengths!");
+    }
+    LineNumber[] out = new LineNumber[lines.length];
+    for (int i = 0; i < lines.length; i++) {
+      out[i] = new LineNumber(location[i], lines[i]);
+    }
+    return out;
+  }
+
+  public static native long getStartLocation(Executable m);
+
+  public static int locationToLine(Executable m, long location) {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      int best = -1;
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.location > location) {
+          break;
+        } else {
+          best = l.line;
+        }
+      }
+      return best;
+    } catch (Exception e) {
+      return -1;
+    }
+  }
+
+  public static long lineToLocation(Executable m, int line) throws Exception {
+    try {
+      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+      for (Breakpoint.LineNumber l : lines) {
+        if (l.line == line) {
+          return l.location;
+        }
+      }
+      throw new Exception("Unable to find line " + line + " in " + m);
+    } catch (Exception e) {
+      throw new Exception("Unable to get line number info for " + m, e);
+    }
+  }
+}
+
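
A minimal usage sketch of the helper above, mirroring how the Main.java in this test drives it (doNothing and notifyBreakpointReached are that test's methods; passing null as the thread follows the test's own usage):

    Breakpoint.startBreakpointWatch(
        Main.class,
        Main.class.getDeclaredMethod("notifyBreakpointReached",
                                     Thread.class, Executable.class, Long.TYPE),
        null);
    Breakpoint.Manager man = new Breakpoint.Manager();
    man.setBreakpoint(Main.class.getDeclaredMethod("doNothing"), 0L);
    // ... run code that calls doNothing() ...
    man.clearAllBreakpoints();
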
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 17ccd9a..00827cf 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -46,7 +46,7 @@
       usleep(1000);
     }
     // Will either ensure it's compiled or do the compilation itself.
-    jit->CompileMethod(method, soa.Self(), /* osr */ false);
+    jit->CompileMethod(method, soa.Self(), /*baseline=*/ false, /*osr=*/ false);
   }
 
   CodeInfo info(header);
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index b2b3634..dc0e94c 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -128,7 +128,7 @@
           // Sleep to yield to the compiler thread.
           usleep(1000);
           // Will either ensure it's compiled or do the compilation itself.
-          jit->CompileMethod(m, Thread::Current(), /* osr */ true);
+          jit->CompileMethod(m, Thread::Current(), /*baseline=*/ false, /*osr=*/ true);
         }
       });
 }
diff --git a/test/580-crc32/src/Main.java b/test/580-crc32/src/Main.java
index 7fc1273..6199e9b 100644
--- a/test/580-crc32/src/Main.java
+++ b/test/580-crc32/src/Main.java
@@ -15,29 +15,29 @@
  */
 
 import java.util.zip.CRC32;
+import java.util.Random;
 
 /**
- * The ART compiler can use intrinsics for the java.util.zip.CRC32 method:
- *    private native static int update(int crc, int b)
+ * The ART compiler can use intrinsics for the java.util.zip.CRC32 methods:
+ *   private native static int update(int crc, int b)
+ *   private native static int updateBytes(int crc, byte[] b, int off, int len)
  *
- * As the method is private it is not possible to check the use of intrinsics
- * for it directly.
+ * As the methods are private it is not possible to check the use of intrinsics
+ * for them directly.
  * The tests check that correct checksums are produced.
  */
 public class Main {
-  private static CRC32 crc32 = new CRC32();
-
   public Main() {
   }
 
-  public static long TestInt(int value) {
-    crc32.reset();
+  public static long CRC32Byte(int value) {
+    CRC32 crc32 = new CRC32();
     crc32.update(value);
     return crc32.getValue();
   }
 
-  public static long TestInt(int... values) {
-    crc32.reset();
+  public static long CRC32BytesUsingUpdateInt(int... values) {
+    CRC32 crc32 = new CRC32();
     for (int value : values) {
       crc32.update(value);
     }
@@ -50,82 +50,301 @@
     }
   }
 
-  public static void main(String args[]) {
+  private static void assertEqual(boolean expected, boolean actual) {
+    if (expected != actual) {
+      throw new Error("Expected: " + expected + ", found: " + actual);
+    }
+  }
+
+  private static void TestCRC32Update() {
     // public void update(int b)
     //
     // Tests for checksums of the byte 0x0
-    assertEqual(0xD202EF8DL, TestInt(0x0));
-    assertEqual(0xD202EF8DL, TestInt(0x0100));
-    assertEqual(0xD202EF8DL, TestInt(0x010000));
-    assertEqual(0xD202EF8DL, TestInt(0x01000000));
-    assertEqual(0xD202EF8DL, TestInt(0xff00));
-    assertEqual(0xD202EF8DL, TestInt(0xffff00));
-    assertEqual(0xD202EF8DL, TestInt(0xffffff00));
-    assertEqual(0xD202EF8DL, TestInt(0x1200));
-    assertEqual(0xD202EF8DL, TestInt(0x123400));
-    assertEqual(0xD202EF8DL, TestInt(0x12345600));
-    assertEqual(0xD202EF8DL, TestInt(Integer.MIN_VALUE));
+    // Check that only the low eight bits of the argument are used.
+    assertEqual(0xD202EF8DL, CRC32Byte(0x0));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x0100));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x010000));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x01000000));
+    assertEqual(0xD202EF8DL, CRC32Byte(0xff00));
+    assertEqual(0xD202EF8DL, CRC32Byte(0xffff00));
+    assertEqual(0xD202EF8DL, CRC32Byte(0xffffff00));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x1200));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x123400));
+    assertEqual(0xD202EF8DL, CRC32Byte(0x12345600));
+    assertEqual(0xD202EF8DL, CRC32Byte(Integer.MIN_VALUE));
 
     // Tests for checksums of the byte 0x1
-    assertEqual(0xA505DF1BL, TestInt(0x1));
-    assertEqual(0xA505DF1BL, TestInt(0x0101));
-    assertEqual(0xA505DF1BL, TestInt(0x010001));
-    assertEqual(0xA505DF1BL, TestInt(0x01000001));
-    assertEqual(0xA505DF1BL, TestInt(0xff01));
-    assertEqual(0xA505DF1BL, TestInt(0xffff01));
-    assertEqual(0xA505DF1BL, TestInt(0xffffff01));
-    assertEqual(0xA505DF1BL, TestInt(0x1201));
-    assertEqual(0xA505DF1BL, TestInt(0x123401));
-    assertEqual(0xA505DF1BL, TestInt(0x12345601));
+    // Check that only the low eight bits of the argument are used.
+    assertEqual(0xA505DF1BL, CRC32Byte(0x1));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x0101));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x010001));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x01000001));
+    assertEqual(0xA505DF1BL, CRC32Byte(0xff01));
+    assertEqual(0xA505DF1BL, CRC32Byte(0xffff01));
+    assertEqual(0xA505DF1BL, CRC32Byte(0xffffff01));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x1201));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x123401));
+    assertEqual(0xA505DF1BL, CRC32Byte(0x12345601));
 
     // Tests for checksums of the byte 0x0f
-    assertEqual(0x42BDF21CL, TestInt(0x0f));
-    assertEqual(0x42BDF21CL, TestInt(0x010f));
-    assertEqual(0x42BDF21CL, TestInt(0x01000f));
-    assertEqual(0x42BDF21CL, TestInt(0x0100000f));
-    assertEqual(0x42BDF21CL, TestInt(0xff0f));
-    assertEqual(0x42BDF21CL, TestInt(0xffff0f));
-    assertEqual(0x42BDF21CL, TestInt(0xffffff0f));
-    assertEqual(0x42BDF21CL, TestInt(0x120f));
-    assertEqual(0x42BDF21CL, TestInt(0x12340f));
-    assertEqual(0x42BDF21CL, TestInt(0x1234560f));
+    // Check that only the low eight bits of the argument are used.
+    assertEqual(0x42BDF21CL, CRC32Byte(0x0f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x010f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x01000f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x0100000f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0xff0f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0xffff0f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0xffffff0f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x120f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x12340f));
+    assertEqual(0x42BDF21CL, CRC32Byte(0x1234560f));
 
     // Tests for checksums of the byte 0xff
-    assertEqual(0xFF000000L, TestInt(0x00ff));
-    assertEqual(0xFF000000L, TestInt(0x01ff));
-    assertEqual(0xFF000000L, TestInt(0x0100ff));
-    assertEqual(0xFF000000L, TestInt(0x010000ff));
-    assertEqual(0xFF000000L, TestInt(0x0000ffff));
-    assertEqual(0xFF000000L, TestInt(0x00ffffff));
-    assertEqual(0xFF000000L, TestInt(0xffffffff));
-    assertEqual(0xFF000000L, TestInt(0x12ff));
-    assertEqual(0xFF000000L, TestInt(0x1234ff));
-    assertEqual(0xFF000000L, TestInt(0x123456ff));
-    assertEqual(0xFF000000L, TestInt(Integer.MAX_VALUE));
+    // Check that only the low eight bits of the argument are used.
+    assertEqual(0xFF000000L, CRC32Byte(0x00ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x01ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x0100ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x010000ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x0000ffff));
+    assertEqual(0xFF000000L, CRC32Byte(0x00ffffff));
+    assertEqual(0xFF000000L, CRC32Byte(0xffffffff));
+    assertEqual(0xFF000000L, CRC32Byte(0x12ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x1234ff));
+    assertEqual(0xFF000000L, CRC32Byte(0x123456ff));
+    assertEqual(0xFF000000L, CRC32Byte(Integer.MAX_VALUE));
 
     // Tests for sequences
-    assertEqual(0xFF41D912L, TestInt(0, 0, 0));
-    assertEqual(0xFF41D912L, TestInt(0x0100, 0x010000, 0x01000000));
-    assertEqual(0xFF41D912L, TestInt(0xff00, 0xffff00, 0xffffff00));
-    assertEqual(0xFF41D912L, TestInt(0x1200, 0x123400, 0x12345600));
+    // Check that only the low eight bits of the values are used.
+    assertEqual(0xFF41D912L, CRC32BytesUsingUpdateInt(0, 0, 0));
+    assertEqual(0xFF41D912L,
+                CRC32BytesUsingUpdateInt(0x0100, 0x010000, 0x01000000));
+    assertEqual(0xFF41D912L,
+                CRC32BytesUsingUpdateInt(0xff00, 0xffff00, 0xffffff00));
+    assertEqual(0xFF41D912L,
+                CRC32BytesUsingUpdateInt(0x1200, 0x123400, 0x12345600));
 
-    assertEqual(0x909FB2F2L, TestInt(1, 1, 1));
-    assertEqual(0x909FB2F2L, TestInt(0x0101, 0x010001, 0x01000001));
-    assertEqual(0x909FB2F2L, TestInt(0xff01, 0xffff01, 0xffffff01));
-    assertEqual(0x909FB2F2L, TestInt(0x1201, 0x123401, 0x12345601));
+    assertEqual(0x909FB2F2L, CRC32BytesUsingUpdateInt(1, 1, 1));
+    assertEqual(0x909FB2F2L,
+                CRC32BytesUsingUpdateInt(0x0101, 0x010001, 0x01000001));
+    assertEqual(0x909FB2F2L,
+                CRC32BytesUsingUpdateInt(0xff01, 0xffff01, 0xffffff01));
+    assertEqual(0x909FB2F2L,
+                CRC32BytesUsingUpdateInt(0x1201, 0x123401, 0x12345601));
 
-    assertEqual(0xE33A9F71L, TestInt(0x0f, 0x0f, 0x0f));
-    assertEqual(0xE33A9F71L, TestInt(0x010f, 0x01000f, 0x0100000f));
-    assertEqual(0xE33A9F71L, TestInt(0xff0f, 0xffff0f, 0xffffff0f));
-    assertEqual(0xE33A9F71L, TestInt(0x120f, 0x12340f, 0x1234560f));
+    assertEqual(0xE33A9F71L, CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f));
+    assertEqual(0xE33A9F71L,
+                CRC32BytesUsingUpdateInt(0x010f, 0x01000f, 0x0100000f));
+    assertEqual(0xE33A9F71L,
+                CRC32BytesUsingUpdateInt(0xff0f, 0xffff0f, 0xffffff0f));
+    assertEqual(0xE33A9F71L,
+                CRC32BytesUsingUpdateInt(0x120f, 0x12340f, 0x1234560f));
 
-    assertEqual(0xFFFFFF00L, TestInt(0x0ff, 0x0ff, 0x0ff));
-    assertEqual(0xFFFFFF00L, TestInt(0x01ff, 0x0100ff, 0x010000ff));
-    assertEqual(0xFFFFFF00L, TestInt(0x00ffff, 0x00ffffff, 0xffffffff));
-    assertEqual(0xFFFFFF00L, TestInt(0x12ff, 0x1234ff, 0x123456ff));
+    assertEqual(0xFFFFFF00L, CRC32BytesUsingUpdateInt(0x0ff, 0x0ff, 0x0ff));
+    assertEqual(0xFFFFFF00L,
+                CRC32BytesUsingUpdateInt(0x01ff, 0x0100ff, 0x010000ff));
+    assertEqual(0xFFFFFF00L,
+                CRC32BytesUsingUpdateInt(0x00ffff, 0x00ffffff, 0xffffffff));
+    assertEqual(0xFFFFFF00L,
+                CRC32BytesUsingUpdateInt(0x12ff, 0x1234ff, 0x123456ff));
 
-    assertEqual(0xB6CC4292L, TestInt(0x01, 0x02));
+    assertEqual(0xB6CC4292L, CRC32BytesUsingUpdateInt(0x01, 0x02));
 
-    assertEqual(0xB2DE047CL, TestInt(0x0, -1, Integer.MIN_VALUE, Integer.MAX_VALUE));
+    assertEqual(0xB2DE047CL,
+                CRC32BytesUsingUpdateInt(0x0, -1, Integer.MIN_VALUE, Integer.MAX_VALUE));
+  }
+
+  private static long CRC32ByteArray(byte[] bytes, int off, int len) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(bytes, off, len);
+    return crc32.getValue();
+  }
+
+  // This is used to test that we generate correct code for constant offsets.
+  // In this case the offset is 0.
+  private static long CRC32ByteArray(byte[] bytes) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(bytes);
+    return crc32.getValue();
+  }
+
+  private static long CRC32ByteAndByteArray(int value, byte[] bytes) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(value);
+    crc32.update(bytes);
+    return crc32.getValue();
+  }
+
+  private static long CRC32ByteArrayAndByte(byte[] bytes, int value) {
+    CRC32 crc32 = new CRC32();
+    crc32.update(bytes);
+    crc32.update(value);
+    return crc32.getValue();
+  }
+
+  private static boolean CRC32ByteArrayThrowsAIOOBE(byte[] bytes, int off, int len) {
+    try {
+      CRC32 crc32 = new CRC32();
+      crc32.update(bytes, off, len);
+    } catch (ArrayIndexOutOfBoundsException ex) {
+      return true;
+    }
+    return false;
+  }
+
+  private static boolean CRC32ByteArrayThrowsNPE() {
+    try {
+      CRC32 crc32 = new CRC32();
+      crc32.update(null, 0, 0);
+      return false;
+    } catch (NullPointerException e) {}
+
+    try {
+      CRC32 crc32 = new CRC32();
+      crc32.update(null, 1, 2);
+      return false;
+    } catch (NullPointerException e) {}
+
+    try {
+      CRC32 crc32 = new CRC32();
+      crc32.update((byte[])null);
+      return false;
+    } catch (NullPointerException e) {}
+
+    return true;
+  }
+
+  private static long CRC32BytesUsingUpdateInt(byte[] bytes, int off, int len) {
+    CRC32 crc32 = new CRC32();
+    while (len-- > 0) {
+      crc32.update(bytes[off++]);
+    }
+    return crc32.getValue();
+  }
+
+  private static void TestCRC32UpdateBytes() {
+    assertEqual(0L, CRC32ByteArray(new byte[] {}));
+    assertEqual(0L, CRC32ByteArray(new byte[] {}, 0, 0));
+    assertEqual(0L, CRC32ByteArray(new byte[] {0}, 0, 0));
+    assertEqual(0L, CRC32ByteArray(new byte[] {0}, 1, 0));
+    assertEqual(0L, CRC32ByteArray(new byte[] {0, 0}, 1, 0));
+
+    assertEqual(true, CRC32ByteArrayThrowsNPE());
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, -1, 0));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0}, -1, 1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0}, 0, -1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 0, -1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 1, 0));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, -1, 1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 1, -1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 0, 1));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 0, 10));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0}, 0, 10));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {}, 10, 10));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0, 0, 0, 0}, 2, 3));
+    assertEqual(true, CRC32ByteArrayThrowsAIOOBE(new byte[] {0, 0, 0, 0}, 3, 2));
+
+    assertEqual(CRC32Byte(0), CRC32ByteArray(new byte[] {0}));
+    assertEqual(CRC32Byte(0), CRC32ByteArray(new byte[] {0}, 0, 1));
+    assertEqual(CRC32Byte(1), CRC32ByteArray(new byte[] {1}));
+    assertEqual(CRC32Byte(1), CRC32ByteArray(new byte[] {1}, 0, 1));
+    assertEqual(CRC32Byte(0x0f), CRC32ByteArray(new byte[] {0x0f}));
+    assertEqual(CRC32Byte(0x0f), CRC32ByteArray(new byte[] {0x0f}, 0, 1));
+    assertEqual(CRC32Byte(0xff), CRC32ByteArray(new byte[] {-1}));
+    assertEqual(CRC32Byte(0xff), CRC32ByteArray(new byte[] {-1}, 0, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteArray(new byte[] {0, 0, 0}));
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteArray(new byte[] {0, 0, 0}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteArray(new byte[] {1, 1, 1}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteArray(new byte[] {1, 1, 1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteArray(new byte[] {0x0f, 0x0f, 0x0f}));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteArray(new byte[] {0x0f, 0x0f, 0x0f}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteArray(new byte[] {-1, -1, -1}));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteArray(new byte[] {-1, -1, -1}, 0, 3));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+                CRC32ByteArray(new byte[] {1, 2}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2),
+                CRC32ByteArray(new byte[] {1, 2}, 0, 2));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteArray(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteArray(new byte[] {0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE}, 0, 4));
+
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteAndByteArray(0, new byte[] {0, 0}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteAndByteArray(1, new byte[] {1, 1}));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteAndByteArray(0x0f, new byte[] {0x0f, 0x0f}));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteAndByteArray(-1, new byte[] {-1, -1}));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+                CRC32ByteAndByteArray(1, new byte[] {2, 3}));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteAndByteArray(0, new byte[] {-1, Byte.MIN_VALUE, Byte.MAX_VALUE}));
+
+    assertEqual(CRC32BytesUsingUpdateInt(0, 0, 0),
+                CRC32ByteArrayAndByte(new byte[] {0, 0}, 0));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 1, 1),
+                CRC32ByteArrayAndByte(new byte[] {1, 1}, 1));
+    assertEqual(CRC32BytesUsingUpdateInt(0x0f, 0x0f, 0x0f),
+                CRC32ByteArrayAndByte(new byte[] {0x0f, 0x0f}, 0x0f));
+    assertEqual(CRC32BytesUsingUpdateInt(0xff, 0xff, 0xff),
+                CRC32ByteArrayAndByte(new byte[] {-1, -1}, -1));
+    assertEqual(CRC32BytesUsingUpdateInt(1, 2, 3),
+                CRC32ByteArrayAndByte(new byte[] {1, 2}, 3));
+    assertEqual(
+        CRC32BytesUsingUpdateInt(0, -1, Byte.MIN_VALUE, Byte.MAX_VALUE),
+        CRC32ByteArrayAndByte(new byte[] {0, -1, Byte.MIN_VALUE}, Byte.MAX_VALUE));
+
+    byte[] bytes = new byte[128 * 1024];
+    Random rnd = new Random(0);
+    rnd.nextBytes(bytes);
+
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, bytes.length),
+                CRC32ByteArray(bytes));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, 8 * 1024),
+                CRC32ByteArray(bytes, 0, 8 * 1024));
+
+    int off = rnd.nextInt(bytes.length / 2);
+    for (int len = 0; len <= 16; ++len) {
+      assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                  CRC32ByteArray(bytes, off, len));
+    }
+
+    // Check there are no issues with unaligned accesses.
+    for (int o = 1; o < 8; ++o) {
+      for (int l = 0; l <= 16; ++l) {
+        assertEqual(CRC32BytesUsingUpdateInt(bytes, o, l),
+                    CRC32ByteArray(bytes, o, l));
+      }
+    }
+
+    int len = bytes.length / 2;
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len - 1),
+                CRC32ByteArray(bytes, 0, len - 1));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len),
+                CRC32ByteArray(bytes, 0, len));
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, 0, len + 1),
+                CRC32ByteArray(bytes, 0, len + 1));
+
+    len = rnd.nextInt(bytes.length + 1);
+    off = rnd.nextInt(bytes.length - len + 1);  // +1 keeps the bound positive when len == bytes.length.
+    assertEqual(CRC32BytesUsingUpdateInt(bytes, off, len),
+                CRC32ByteArray(bytes, off, len));
+  }
+
+  public static void main(String args[]) {
+    TestCRC32Update();
+    TestCRC32UpdateBytes();
   }
 }
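
As a standalone illustration of the API exercised above (plain java.util.zip.CRC32; the expected value comes straight from the test's own tables):

    import java.util.zip.CRC32;

    public class Crc32Demo {
      public static void main(String[] args) {
        CRC32 crc = new CRC32();
        crc.update(0xffffff00);  // update(int) consumes only the low 8 bits: 0x00.
        // Prints d202ef8d, matching CRC32Byte(0xffffff00) above.
        System.out.println(Long.toHexString(crc.getValue()));
      }
    }
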
diff --git a/test/924-threads/src/art/Test924.java b/test/924-threads/src/art/Test924.java
index e8e9781..e97c9c6 100644
--- a/test/924-threads/src/art/Test924.java
+++ b/test/924-threads/src/art/Test924.java
@@ -27,6 +27,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.time.Instant;
 
 public class Test924 {
   public static void run() throws Exception {
@@ -109,6 +110,7 @@
     final CountDownLatch cdl4 = new CountDownLatch(1);
     final CountDownLatch cdl5 = new CountDownLatch(1);
     final Holder h = new Holder();
+    final long ALMOST_INFINITE = 100000000;  // 1.1 days!
     final NativeWaiter w = new NativeWaiter();
     Runnable r = new Runnable() {
       @Override
@@ -121,7 +123,7 @@
 
           cdl2.countDown();
           synchronized(cdl2) {
-            cdl2.wait(1000);  // Wait a second.
+            cdl2.wait(ALMOST_INFINITE);
           }
 
           cdl3_1.await();
@@ -131,7 +133,9 @@
           }
 
           cdl4.countDown();
-          Thread.sleep(1000);
+          try {
+            Thread.sleep(ALMOST_INFINITE);
+          } catch (InterruptedException e) { }
 
           cdl5.countDown();
           while (!h.flag) {
@@ -152,18 +156,20 @@
 
     // Waiting.
     cdl1.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent, so just wait for the desired state for up to
+    // 5 minutes, then give up and continue.
+    final int WAITING_INDEF = 0x191;
+    waitForState(t, WAITING_INDEF);
     synchronized(cdl1) {
       cdl1.notifyAll();
     }
 
     // Timed waiting.
     cdl2.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent, so just wait for the desired state for up to
+    // 5 minutes, then give up and continue.
+    final int WAITING_TIMED = 0x1a1;
+    waitForState(t, WAITING_TIMED);
     synchronized(cdl2) {
       cdl2.notifyAll();
     }
@@ -185,14 +191,16 @@
 
     // Sleeping.
     cdl4.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent, so just wait for the desired state for up to
+    // 5 minutes, then give up and continue.
+    final int WAITING_SLEEP = 0xe1;
+    waitForState(t, WAITING_SLEEP);
+    t.interrupt();
 
     // Running.
     cdl5.await();
     Thread.yield();
-    Thread.sleep(100);
+    Thread.sleep(1000);
     printThreadState(t);
     h.flag = true;
 
@@ -204,11 +212,26 @@
     // Dying.
     t.join();
     Thread.yield();
-    Thread.sleep(100);
+    Thread.sleep(1000);
 
     printThreadState(t);
   }
 
+  private static void waitForState(Thread t, int desired) throws Exception {
+    Thread.yield();
+    Thread.sleep(1000);
+    // This is super inconsistent, so just wait for the desired state for up to
+    // 5 minutes, then give up and continue.
+    int state;
+    Instant deadline = Instant.now().plusSeconds(60 * 5);
+    while ((state = getThreadState(t)) != desired && deadline.isAfter(Instant.now())) {
+      Thread.yield();
+      Thread.sleep(100);
+      Thread.yield();
+    }
+    printThreadState(state);
+  }
+
   private static void doAllThreadsTests() {
     Thread[] threads = getAllThreads();
     List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
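
For reference, the magic thread-state constants above decompose into JVMTI thread-state bits (bit values taken from the JVMTI specification, not from this change):

    // JVMTI_THREAD_STATE_ALIVE = 0x1, WAITING = 0x80, WAITING_INDEFINITELY = 0x10,
    // WAITING_WITH_TIMEOUT = 0x20, SLEEPING = 0x40, IN_OBJECT_WAIT = 0x100.
    final int WAITING_INDEF = 0x1 | 0x80 | 0x10 | 0x100;  // 0x191: Object.wait()
    final int WAITING_TIMED = 0x1 | 0x80 | 0x20 | 0x100;  // 0x1a1: Object.wait(ms)
    final int WAITING_SLEEP = 0x1 | 0x80 | 0x20 | 0x40;   // 0xe1:  Thread.sleep(ms)
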
diff --git a/test/999-redefine-hiddenapi/src-redefine/gen.sh b/test/999-redefine-hiddenapi/src-redefine/gen.sh
index b5e2aea..f92d797 100755
--- a/test/999-redefine-hiddenapi/src-redefine/gen.sh
+++ b/test/999-redefine-hiddenapi/src-redefine/gen.sh
@@ -23,16 +23,10 @@
 (cd "$TMP" && \
     javac -d "${TMP}" "$DIR/${CLASS}.java" && \
-    d8 --output . "$TMP/${CLASS}.class" &&
-    hiddenapi encode --input-dex="$TMP/classes.dex" \
-                     --output-dex="$TMP/classes-hiddenapi.dex" \
-                     --api-flags="$DIR/../hiddenapi-flags.csv" \
-                     --no-force-assign-all)
+    d8 --output . "$TMP/${CLASS}.class")
 
 echo '  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode('
 base64 "${TMP}/${CLASS}.class" | sed -E 's/^/    "/' | sed ':a;N;$!ba;s/\n/" +\n/g' | sed -E '$ s/$/");/'
 echo '  private static final byte[] DEX_BYTES = Base64.getDecoder().decode('
 base64 "${TMP}/classes.dex" | sed -E 's/^/    "/' | sed ':a;N;$!ba;s/\n/" +\n/g' | sed -E '$ s/$/");/'
-echo '  private static final byte[] DEX_BYTES_HIDDEN = Base64.getDecoder().decode('
-base64 "${TMP}/classes-hiddenapi.dex" | sed -E 's/^/    "/' | sed ':a;N;$!ba;s/\n/" +\n/g' | sed -E '$ s/$/");/'
 
 rm -rf "$TMP"
diff --git a/test/999-redefine-hiddenapi/src/Main.java b/test/999-redefine-hiddenapi/src/Main.java
index 4627b4f..3d9bbda 100644
--- a/test/999-redefine-hiddenapi/src/Main.java
+++ b/test/999-redefine-hiddenapi/src/Main.java
@@ -31,53 +31,33 @@
 
     // Find the test class in boot class loader and verify that its members are hidden.
     Class<?> klass = Class.forName("art.Test999", true, BOOT_CLASS_LOADER);
-    assertMethodIsHidden(true, klass, "before redefinition");
-    assertFieldIsHidden(true, klass, "before redefinition");
+    assertFieldIsHidden(klass, "before redefinition");
+    assertMethodIsHidden(klass, "before redefinition");
 
     // Redefine the class using JVMTI. Use dex file without hiddenapi flags.
     art.Redefinition.setTestConfiguration(art.Redefinition.Config.COMMON_REDEFINE);
     art.Redefinition.doCommonClassRedefinition(klass, CLASS_BYTES, DEX_BYTES);
 
-    // Verify that the class members are not hidden anymore.
-    assertMethodIsHidden(false, klass, "after first redefinition");
-    assertFieldIsHidden(false, klass, "after first redefinition");
-
-    // Redefine the class using JVMTI, this time with a dex file with hiddenapi flags.
-    art.Redefinition.setTestConfiguration(art.Redefinition.Config.COMMON_REDEFINE);
-    art.Redefinition.doCommonClassRedefinition(klass, CLASS_BYTES, DEX_BYTES_HIDDEN);
-
-    // Verify that the class members are still accessible.
-    assertMethodIsHidden(false, klass, "after second redefinition");
-    assertFieldIsHidden(false, klass, "after second redefinition");
+    // Verify that the class members are still hidden.
+    assertFieldIsHidden(klass, "after first redefinition");
+    assertMethodIsHidden(klass, "after first redefinition");
   }
 
-  private static void assertMethodIsHidden(boolean expectedHidden, Class<?> klass, String msg) {
+  private static void assertMethodIsHidden(Class<?> klass, String msg) {
     try {
       klass.getDeclaredMethod("foo");
-      if (expectedHidden) {
-        // Unexpected. Should have thrown NoSuchMethodException.
-        throw new RuntimeException("Method should not be accessible " + msg);
-      }
+      // Unexpected. Should have thrown NoSuchMethodException.
+      throw new RuntimeException("Method should not be accessible " + msg);
     } catch (NoSuchMethodException ex) {
-      if (!expectedHidden) {
-        // Unexpected. Should not have thrown NoSuchMethodException.
-        throw new RuntimeException("Method should be accessible " + msg);
-      }
     }
   }
 
-  private static void assertFieldIsHidden(boolean expectedHidden, Class<?> klass, String msg) {
+  private static void assertFieldIsHidden(Class<?> klass, String msg) {
     try {
       klass.getDeclaredField("bar");
-      if (expectedHidden) {
-        // Unexpected. Should have thrown NoSuchFieldException.
-        throw new RuntimeException("Field should not be accessible " + msg);
-      }
+      // Unexpected. Should have thrown NoSuchFieldException.
+      throw new RuntimeException("Field should not be accessible " + msg);
     } catch (NoSuchFieldException ex) {
-      if (!expectedHidden) {
-        // Unexpected. Should not have thrown NoSuchFieldException.
-        throw new RuntimeException("Field should be accessible " + msg);
-      }
     }
   }
 
@@ -127,21 +107,4 @@
     "AAAABAAAAAIAAADkAAAABQAAAAQAAAD0AAAABgAAAAEAAAAUAQAAASAAAAIAAAA0AQAAAyAAAAIA" +
     "AAB0AQAAARAAAAEAAACAAQAAAiAAABAAAACGAQAAACAAAAEAAACgAgAAAxAAAAEAAACwAgAAABAA" +
     "AAEAAAC0AgAA");
-  private static final byte[] DEX_BYTES_HIDDEN = Base64.getDecoder().decode(
-    "ZGV4CjAzNQDsgG5ufKulToQpDF+P4dsgeOkgfzzH+5l4AwAAcAAAAHhWNBIAAAAAAAAAAMACAAAQ" +
-    "AAAAcAAAAAcAAACwAAAAAgAAAMwAAAACAAAA5AAAAAQAAAD0AAAAAQAAABQBAABEAgAANAEAAIYB" +
-    "AACOAQAAlwEAAJoBAACpAQAAwAEAANQBAADoAQAA/AEAAAoCAAANAgAAEQIAABYCAAAbAgAAIAIA" +
-    "ACkCAAACAAAAAwAAAAQAAAAFAAAABgAAAAcAAAAJAAAACQAAAAYAAAAAAAAACgAAAAYAAACAAQAA" +
-    "AQAAAAsAAAAFAAIADQAAAAEAAAAAAAAAAQAAAAwAAAACAAEADgAAAAMAAAAAAAAAAQAAAAEAAAAD" +
-    "AAAAAAAAAAgAAAAAAAAAoAIAAAAAAAACAAEAAQAAAHQBAAAIAAAAcBADAAEAEwBAAFkQAAAOAAMA" +
-    "AQACAAAAeQEAAAgAAABiAAEAGgEBAG4gAgAQAA4AEwAOQAAVAA54AAAAAQAAAAQABjxpbml0PgAH" +
-    "R29vZGJ5ZQABSQANTGFydC9UZXN0OTk5OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9s" +
-    "YW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AAxUZXN0" +
-    "OTk5LmphdmEAAVYAAlZMAANiYXIAA2ZvbwADb3V0AAdwcmludGxuAHV+fkQ4eyJjb21waWxhdGlv" +
-    "bi1tb2RlIjoiZGVidWciLCJtaW4tYXBpIjoxLCJzaGEtMSI6ImQyMmFiNGYxOWI3NTYxNDQ3NTI4" +
-    "NTdjYTg2YjJjZWU0ZGQ5Y2ExNjYiLCJ2ZXJzaW9uIjoiMS40LjktZGV2In0AAAEBAQABAIGABLQC" +
-    "AQHUAgAAAAALAAAACAAAAAIAAgAPAAAAAAAAAAEAAAAAAAAAAQAAABAAAABwAAAAAgAAAAcAAACw" +
-    "AAAAAwAAAAIAAADMAAAABAAAAAIAAADkAAAABQAAAAQAAAD0AAAABgAAAAEAAAAUAQAAASAAAAIA" +
-    "AAA0AQAAAyAAAAIAAAB0AQAAARAAAAEAAACAAQAAAiAAABAAAACGAQAAACAAAAEAAACgAgAAAxAA" +
-    "AAEAAACwAgAAAPAAAAEAAAC0AgAAABAAAAEAAADAAgAA");
 }
diff --git a/test/Android.bp b/test/Android.bp
index d85e2a6..57fcd86 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -322,6 +322,7 @@
         "1940-ddms-ext/ddm_ext.cc",
         "1944-sudden-exit/sudden_exit.cc",
         // "1952-pop-frame-jit/pop_frame.cc",
+        "1959-redefine-object-instrument/fake_redef_object.cc",
     ],
     static_libs: [
         "libz",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index ca860e2..5d07601 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -50,6 +50,9 @@
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
 
 # All tests require the host executables. The tests also depend on the core images, but on
 # specific version depending on the compiler.
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 65127fc..55631a9 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -227,7 +227,7 @@
       // Make sure there is a profiling info, required by the compiler.
       ProfilingInfo::Create(self, method, /* retry_allocation */ true);
       // Will either ensure it's compiled or do the compilation itself.
-      jit->CompileMethod(method, self, /* osr */ false);
+      jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false);
     }
   }
 }
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 5fede0e..4c31ee5 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -9,6 +9,7 @@
 }
 
 ANDROID_ROOT="/system"
+ANDROID_RUNTIME_ROOT="/apex/com.android.runtime"
 ARCHITECTURES_32="(arm|x86|mips|none)"
 ARCHITECTURES_64="(arm64|x86_64|mips64|none)"
 ARCHITECTURES_PATTERN="${ARCHITECTURES_32}"
@@ -33,6 +34,8 @@
 HOST="n"
 BIONIC="n"
 CREATE_ANDROID_ROOT="n"
+USE_ZIPAPEX="n"
+ZIPAPEX_LOC=""
 INTERPRETER="n"
 JIT="n"
 INVOKE_WITH=""
@@ -209,7 +212,8 @@
         shift
     elif [ "x$1" = "x--host" ]; then
         HOST="y"
-        ANDROID_ROOT="$ANDROID_HOST_OUT"
+        ANDROID_ROOT="${ANDROID_HOST_OUT}"
+        ANDROID_RUNTIME_ROOT="${ANDROID_HOST_OUT}/com.android.runtime"
         shift
     elif [ "x$1" = "x--bionic" ]; then
         BIONIC="y"
@@ -218,6 +222,11 @@
         # host ones which are in a different location.
         CREATE_ANDROID_ROOT="y"
         shift
+    elif [ "x$1" = "x--runtime-zipapex" ]; then
+        shift
+        USE_ZIPAPEX="y"
+        ZIPAPEX_LOC="$1"
+        shift
     elif [ "x$1" = "x--no-prebuild" ]; then
         PREBUILD="n"
         shift
@@ -539,7 +548,10 @@
   exit
 fi
 
-bpath_modules="core-oj core-libart core-simple"
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+bpath_modules="core-oj core-libart core-simple okhttp bouncycastle conscrypt"
 if [ "${HOST}" = "y" ]; then
     framework="${ANDROID_HOST_OUT}/framework"
     if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then
@@ -701,6 +713,8 @@
     exit 1
 fi
 
+BIN_DIR=$ANDROID_ROOT/bin
+
 profman_cmdline="true"
 dex2oat_cmdline="true"
 vdex_cmdline="true"
@@ -710,6 +724,8 @@
 sync_cmdline="true"
 linkroot_cmdline="true"
 linkroot_overlay_cmdline="true"
+setupapex_cmdline="true"
+installapex_cmdline="true"
 
 linkdirs() {
   find "$1" -maxdepth 1 -mindepth 1 -type d | xargs -i ln -sf '{}' "$2"
@@ -724,10 +740,23 @@
   fi
 fi
 
+if [ "$USE_ZIPAPEX" = "y" ]; then
+  # TODO Currently this only works for linux_bionic zipapexes, because those
+  # are stripped and therefore small enough that the ulimit doesn't kill us.
+  mkdir_locations="${mkdir_locations} $DEX_LOCATION/zipapex"
+  zip_options="-qq"
+  if [ "$DEV_MODE" = "y" ]; then
+    zip_options=""
+  fi
+  setupapex_cmdline="unzip -u ${zip_options} ${ZIPAPEX_LOC} apex_payload.zip -d ${DEX_LOCATION}"
+  installapex_cmdline="unzip -u ${zip_options} ${DEX_LOCATION}/apex_payload.zip -d ${DEX_LOCATION}/zipapex"
+  BIN_DIR=$DEX_LOCATION/zipapex/bin
+fi
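The two unzip commands above amount to a nested archive extraction; a rough Python equivalent, not part of the patch, where the literal paths stand in for ZIPAPEX_LOC and DEX_LOCATION:

    import zipfile

    # A .zipapex is itself a zip archive whose apex_payload.zip member holds
    # the runtime binaries, so extraction happens in two steps.
    with zipfile.ZipFile("com.android.runtime.host.zipapex") as outer:
        outer.extract("apex_payload.zip", path="/tmp/dex_location")
    with zipfile.ZipFile("/tmp/dex_location/apex_payload.zip") as payload:
        payload.extractall("/tmp/dex_location/zipapex")

BIN_DIR then points into the extracted payload so the test runs those binaries.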
+
 # PROFILE takes precedence over RANDOM_PROFILE, since PROFILE tests require a
 # specific profile to run properly.
 if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
-  profman_cmdline="${ANDROID_ROOT}/bin/profman  \
+  profman_cmdline="$BIN_DIR/profman  \
     --apk=$DEX_LOCATION/$TEST_NAME.jar \
     --dex-location=$DEX_LOCATION/$TEST_NAME.jar"
   if [ -f $DEX_LOCATION/$TEST_NAME-ex.jar ]; then
@@ -757,7 +786,7 @@
   if  [[ "$TEST_IS_NDEBUG" = "y" ]]; then
     dex2oat_binary=dex2oat
   fi
-  dex2oat_cmdline="$INVOKE_WITH $ANDROID_ROOT/bin/$dex2oat_binary \
+  dex2oat_cmdline="$INVOKE_WITH $BIN_DIR/$dex2oat_binary \
                       $COMPILE_FLAGS \
                       --boot-image=${BOOT_IMAGE} \
                       --dex-file=$DEX_LOCATION/$TEST_NAME.jar \
@@ -814,7 +843,7 @@
 # We set DumpNativeStackOnSigQuit to false to avoid stressing libunwind.
 # b/27185632
 # b/24664297
-dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
+dalvikvm_cmdline="$INVOKE_WITH $GDB $BIN_DIR/$DALVIKVM \
                   $GDB_ARGS \
                   $FLAGS \
                   $DEX_VERIFY \
@@ -917,11 +946,12 @@
              export ANDROID_ADDITIONAL_PUBLIC_LIBRARIES=$PUBLIC_LIBS && \
              export DEX_LOCATION=$DEX_LOCATION && \
              export ANDROID_ROOT=$ANDROID_ROOT && \
+             export ANDROID_RUNTIME_ROOT=$ANDROID_RUNTIME_ROOT && \
              export ANDROID_LOG_TAGS=$ANDROID_LOG_TAGS && \
              rm -rf ${DEX_LOCATION}/dalvik-cache/ && \
              mkdir -p ${mkdir_locations} && \
              export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
-             export PATH=$ANDROID_ROOT/bin:$PATH && \
+             export PATH=$BIN_DIR:$PATH && \
              $profman_cmdline && \
              $dex2oat_cmdline && \
              $dm_cmdline && \
@@ -961,9 +991,14 @@
 
     export ANDROID_DATA="$DEX_LOCATION"
     export ANDROID_ROOT="${ANDROID_ROOT}"
+    export ANDROID_RUNTIME_ROOT="${ANDROID_RUNTIME_ROOT}"
     export LD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}"
+    if [ "$USE_ZIPAPEX" = "y" ]; then
+      # Put the zipapex libraries at the front of LD_LIBRARY_PATH.
+      export LD_LIBRARY_PATH="${ANDROID_DATA}/zipapex/${LIBRARY_DIRECTORY}:${LD_LIBRARY_PATH}"
+    fi
     export DYLD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}"
-    export PATH="$PATH:${ANDROID_ROOT}/bin"
+    export PATH="$PATH:$BIN_DIR"
 
     # Temporarily disable address space layout randomization (ASLR).
     # This is needed on the host so that the linker loads core.oat at the necessary address.
@@ -1002,7 +1037,7 @@
         echo EXPORT $var=${!var}
       done
       echo "$(declare -f linkdirs)"
-      echo "mkdir -p ${mkdir_locations} && $linkroot_cmdline && $linkroot_overlay_cmdline && $profman_cmdline && $dex2oat_cmdline && $dm_cmdline && $vdex_cmdline && $strip_cmdline && $sync_cmdline && $cmdline"
+      echo "mkdir -p ${mkdir_locations} && $setupapex_cmdline && $installapex_cmdline && $linkroot_cmdline && $linkroot_overlay_cmdline && $profman_cmdline && $dex2oat_cmdline && $dm_cmdline && $vdex_cmdline && $strip_cmdline && $sync_cmdline && $cmdline"
     fi
 
     cd $ANDROID_BUILD_TOP
@@ -1016,6 +1051,8 @@
     export ASAN_OPTIONS=$RUN_TEST_ASAN_OPTIONS
 
     mkdir -p ${mkdir_locations} || exit 1
+    $setupapex_cmdline || { echo "zipapex extraction failed." >&2 ; exit 2; }
+    $installapex_cmdline || { echo "zipapex install failed." >&2 ; exit 2; }
     $linkroot_cmdline || { echo "create symlink android-root failed." >&2 ; exit 2; }
     $linkroot_overlay_cmdline || { echo "overlay android-root failed." >&2 ; exit 2; }
     $profman_cmdline || { echo "Profman failed." >&2 ; exit 2; }
diff --git a/test/run-test b/test/run-test
index 2363152..83c726e 100755
--- a/test/run-test
+++ b/test/run-test
@@ -164,6 +164,15 @@
 image_suffix=""
 run_optimizing="false"
 
+# To cause tests to fail fast, limit the sizes of files created by dx, dex2oat
+# and ART to approximately 128MB. This should be more than sufficient for any
+# test while still catching runaway output.
+# The limit is deliberately hard: if a test needs more, increase the ulimit
+# here rather than resetting the limit in that test's run script. Adjust this
+# if needed for particular configurations.
+file_ulimit=128000
+
 while true; do
     if [ "x$1" = "x--host" ]; then
         target_mode="no"
@@ -395,6 +404,18 @@
         DEX_LOCATION=$tmp_dir
         host_lib_root=$OUT_DIR/soong/host/linux_bionic-x86
         shift
+    elif [ "x$1" = "x--runtime-zipapex" ]; then
+        shift
+        # TODO Should we allow the java.library.path to search the zipapex too?
+        # It is not needed at the moment, and adding it would be complicated,
+        # so for now we ignore it.
+        run_args="${run_args} --host --runtime-zipapex $1"
+        target_mode="no"
+        DEX_LOCATION=$tmp_dir
+        # apex_payload.zip is quite large, so we need a high enough ulimit to
+        # extract it. 512MB should be good enough.
+        file_ulimit=512000
+        shift
     elif [ "x$1" = "x--trace" ]; then
         trace="true"
         shift
@@ -722,6 +743,8 @@
              "files."
         echo "    --64                  Run the test in 64-bit mode"
         echo "    --bionic              Use the (host, 64-bit only) linux_bionic libc runtime"
+        echo "    --runtime-zipapex [file]"
+        echo "                          Use the given zipapex file to provide runtime binaries"
         echo "    --trace               Run with method tracing"
         echo "    --strace              Run with syscall tracing from strace."
         echo "    --stream              Run method tracing in streaming mode (requires --trace)"
@@ -823,13 +846,7 @@
 
 run_args="${run_args} --testlib ${testlib}"
 
-# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and
-# ART output to approximately 128MB. This should be more than sufficient
-# for any test while still catching cases of runaway output.
-# Set a hard limit to encourage ART developers to increase the ulimit here if
-# needed to support a test case rather than resetting the limit in the run
-# script for the particular test in question.
-if ! ulimit -f 128000; then
+if ! ulimit -f ${file_ulimit}; then
   err_echo "ulimit file size setting failed"
 fi
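For readers less familiar with shell limits, the ulimit call above corresponds roughly to setting RLIMIT_FSIZE; a hedged Python sketch (bash scales ulimit -f in 1024-byte blocks, so 128000 is approximately 128MB):

    import resource

    # Cap the size of files this process and its children may create;
    # a write past the limit delivers SIGXFSZ instead of filling the disk.
    file_ulimit = 128000  # default value from run-test above
    limit_bytes = file_ulimit * 1024
    resource.setrlimit(resource.RLIMIT_FSIZE, (limit_bytes, limit_bytes))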
 
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index b4a4ada..139d1af 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -108,7 +108,7 @@
   run_test_command = [os.path.join(env.ANDROID_BUILD_TOP,
                                    'art/test/testrunner/testrunner.py')]
   test_flags = target.get('run-test', [])
-  run_test_command += test_flags
+  run_test_command += list(map(lambda a: a.format(SOONG_OUT_DIR=env.SOONG_OUT_DIR), test_flags))
   # Let testrunner compute concurrency based on #cpus.
   # b/65822340
   # run_test_command += ['-j', str(n_threads)]
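The new map/format call expands {SOONG_OUT_DIR} placeholders in the configured flags; a small illustration, where the flag text mirrors target_config.py below and "out/soong" is a made-up stand-in for the real out directory:

    # Flags without a placeholder pass through .format() unchanged.
    test_flags = ["--host", "--64",
                  "--run-test-option='--runtime-zipapex "
                  "{SOONG_OUT_DIR}/host/linux_bionic-x86/apex/com.android.runtime.host.zipapex'"]
    expanded = [a.format(SOONG_OUT_DIR="out/soong") for a in test_flags]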
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 077129f..bc22360 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -336,4 +336,12 @@
                      '--64',
                      '--no-build-dependencies'],
     },
+    'art-linux-bionic-x64-zipapex': {
+        'build': '{ANDROID_BUILD_TOP}/art/tools/build_linux_bionic_tests.sh {MAKE_OPTIONS} com.android.runtime.host',
+        'run-test': ['--run-test-option=--bionic',
+                     "--run-test-option='--runtime-zipapex {SOONG_OUT_DIR}/host/linux_bionic-x86/apex/com.android.runtime.host.zipapex'",
+                     '--host',
+                     '--64',
+                     '--no-build-dependencies'],
+    },
 }
diff --git a/tools/art b/tools/art
index fbc7992..eeb7c68 100644
--- a/tools/art
+++ b/tools/art
@@ -261,6 +261,7 @@
   # Run dalvikvm.
   verbose_run ANDROID_DATA="$ANDROID_DATA"                  \
               ANDROID_ROOT="$ANDROID_ROOT"                  \
+              ANDROID_RUNTIME_ROOT="$ANDROID_RUNTIME_ROOT"  \
               LD_LIBRARY_PATH="$LD_LIBRARY_PATH"            \
               PATH="$ANDROID_ROOT/bin:$PATH"                \
               LD_USE_LOAD_BIAS=1                            \
@@ -386,6 +387,7 @@
 
 PROG_DIR="$(cd "${PROG_NAME%/*}" ; pwd -P)"
 ANDROID_ROOT=$PROG_DIR/..
+ANDROID_RUNTIME_ROOT=$ANDROID_ROOT/com.android.runtime
 ART_BINARY_PATH=$ANDROID_ROOT/bin/$ART_BINARY
 
 if [ ! -x "$ART_BINARY_PATH" ]; then
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index ecd7084..9f22827 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -72,7 +72,9 @@
   # FIXME: The soong invocation we're using for getting the variables does not give us anything
   # defined in Android.common_path.mk, otherwise we would just use HOST-/TARGET_TEST_CORE_JARS.
 
-  # The core_jars_list must match the TEST_CORE_JARS variable in the Android.common_path.mk .
+  # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+  # because that's what we use for compiling the core.art image.
+  # It may contain additional modules from TEST_CORE_JARS.
   core_jars_list="core-oj core-libart core-simple"
   core_jars_suffix=
   if [[ $mode == target ]]; then
diff --git a/tools/build_linux_bionic.sh b/tools/build_linux_bionic.sh
new file mode 100755
index 0000000..94ccc41
--- /dev/null
+++ b/tools/build_linux_bionic.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This will build a target using linux_bionic. It can be called with normal make
+# flags.
+#
+# TODO This runs 'm clean' prior to building the targets in order to ensure
+# that obsolete kati files don't mess up the build.
+
+if [[ -z $ANDROID_BUILD_TOP ]]; then
+  pushd .
+else
+  pushd $ANDROID_BUILD_TOP
+fi
+
+if [ ! -d art ]; then
+  echo "Script needs to be run at the root of the android tree"
+  exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var
+# Soong needs a bunch of variables set and will not run if they are missing.
+# The default values of these variables are only defined in make, so build the
+# 'nothing' target to create the variables, then remove all the other artifacts.
+build/soong/soong_ui.bash --make-mode nothing
+if [ $? != 0 ]; then
+  exit 1
+fi
+
+out_dir=$(get_build_var OUT_DIR)
+host_out=$(get_build_var HOST_OUT)
+
+# TODO(b/31559095) Figure out a better way to do this.
+#
+# There is currently no good way to force soong to generate host-bionic
+# builds, so this is a hacky workaround.
+tmp_soong_var=$(mktemp --tmpdir soong.variables.bak.XXXXXX)
+
+cat $out_dir/soong/soong.variables > ${tmp_soong_var}
+build/soong/soong_ui.bash --make-mode clean
+mkdir -p $out_dir/soong
+
+python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables
+import json
+import sys
+x = json.load(open(sys.argv[1]))
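+# Rewrite the saved soong.variables: allow missing dependencies and force an
+# x86_64 linux_bionic cross-host build.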
+x['Allow_missing_dependencies'] = True
+x['HostArch'] = 'x86_64'
+x['CrossHost'] = 'linux_bionic'
+x['CrossHostArch'] = 'x86_64'
+if 'CrossHostSecondaryArch' in x:
+  del x['CrossHostSecondaryArch']
+json.dump(x, open(sys.argv[2], mode='w'))
+END
+
+rm $tmp_soong_var
+
+build/soong/soong_ui.bash --make-mode --skip-make "$@"
diff --git a/tools/build_linux_bionic_tests.sh b/tools/build_linux_bionic_tests.sh
index 2b178f2..c532c90 100755
--- a/tools/build_linux_bionic_tests.sh
+++ b/tools/build_linux_bionic_tests.sh
@@ -81,6 +81,7 @@
   $soong_out/bin/hiddenapi
   $soong_out/bin/hprof-conv
   $soong_out/bin/timeout_dumper
+  $(find $host_out/apex -type f | sed "s:$host_out:$soong_out:g")
   $(find $host_out/lib64 -type f | sed "s:$host_out:$soong_out:g")
   $(find $host_out/nativetest64 -type f | sed "s:$host_out:$soong_out:g"))
 
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotatedClassContext.java b/tools/class2greylist/src/com/android/class2greylist/AnnotatedClassContext.java
new file mode 100644
index 0000000..1dd74dd
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotatedClassContext.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.class2greylist;
+
+import java.util.Formatter;
+import java.util.Locale;
+import org.apache.bcel.classfile.JavaClass;
+
+/**
+ * Encapsulates context for a single annotation on a class.
+ */
+public class AnnotatedClassContext extends AnnotationContext {
+
+    public final String signatureFormatString;
+
+    public AnnotatedClassContext(
+            Status status,
+            JavaClass definingClass,
+            String signatureFormatString) {
+        super(status, definingClass);
+        this.signatureFormatString = signatureFormatString;
+    }
+
+    @Override
+    public String getMemberDescriptor() {
+        return String.format(Locale.US, signatureFormatString, getClassDescriptor());
+    }
+
+    @Override
+    public void reportError(String message, Object... args) {
+        Formatter error = new Formatter();
+        error
+            .format("%s: %s: ", definingClass.getSourceFileName(), definingClass.getClassName())
+            .format(Locale.US, message, args);
+
+        status.error(error.toString());
+    }
+
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotatedMemberContext.java b/tools/class2greylist/src/com/android/class2greylist/AnnotatedMemberContext.java
new file mode 100644
index 0000000..4802788
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotatedMemberContext.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.class2greylist;
+
+import java.util.Formatter;
+import java.util.Locale;
+
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.JavaClass;
+
+/**
+ * Encapsulates context for a single annotation on a class member.
+ */
+public class AnnotatedMemberContext extends AnnotationContext {
+
+    public final FieldOrMethod member;
+    public final String signatureFormatString;
+
+    public AnnotatedMemberContext(
+        Status status,
+        JavaClass definingClass,
+        FieldOrMethod member,
+        String signatureFormatString) {
+        super(status, definingClass);
+        this.member = member;
+        this.signatureFormatString = signatureFormatString;
+    }
+
+    @Override
+    public String getMemberDescriptor() {
+        return String.format(Locale.US, signatureFormatString,
+            getClassDescriptor(), member.getName(), member.getSignature());
+    }
+
+    @Override
+    public void reportError(String message, Object... args) {
+        Formatter error = new Formatter();
+        error
+            .format("%s: %s.%s: ", definingClass.getSourceFileName(),
+                definingClass.getClassName(), member.getName())
+            .format(Locale.US, message, args);
+
+        status.error(error.toString());
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
index eb54a33..73b74a9 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
@@ -1,66 +1,51 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package com.android.class2greylist;
 
 import org.apache.bcel.Const;
-import org.apache.bcel.classfile.FieldOrMethod;
 import org.apache.bcel.classfile.JavaClass;
 
-import java.util.Locale;
-
 /**
- * Encapsulates context for a single annotation on a class member.
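+ * Encapsulates context for a single annotation.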
  */
-public class AnnotationContext {
+public abstract class AnnotationContext {
 
-    public final Status status;
-    public final FieldOrMethod member;
-    public final JavaClass definingClass;
-    public final String signatureFormatString;
+  public final Status status;
+  public final JavaClass definingClass;
 
-    public AnnotationContext(
-            Status status,
-            FieldOrMethod member,
-            JavaClass definingClass,
-            String signatureFormatString) {
-        this.status = status;
-        this.member = member;
-        this.definingClass = definingClass;
-        this.signatureFormatString = signatureFormatString;
-    }
+  public AnnotationContext(Status status, JavaClass definingClass) {
+    this.status = status;
+    this.definingClass = definingClass;
+  }
 
-    /**
-     * @return the full descriptor of enclosing class.
-     */
-    public String getClassDescriptor() {
-        // JavaClass.getName() returns the Java-style name (with . not /), so we must fetch
-        // the original class name from the constant pool.
-        return definingClass.getConstantPool().getConstantString(
-                definingClass.getClassNameIndex(), Const.CONSTANT_Class);
-    }
+  public String getClassDescriptor() {
+      // JavaClass.getName() returns the Java-style name (with . not /), so we must fetch
+      // the original class name from the constant pool.
+      return definingClass.getConstantPool().getConstantString(
+              definingClass.getClassNameIndex(), Const.CONSTANT_Class);
+  }
 
-    /**
-     * @return the full descriptor of this member, in the format expected in
-     * the greylist.
-     */
-    public String getMemberDescriptor() {
-        return String.format(Locale.US, signatureFormatString,
-                getClassDescriptor(), member.getName(), member.getSignature());
-    }
+  /**
+   * @return the full descriptor of this member, in the format expected in
+   * the greylist.
+   */
+  public abstract String getMemberDescriptor();
 
-    /**
-     * Report an error in this context. The final error message will include
-     * the class and member names, and the source file name.
-     */
-    public void reportError(String message, Object... args) {
-        StringBuilder error = new StringBuilder();
-        error.append(definingClass.getSourceFileName())
-                .append(": ")
-                .append(definingClass.getClassName())
-                .append(".")
-                .append(member.getName())
-                .append(": ")
-                .append(String.format(Locale.US, message, args));
-
-        status.error(error.toString());
-    }
-
+  /**
+   * Report an error in this context. The final error message will include
+   * the class and member names, and the source file name.
+   */
+  public abstract void reportError(String message, Object... args);
 }
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
index 57ccbdc..3a58cf1 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -55,6 +55,10 @@
 
     public void visit() {
         mStatus.debug("Visit class %s", mClass.getClassName());
+        AnnotationContext context = new AnnotatedClassContext(mStatus, mClass, "L%s;");
+        AnnotationEntry[] annotationEntries = mClass.getAnnotationEntries();
+        handleAnnotations(context, annotationEntries);
+
         mDescendingVisitor.visit();
     }
 
@@ -70,9 +74,15 @@
 
     private void visitMember(FieldOrMethod member, String signatureFormatString) {
         mStatus.debug("Visit member %s : %s", member.getName(), member.getSignature());
-        AnnotationContext context = new AnnotationContext(mStatus, member,
-                (JavaClass) mDescendingVisitor.predecessor(), signatureFormatString);
-        for (AnnotationEntry a : member.getAnnotationEntries()) {
+        AnnotationContext context = new AnnotatedMemberContext(mStatus,
+            (JavaClass) mDescendingVisitor.predecessor(), member,
+            signatureFormatString);
+        AnnotationEntry[] annotationEntries = member.getAnnotationEntries();
+        handleAnnotations(context, annotationEntries);
+    }
+
+    private void handleAnnotations(AnnotationContext context, AnnotationEntry[] annotationEntries) {
+        for (AnnotationEntry a : annotationEntries) {
             if (mAnnotationHandlers.containsKey(a.getAnnotationType())) {
                 mStatus.debug("Member has annotation %s for which we have a handler",
                         a.getAnnotationType());
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index 621ee11..1da848b 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -17,7 +17,6 @@
 package com.android.class2greylist;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
 import com.google.common.collect.ImmutableSet;
@@ -35,16 +34,10 @@
 import java.io.File;
 import java.io.IOException;
 import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
-import java.util.HashMap;
 import java.util.Set;
-import java.util.function.Predicate;
 
 /**
  * Build time tool for extracting a list of members from jar files that have the @UsedByApps
@@ -54,21 +47,22 @@
 
     private static final Set<String> GREYLIST_ANNOTATIONS =
             ImmutableSet.of(
-                    "Landroid/annotation/UnsupportedAppUsage;",
-                    "Ldalvik/annotation/compat/UnsupportedAppUsage;");
+                    "android.annotation.UnsupportedAppUsage",
+                    "dalvik.annotation.compat.UnsupportedAppUsage");
     private static final Set<String> WHITELIST_ANNOTATIONS = ImmutableSet.of();
 
     public static final String FLAG_WHITELIST = "whitelist";
     public static final String FLAG_GREYLIST = "greylist";
     public static final String FLAG_BLACKLIST = "blacklist";
     public static final String FLAG_GREYLIST_MAX_O = "greylist-max-o";
+    public static final String FLAG_GREYLIST_MAX_P = "greylist-max-p";
 
     private static final Map<Integer, String> TARGET_SDK_TO_LIST_MAP;
     static {
         Map<Integer, String> map = new HashMap<>();
         map.put(null, FLAG_GREYLIST);
         map.put(26, FLAG_GREYLIST_MAX_O);
-        map.put(28, FLAG_GREYLIST);
+        map.put(28, FLAG_GREYLIST_MAX_P);
         TARGET_SDK_TO_LIST_MAP = Collections.unmodifiableMap(map);
     }
 
@@ -191,13 +185,41 @@
         UnsupportedAppUsageAnnotationHandler greylistAnnotationHandler =
                 new UnsupportedAppUsageAnnotationHandler(
                     mStatus, mOutput, mPublicApis, TARGET_SDK_TO_LIST_MAP);
-        GREYLIST_ANNOTATIONS.forEach(a -> builder.put(a, greylistAnnotationHandler));
+        GREYLIST_ANNOTATIONS
+            .forEach(a -> addRepeatedAnnotationHandlers(
+                builder,
+                classNameToSignature(a),
+                classNameToSignature(a + "$Container"),
+                greylistAnnotationHandler));
+
+        CovariantReturnTypeHandler covariantReturnTypeHandler = new CovariantReturnTypeHandler(
+            mOutput, mPublicApis, FLAG_WHITELIST);
+
+        return addRepeatedAnnotationHandlers(builder, CovariantReturnTypeHandler.ANNOTATION_NAME,
+            CovariantReturnTypeHandler.REPEATED_ANNOTATION_NAME, covariantReturnTypeHandler)
+            .build();
+    }
+
+    private String classNameToSignature(String a) {
+        return "L" + a.replace('.', '/') + ";";
+    }
+
+    /**
+     * Add a handler for an annotation as well as a handler for the container annotation that
+     * is used when the annotation is repeated.
+     *
+     * @param builder the builder for the map to which the handlers will be added.
+     * @param annotationName the name of the annotation.
+     * @param containerAnnotationName the name of the container annotation.
+     * @param handler the handler for the annotation.
+     */
+    private static Builder<String, AnnotationHandler> addRepeatedAnnotationHandlers(
+        Builder<String, AnnotationHandler> builder,
+        String annotationName, String containerAnnotationName,
+        AnnotationHandler handler) {
         return builder
-                .put(CovariantReturnTypeHandler.ANNOTATION_NAME,
-                        new CovariantReturnTypeHandler(mOutput, mPublicApis, FLAG_WHITELIST))
-                .put(CovariantReturnTypeMultiHandler.ANNOTATION_NAME,
-                        new CovariantReturnTypeMultiHandler(mOutput, mPublicApis, FLAG_WHITELIST))
-                .build();
+            .put(annotationName, handler)
+            .put(containerAnnotationName, new RepeatedAnnotationHandler(annotationName, handler));
     }
 
     private void main() throws IOException {
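To make the double registration above concrete, a hedged Python rendering of the scheme (not the tool's code; the handler values are illustrative placeholders):

    # Each annotation is registered under its own descriptor and under the
    # descriptor of its repetition container, which unwraps and delegates.
    def class_name_to_signature(name):
        return "L" + name.replace(".", "/") + ";"

    handlers = {}
    for name in ("android.annotation.UnsupportedAppUsage",
                 "dalvik.annotation.compat.UnsupportedAppUsage"):
        handlers[class_name_to_signature(name)] = "greylist-handler"
        handlers[class_name_to_signature(name + "$Container")] = "repeated(greylist-handler)"
    assert "Landroid/annotation/UnsupportedAppUsage$Container;" in handlers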
diff --git a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java
index b8de7e9..eb2e42d 100644
--- a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java
+++ b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeHandler.java
@@ -4,15 +4,10 @@
 import com.google.common.collect.ImmutableSet;
 
 import org.apache.bcel.classfile.AnnotationEntry;
-import org.apache.bcel.classfile.Constant;
-import org.apache.bcel.classfile.ConstantPool;
-import org.apache.bcel.classfile.ElementValue;
 import org.apache.bcel.classfile.ElementValuePair;
 import org.apache.bcel.classfile.Method;
 
-import java.util.List;
 import java.util.Locale;
-import java.util.Map;
 import java.util.Set;
 
 /**
@@ -30,6 +25,8 @@
 
     private static final String SHORT_NAME = "CovariantReturnType";
     public static final String ANNOTATION_NAME = "Ldalvik/annotation/codegen/CovariantReturnType;";
+    public static final String REPEATED_ANNOTATION_NAME =
+        "Ldalvik/annotation/codegen/CovariantReturnType$CovariantReturnTypes;";
 
     private static final String RETURN_TYPE = "returnType";
 
@@ -46,6 +43,13 @@
 
     @Override
     public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
+        if (context instanceof AnnotatedClassContext) {
+            return;
+        }
+        handleAnnotation(annotation, (AnnotatedMemberContext) context);
+    }
+
+    private void handleAnnotation(AnnotationEntry annotation, AnnotatedMemberContext context) {
         // Verify that the annotation has been applied to what we expect, and
         // has the right form. Note, this should not strictly be necessary, as
         // the annotation has a target of just 'method' and the property
diff --git a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java b/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java
deleted file mode 100644
index f2bc525..0000000
--- a/tools/class2greylist/src/com/android/class2greylist/CovariantReturnTypeMultiHandler.java
+++ /dev/null
@@ -1,73 +0,0 @@
-package com.android.class2greylist;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import org.apache.bcel.classfile.AnnotationElementValue;
-import org.apache.bcel.classfile.AnnotationEntry;
-import org.apache.bcel.classfile.ArrayElementValue;
-import org.apache.bcel.classfile.ElementValue;
-import org.apache.bcel.classfile.ElementValuePair;
-
-import java.util.Set;
-
-/**
- * Handles {@code CovariantReturnType$CovariantReturnTypes} annotations, which
- * are generated by the compiler when multiple {@code CovariantReturnType}
- * annotations appear on a single method.
- *
- * <p>The enclosed annotations are passed to {@link CovariantReturnTypeHandler}.
- */
-public class CovariantReturnTypeMultiHandler extends AnnotationHandler {
-
-    public static final String ANNOTATION_NAME =
-            "Ldalvik/annotation/codegen/CovariantReturnType$CovariantReturnTypes;";
-
-    private static final String VALUE = "value";
-
-    private final CovariantReturnTypeHandler mWrappedHandler;
-    private final String mInnerAnnotationName;
-
-    public CovariantReturnTypeMultiHandler(AnnotationConsumer consumer, Set<String> publicApis,
-            String hiddenapiFlag) {
-        this(consumer, publicApis, hiddenapiFlag, CovariantReturnTypeHandler.ANNOTATION_NAME);
-    }
-
-    @VisibleForTesting
-    public CovariantReturnTypeMultiHandler(AnnotationConsumer consumer, Set<String> publicApis,
-            String hiddenapiFlag, String innerAnnotationName) {
-        mWrappedHandler = new CovariantReturnTypeHandler(consumer, publicApis, hiddenapiFlag);
-        mInnerAnnotationName = innerAnnotationName;
-    }
-
-    @Override
-    public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
-        // Verify that the annotation has the form we expect
-        ElementValuePair value = findValue(annotation);
-        if (value == null) {
-            context.reportError("No value found on CovariantReturnType$CovariantReturnTypes");
-            return;
-        }
-        Preconditions.checkArgument(value.getValue() instanceof ArrayElementValue);
-        ArrayElementValue array = (ArrayElementValue) value.getValue();
-
-        // call wrapped handler on each enclosed annotation:
-        for (ElementValue v : array.getElementValuesArray()) {
-            Preconditions.checkArgument(v instanceof AnnotationElementValue);
-            AnnotationElementValue aev = (AnnotationElementValue) v;
-            Preconditions.checkArgument(
-                    aev.getAnnotationEntry().getAnnotationType().equals(mInnerAnnotationName));
-            mWrappedHandler.handleAnnotation(aev.getAnnotationEntry(), context);
-        }
-    }
-
-    private ElementValuePair findValue(AnnotationEntry a) {
-        for (ElementValuePair property : a.getElementValuePairs()) {
-            if (property.getNameString().equals(VALUE)) {
-                return property;
-            }
-        }
-        // not found
-        return null;
-    }
-}
diff --git a/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java b/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
index 6677a3f..89c8bd7 100644
--- a/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/MemberDumpingVisitor.java
@@ -40,8 +40,9 @@
     }
 
     private void visitMember(FieldOrMethod member, String signatureFormatString) {
-        AnnotationContext context = new AnnotationContext(mStatus, member,
-                (JavaClass) mDescendingVisitor.predecessor(), signatureFormatString);
+        AnnotationContext context = new AnnotatedMemberContext(mStatus,
+            (JavaClass) mDescendingVisitor.predecessor(), member,
+            signatureFormatString);
         System.out.println(context.getMemberDescriptor());
     }
 }
diff --git a/tools/class2greylist/src/com/android/class2greylist/RepeatedAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/RepeatedAnnotationHandler.java
new file mode 100644
index 0000000..61949e3
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/RepeatedAnnotationHandler.java
@@ -0,0 +1,57 @@
+package com.android.class2greylist;
+
+import com.google.common.base.Preconditions;
+import org.apache.bcel.classfile.AnnotationElementValue;
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.apache.bcel.classfile.ArrayElementValue;
+import org.apache.bcel.classfile.ElementValue;
+import org.apache.bcel.classfile.ElementValuePair;
+
+/**
+ * Handles a repeated annotation container.
+ *
+ * <p>The enclosed annotations are passed to the {@link #mWrappedHandler}.
+ */
+public class RepeatedAnnotationHandler extends AnnotationHandler {
+
+    private static final String VALUE = "value";
+
+    private final AnnotationHandler mWrappedHandler;
+    private final String mInnerAnnotationName;
+
+    RepeatedAnnotationHandler(String innerAnnotationName, AnnotationHandler wrappedHandler) {
+        mWrappedHandler = wrappedHandler;
+        mInnerAnnotationName = innerAnnotationName;
+    }
+
+    @Override
+    public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
+        // Verify that the annotation has the form we expect
+        ElementValuePair value = findValue(annotation);
+        if (value == null) {
+            context.reportError("No value found on %s", annotation.getAnnotationType());
+            return;
+        }
+        Preconditions.checkArgument(value.getValue() instanceof ArrayElementValue);
+        ArrayElementValue array = (ArrayElementValue) value.getValue();
+
+        // call wrapped handler on each enclosed annotation:
+        for (ElementValue v : array.getElementValuesArray()) {
+            Preconditions.checkArgument(v instanceof AnnotationElementValue);
+            AnnotationElementValue aev = (AnnotationElementValue) v;
+            Preconditions.checkArgument(
+                    aev.getAnnotationEntry().getAnnotationType().equals(mInnerAnnotationName));
+            mWrappedHandler.handleAnnotation(aev.getAnnotationEntry(), context);
+        }
+    }
+
+    private ElementValuePair findValue(AnnotationEntry a) {
+        for (ElementValuePair property : a.getElementValuePairs()) {
+            if (property.getNameString().equals(VALUE)) {
+                return property;
+            }
+        }
+        // not found
+        return null;
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java
index d1f3e77..b45e1b3 100644
--- a/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java
+++ b/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java
@@ -1,7 +1,6 @@
 package com.android.class2greylist;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableSet;
 
 import org.apache.bcel.Const;
@@ -20,11 +19,11 @@
  * Processes {@code UnsupportedAppUsage} annotations to generate greylist
  * entries.
  *
- * Any annotations with a {@link #EXPECTED_SIGNATURE} property will have their
+ * Any annotations with a {@link #EXPECTED_SIGNATURE_PROPERTY} property will have their
  * generated signature verified against this, and an error will be reported if
  * it does not match. Exclusions are made for bridge methods.
  *
- * Any {@link #MAX_TARGET_SDK} properties will be validated against the given
+ * Any {@link #MAX_TARGET_SDK_PROPERTY} properties will be validated against the given
  * set of valid values, then passed through to the greylist consumer.
  */
 public class UnsupportedAppUsageAnnotationHandler extends AnnotationHandler {
@@ -32,6 +31,7 @@
     // properties of greylist annotations:
     private static final String EXPECTED_SIGNATURE_PROPERTY = "expectedSignature";
     private static final String MAX_TARGET_SDK_PROPERTY = "maxTargetSdk";
+    private static final String IMPLICIT_MEMBER_PROPERTY = "implicitMember";
 
     private final Status mStatus;
     private final Predicate<ClassMember> mClassMemberFilter;
@@ -80,16 +80,20 @@
 
     @Override
     public void handleAnnotation(AnnotationEntry annotation, AnnotationContext context) {
-        FieldOrMethod member = context.member;
-
-        boolean isBridgeMethod = (member instanceof Method) &&
+        boolean isBridgeMethod = false;
+        if (context instanceof AnnotatedMemberContext) {
+            AnnotatedMemberContext memberContext = (AnnotatedMemberContext) context;
+            FieldOrMethod member = memberContext.member;
+            isBridgeMethod = (member instanceof Method) &&
                 (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
-        if (isBridgeMethod) {
-            mStatus.debug("Member is a bridge method");
+            if (isBridgeMethod) {
+                mStatus.debug("Member is a bridge method");
+            }
         }
 
         String signature = context.getMemberDescriptor();
         Integer maxTargetSdk = null;
+        String implicitMemberSignature = null;
 
         for (ElementValuePair property : annotation.getElementValuePairs()) {
             switch (property.getNameString()) {
@@ -113,9 +117,30 @@
 
                     maxTargetSdk = ((SimpleElementValue) property.getValue()).getValueInt();
                     break;
+                case IMPLICIT_MEMBER_PROPERTY:
+                    implicitMemberSignature = property.getValue().stringifyValue();
+                    if (context instanceof AnnotatedClassContext) {
+                        signature = String.format("L%s;->%s",
+                            context.getClassDescriptor(), implicitMemberSignature);
+                    } else {
+                        context.reportError(
+                            "Expected annotation with an %s property to be on a class but is on %s",
+                            IMPLICIT_MEMBER_PROPERTY,
+                            signature);
+                        return;
+                    }
+                    break;
             }
         }
 
+        if (context instanceof AnnotatedClassContext && implicitMemberSignature == null) {
+            context.reportError(
+                "Missing property %s on annotation on class %s",
+                IMPLICIT_MEMBER_PROPERTY,
+                signature);
+            return;
+        }
+
         // Verify that maxTargetSdk is valid.
         if (!mSdkVersionToFlagMap.containsKey(maxTargetSdk)) {
             context.reportError("Invalid value for %s: got %d, expected one of [%s]",
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
index 9d2f014..9f924b2 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
@@ -67,7 +67,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
@@ -91,7 +91,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
@@ -113,7 +113,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
@@ -138,7 +138,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
deleted file mode 100644
index 1202564..0000000
--- a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.class2greylist;
-
-import static com.google.common.truth.Truth.assertThat;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import static java.util.Collections.emptySet;
-
-import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-
-import java.io.IOException;
-import java.util.Map;
-
-public class CovariantReturnTypeMultiHandlerTest extends AnnotationHandlerTestBase {
-
-    private static final String FLAG = "test-flag";
-
-    @Before
-    public void setup() throws IOException {
-        // To keep the test simpler and more concise, we don't use the real
-        // @CovariantReturnType annotation here, but use our own @Annotation
-        // and @Annotation.Multi that have the same semantics. It doesn't have
-        // to match the real annotation, just have the same properties
-        // (returnType and value).
-        mJavac.addSource("annotation.Annotation", Joiner.on('\n').join(
-                "package annotation;",
-                "import static java.lang.annotation.RetentionPolicy.CLASS;",
-                "import java.lang.annotation.Repeatable;",
-                "import java.lang.annotation.Retention;",
-                "@Repeatable(Annotation.Multi.class)",
-                "@Retention(CLASS)",
-                "public @interface Annotation {",
-                "  Class<?> returnType();",
-                "  @Retention(CLASS)",
-                "  @interface Multi {",
-                "    Annotation[] value();",
-                "  }",
-                "}"));
-    }
-
-    @Test
-    public void testReturnTypeMulti() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Annotation;",
-                "public class Class {",
-                "  @Annotation(returnType=Integer.class)",
-                "  @Annotation(returnType=Long.class)",
-                "  public String method() {return null;}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of("Lannotation/Annotation$Multi;",
-                        new CovariantReturnTypeMultiHandler(
-                                mConsumer,
-                                ImmutableSet.of("La/b/Class;->method()Ljava/lang/String;"),
-                                FLAG,
-                                "Lannotation/Annotation;"));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-
-        assertNoErrors();
-        ArgumentCaptor<String> whitelist = ArgumentCaptor.forClass(String.class);
-        verify(mConsumer, times(2)).consume(whitelist.capture(), any(),
-                eq(ImmutableSet.of(FLAG)));
-        assertThat(whitelist.getAllValues()).containsExactly(
-                "La/b/Class;->method()Ljava/lang/Integer;",
-                "La/b/Class;->method()Ljava/lang/Long;");
-    }
-
-    @Test
-    public void testReturnTypeMultiNotPublicApi() throws IOException {
-        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
-                "package a.b;",
-                "import annotation.Annotation;",
-                "public class Class {",
-                "  @Annotation(returnType=Integer.class)",
-                "  @Annotation(returnType=Long.class)",
-                "  public String method() {return null;}",
-                "}"));
-        assertThat(mJavac.compile()).isTrue();
-
-        Map<String, AnnotationHandler> handlerMap =
-                ImmutableMap.of("Lannotation/Annotation$Multi;",
-                        new CovariantReturnTypeMultiHandler(
-                                mConsumer,
-                                emptySet(),
-                                FLAG,
-                                "Lannotation/Annotation;"));
-        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
-
-        verify(mStatus, atLeastOnce()).error(any(), any());
-    }
-}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/RepeatedAnnotationHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/RepeatedAnnotationHandlerTest.java
new file mode 100644
index 0000000..f2f70ee
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/RepeatedAnnotationHandlerTest.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.junit.Before;
+import org.junit.Test;
+
+public class RepeatedAnnotationHandlerTest extends AnnotationHandlerTestBase {
+
+    @Before
+    public void setup() {
+        // To keep the test simpler and more concise, we don't use a real annotation here, but use
+        // our own @Annotation and @Annotation.Multi that have the same relationship.
+        mJavac.addSource("annotation.Annotation", Joiner.on('\n').join(
+                "package annotation;",
+                "import static java.lang.annotation.RetentionPolicy.CLASS;",
+                "import java.lang.annotation.Repeatable;",
+                "import java.lang.annotation.Retention;",
+                "@Repeatable(Annotation.Multi.class)",
+                "@Retention(CLASS)",
+                "public @interface Annotation {",
+                "  Class<?> clazz();",
+                "  @Retention(CLASS)",
+                "  @interface Multi {",
+                "    Annotation[] value();",
+                "  }",
+                "}"));
+    }
+
+    @Test
+    public void testRepeated() throws IOException {
+        mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+                "package a.b;",
+                "import annotation.Annotation;",
+                "public class Class {",
+                "  @Annotation(clazz=Integer.class)",
+                "  @Annotation(clazz=Long.class)",
+                "  public String method() {return null;}",
+                "}"));
+        mJavac.compile();
+
+        TestAnnotationHandler handler = new TestAnnotationHandler();
+        Map<String, AnnotationHandler> handlerMap =
+            ImmutableMap.of("Lannotation/Annotation$Multi;",
+                new RepeatedAnnotationHandler("Lannotation/Annotation;", handler));
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
+
+        assertNoErrors();
+        assertThat(handler.getClasses()).containsExactly(
+                "Ljava/lang/Integer;",
+                "Ljava/lang/Long;");
+    }
+
+    private static class TestAnnotationHandler extends AnnotationHandler {
+
+        private final List<String> classes;
+
+        private TestAnnotationHandler() {
+            this.classes = new ArrayList<>();
+        }
+
+        @Override
+        void handleAnnotation(AnnotationEntry annotation,
+            AnnotationContext context) {
+            classes.add(annotation.getElementValuePairs()[0].getValue().stringifyValue());
+        }
+
+        private List<String> getClasses() {
+            return classes;
+        }
+    }
+}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java
index cdf01af..a6d6a16 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java
@@ -59,10 +59,17 @@
                 "package annotation;",
                 "import static java.lang.annotation.RetentionPolicy.CLASS;",
                 "import java.lang.annotation.Retention;",
+                "import java.lang.annotation.Repeatable;",
                 "@Retention(CLASS)",
+                "@Repeatable(Anno.Container.class)",
                 "public @interface Anno {",
                 "  String expectedSignature() default \"\";",
                 "  int maxTargetSdk() default Integer.MAX_VALUE;",
+                "  String implicitMember() default \"\";",
+                "  @Retention(CLASS)",
+                "  public @interface Container {",
+                "    Anno[] value();",
+                "  }",
                 "}"));
     }
 
@@ -82,7 +89,7 @@
                 "  @Anno",
                 "  public void method() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -103,7 +110,7 @@
                 "  @Anno",
                 "  public Class() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -124,7 +131,7 @@
                 "  @Anno",
                 "  public int i;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -137,6 +144,70 @@
     }
 
     @Test
+    public void testGreylistImplicit() throws IOException {
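+        // The compiler-generated values() method cannot be annotated directly,
+        // so the annotation goes on the class, naming the implicit member's
+        // signature in the implicitMember property.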
+        mJavac.addSource("a.b.EnumClass", Joiner.on('\n').join(
+            "package a.b;",
+            "import annotation.Anno;",
+            "@Anno(implicitMember=\"values()[La/b/EnumClass;\")",
+            "public enum EnumClass {",
+            "  VALUE",
+            "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.EnumClass"), mStatus,
+            ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        assertNoErrors();
+        ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+        verify(mConsumer, times(1)).consume(greylist.capture(), any(), any());
+        assertThat(greylist.getValue()).isEqualTo("La/b/EnumClass;->values()[La/b/EnumClass;");
+    }
+
+    @Test
+    public void testGreylistImplicit_Invalid_MissingOnClass() throws IOException {
+        mJavac.addSource("a.b.EnumClass", Joiner.on('\n').join(
+            "package a.b;",
+            "import annotation.Anno;",
+            "@Anno",
+            "public enum EnumClass {",
+            "  VALUE",
+            "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.EnumClass"), mStatus,
+            ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        ArgumentCaptor<String> format = ArgumentCaptor.forClass(String.class);
+        verify(mStatus, times(1)).error(format.capture(), any());
+        // Ensure that the correct error is reported.
+        assertThat(format.getValue())
+            .contains("Missing property implicitMember on annotation on class");
+    }
+
+    @Test
+    public void testGreylistImplicit_Invalid_PresentOnMember() throws IOException {
+        mJavac.addSource("a.b.EnumClass", Joiner.on('\n').join(
+            "package a.b;",
+            "import annotation.Anno;",
+            "public enum EnumClass {",
+            "  @Anno(implicitMember=\"values()[La/b/EnumClass;\")",
+            "  VALUE",
+            "}"));
+        mJavac.compile();
+
+        new AnnotationVisitor(mJavac.getCompiledClass("a.b.EnumClass"), mStatus,
+            ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
+        ).visit();
+
+        ArgumentCaptor<String> format = ArgumentCaptor.forClass(String.class);
+        verify(mStatus, times(1)).error(format.capture(), any());
+        assertThat(format.getValue())
+            .contains("Expected annotation with an implicitMember property to be on a class");
+    }
+
+    @Test
     public void testGreylistMethodExpectedSignature() throws IOException {
         mJavac.addSource("a.b.Class", Joiner.on('\n').join(
                 "package a.b;",
@@ -145,7 +216,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->method()V\")",
                 "  public void method() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -166,7 +237,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->nomethod()V\")",
                 "  public void method() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -186,7 +257,7 @@
                 "    public void method() {}",
                 "  }",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -205,7 +276,7 @@
                 "public class Class {",
                 "  public void method() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -224,7 +295,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
                 "  public void method(T arg) {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -252,7 +323,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
                 "  public void method(T arg) {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
@@ -284,7 +355,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
                 "  public void method(T arg) {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
@@ -320,7 +391,7 @@
                 "package a.b;",
                 "public class Class extends Base implements Interface {",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
@@ -354,7 +425,7 @@
                 "  @Anno",
                 "  public void method(T arg) {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Set<String> publicApis = Sets.newHashSet(
                 "La/b/Base;->method(Ljava/lang/Object;)V",
@@ -385,7 +456,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->field:I\")",
                 "  public volatile int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(
@@ -407,7 +478,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->wrong:I\")",
                 "  public volatile int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
@@ -424,7 +495,7 @@
                 "  @Anno(maxTargetSdk=1)",
                 "  public int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(
@@ -444,7 +515,7 @@
                 "  @Anno",
                 "  public int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(
@@ -464,7 +535,7 @@
                 "  @Anno(maxTargetSdk=2)",
                 "  public int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(
@@ -493,7 +564,7 @@
                 "  @Anno2(maxTargetSdk=2, trackingBug=123456789)",
                 "  public int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of("Lannotation/Anno2;", createGreylistHandler(x -> true,
                         ImmutableMap.of(2, "flag2")))
diff --git a/tools/class2greylist/test/src/com/android/javac/Javac.java b/tools/class2greylist/test/src/com/android/javac/Javac.java
index 202f412..94e4e49 100644
--- a/tools/class2greylist/test/src/com/android/javac/Javac.java
+++ b/tools/class2greylist/test/src/com/android/javac/Javac.java
@@ -18,6 +18,7 @@
 
 import com.google.common.io.Files;
 
+import java.util.stream.Collectors;
 import org.apache.bcel.classfile.ClassParser;
 import org.apache.bcel.classfile.JavaClass;
 
@@ -76,15 +77,24 @@
         return this;
     }
 
-    public boolean compile() {
+    public void compile() {
+        DiagnosticCollector<JavaFileObject> diagnosticCollector = new DiagnosticCollector<>();
         JavaCompiler.CompilationTask task = mJavac.getTask(
                 null,
                 mFileMan,
-                null,
+                diagnosticCollector,
                 null,
                 null,
                 mCompilationUnits);
-        return task.call();
+        boolean result = task.call();
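+        // On failure, surface the collected diagnostics in the exception so
+        // call sites no longer need to assert on a boolean result.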
+        if (!result) {
+            throw new IllegalStateException(
+                "Compilation failed:" +
+                    diagnosticCollector.getDiagnostics()
+                        .stream()
+                        .map(Object::toString)
+                        .collect(Collectors.joining("\n")));
+        }
     }
 
     public InputStream getClassFile(String classname) throws IOException {
diff --git a/tools/common/common.py b/tools/common/common.py
index b728e8d..6206dfb 100755
--- a/tools/common/common.py
+++ b/tools/common/common.py
@@ -299,11 +299,13 @@
       os.mkdir(arch_cache_path)
     lib = 'lib64' if x64 else 'lib'
     android_root = GetEnvVariableOrError('ANDROID_HOST_OUT')
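+    # ANDROID_RUNTIME_ROOT tells the runtime where the com.android.runtime
+    # (runtime APEX) files live; on host they are installed under
+    # ANDROID_HOST_OUT.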
+    android_runtime_root = android_root + '/com.android.runtime'
     library_path = android_root + '/' + lib
     path = android_root + '/bin'
     self._shell_env = os.environ.copy()
     self._shell_env['ANDROID_DATA'] = self._env_path
     self._shell_env['ANDROID_ROOT'] = android_root
+    self._shell_env['ANDROID_RUNTIME_ROOT'] = android_runtime_root
     self._shell_env['LD_LIBRARY_PATH'] = library_path
     self._shell_env['DYLD_LIBRARY_PATH'] = library_path
     self._shell_env['PATH'] = (path + ':' + self._shell_env['PATH'])
diff --git a/tools/dist_linux_bionic.sh b/tools/dist_linux_bionic.sh
new file mode 100755
index 0000000..4c7ba1c
--- /dev/null
+++ b/tools/dist_linux_bionic.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Builds the given targets using linux-bionic and copies the output files to
+# DIST_DIR. Takes normal make arguments.
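+#
+# Example invocation (target list is illustrative):
+#   DIST_DIR=out/dist ./art/tools/dist_linux_bionic.sh -j8 <make targets>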
+
+if [[ -z $ANDROID_BUILD_TOP ]]; then
+  pushd .
+else
+  pushd "$ANDROID_BUILD_TOP"
+fi
+
+if [[ -z $DIST_DIR ]]; then
+  echo "DIST_DIR must be set!"
+  exit 1
+fi
+
+if [ ! -d art ]; then
+  echo "Script needs to be run at the root of the android tree"
+  exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var
+out_dir=$(get_build_var OUT_DIR)
+
+./art/tools/build_linux_bionic.sh "$@"
+
+mkdir -p "$DIST_DIR"
+cp -R "${out_dir}/soong/host/"* "$DIST_DIR/"
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index a5fa332..f97dd4f 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -157,7 +157,6 @@
 {
   description: "Missing resource in classpath",
   result: EXEC_FAILED,
-  modes: [device],
   names: ["libcore.java.util.prefs.OldAbstractPreferencesTest#testClear",
           "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode",
           "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree",
@@ -187,7 +186,8 @@
           "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree",
           "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush",
           "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync",
-          "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"]
+          "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"],
+  bug: 120526172
 },
 {
   description: "Only work with --mode=activity",
diff --git a/tools/luci/config/cr-buildbucket.cfg b/tools/luci/config/cr-buildbucket.cfg
index 29cca39..8df8433 100644
--- a/tools/luci/config/cr-buildbucket.cfg
+++ b/tools/luci/config/cr-buildbucket.cfg
@@ -27,8 +27,6 @@
   swarming {
     hostname: "chromium-swarm.appspot.com"
     builder_defaults {
-      dimensions: "cores:8"
-      dimensions: "cpu:x86-64"
       dimensions: "pool:luci.art.ci"
       service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
       execution_timeout_secs: 10800  # 3h
diff --git a/tools/luci/config/luci-milo.cfg b/tools/luci/config/luci-milo.cfg
index ce22293..60e8404 100644
--- a/tools/luci/config/luci-milo.cfg
+++ b/tools/luci/config/luci-milo.cfg
@@ -6,6 +6,7 @@
   repo_url: "https://android.googlesource.com/platform/art"
   refs: "refs/heads/master"
   manifest_name: "REVISION"
+  include_experimental_builds: true
 
   builders {
     name: "buildbucket/luci.art.ci/angler-armv7-debug"
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 20e5c64..c85a5ed 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -43,6 +43,23 @@
 java_lib_location="${ANDROID_HOST_OUT}/../common/obj/JAVA_LIBRARIES"
 make_target_name="apache-harmony-jdwp-tests-hostdex"
 
+function boot_classpath_arg {
+  local dir="$1"
+  local suffix="$2"
+  shift 2
+  local separator=""
+  for var
+  do
+    printf -- "${separator}${dir}/${var}${suffix}.jar";
+    separator=":"
+  done
+}
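+# For example, "boot_classpath_arg /system/framework -testdex core-oj core-libart" prints
+# "/system/framework/core-oj-testdex.jar:/system/framework/core-libart-testdex.jar".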
+
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart core-simple okhttp bouncycastle conscrypt"
+
 vm_args=""
 art="$android_root/bin/art"
 art_debugee="sh $android_root/bin/art"
@@ -59,6 +76,8 @@
 explicit_debug="no"
 verbose="no"
 image="-Ximage:/data/art-test/core.art"
+boot_classpath="$(boot_classpath_arg /system/framework -testdex $BOOT_CLASSPATH_JARS)"
+boot_classpath_locations=""
 with_jdwp_path=""
 agent_wrapper=""
 vm_args=""
@@ -90,6 +109,17 @@
     art_debugee="bash ${OUT_DIR-out}/host/linux-x86/bin/art"
     # We force generation of a new image to avoid build-time and run-time classpath differences.
     image="-Ximage:/system/non/existent/vogar.art"
+    # Pass the host boot classpath.
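+    # ANDROID_HOST_OUT is expected to be under ANDROID_BUILD_TOP; strip the
+    # "${ANDROID_BUILD_TOP}/" prefix to get a tree-relative framework location.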
+    if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then
+      framework_location="${ANDROID_HOST_OUT:${#ANDROID_BUILD_TOP}+1}/framework"
+    else
+      echo "error: ANDROID_BUILD_TOP/ is not a prefix of ANDROID_HOST_OUT"
+      echo "ANDROID_BUILD_TOP=${ANDROID_BUILD_TOP}"
+      echo "ANDROID_HOST_OUT=${ANDROID_HOST_OUT}"
+      exit 1
+    fi
+    boot_classpath="$(boot_classpath_arg ${ANDROID_HOST_OUT}/framework -hostdex $BOOT_CLASSPATH_JARS)"
+    boot_classpath_locations="$(boot_classpath_arg ${framework_location} -hostdex $BOOT_CLASSPATH_JARS)"
     # We do not need a device directory on host.
     device_dir=""
     # Vogar knows which VM to use on host.
@@ -104,6 +134,8 @@
     debuggee_args=""
     # No image. On the RI.
     image=""
+    boot_classpath=""
+    boot_classpath_locations=""
     # We do not need a device directory on RI.
     device_dir=""
     # Vogar knows which VM to use on RI.
@@ -305,6 +337,15 @@
 
 if [[ "$image" != "" ]]; then
   vm_args="$vm_args --vm-arg $image"
+  debuggee_args="$debuggee_args $image"
+fi
+if [[ "$boot_classpath" != "" ]]; then
+  vm_args="$vm_args --vm-arg -Xbootclasspath:${boot_classpath}"
+  debuggee_args="$debuggee_args -Xbootclasspath:${boot_classpath}"
+fi
+if [[ "$boot_classpath_locations" != "" ]]; then
+  vm_args="$vm_args --vm-arg -Xbootclasspath-locations:${boot_classpath_locations}"
+  debuggee_args="$debuggee_args -Xbootclasspath-locations:${boot_classpath_locations}"
 fi
 
 if [[ "$plugin" != "" ]]; then
@@ -363,7 +404,7 @@
       --vm-arg -Djpda.settings.waitingTime=$jdwp_test_timeout \
       --vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
       --vm-arg -Djpda.settings.dumpProcess="$dump_command" \
-      --vm-arg -Djpda.settings.debuggeeJavaPath="$art_debugee $plugin $image $debuggee_args" \
+      --vm-arg -Djpda.settings.debuggeeJavaPath="$art_debugee $plugin $debuggee_args" \
       --classpath "$test_jar" \
       $toolchain_args \
       $test
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 2d39b2a..63fe81b 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -46,6 +46,22 @@
   done
 }
 
+function boot_classpath_arg {
+  local dir="$1"
+  local suffix="$2"
+  shift 2
+  printf -- "--vm-arg -Xbootclasspath"
+  for var
+  do
+    printf -- ":${dir}/${var}${suffix}.jar";
+  done
+}
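+# For example, "boot_classpath_arg /system/framework -testdex core-oj" prints
+# "--vm-arg -Xbootclasspath:/system/framework/core-oj-testdex.jar".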
+
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart core-simple okhttp bouncycastle conscrypt"
+
 DEPS="core-tests jsr166-tests mockito-target"
 
 for lib in $DEPS
@@ -110,6 +126,7 @@
   if [[ "$1" == "--mode=device" ]]; then
     device_mode=true
     vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core.art"
+    vogar_args="$vogar_args $(boot_classpath_arg /system/framework -testdex $BOOT_CLASSPATH_JARS)"
     shift
   elif [[ "$1" == "--mode=host" ]]; then
     # We explicitly give a wrong path for the image, to ensure vogar