Merge "Refactor CompilerDriver::CompileAll()."
diff --git a/build/Android.bp b/build/Android.bp
index 09d3a18..46fb0c5 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -7,6 +7,7 @@
         "blueprint-proptools",
         "soong",
         "soong-android",
+        "soong-apex",
         "soong-cc",
     ],
     srcs: [
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 96d3648..03e68ae 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -74,18 +74,21 @@
 TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
 
 # Jar files for core.art.
-HOST_CORE_DEX_LOCATIONS   := $(foreach jar,$(HOST_CORE_JARS),  $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+TEST_CORE_JARS := core-oj core-libart core-simple conscrypt okhttp bouncycastle
+HOST_TEST_CORE_JARS   := $(addsuffix -hostdex,$(TEST_CORE_JARS))
+TARGET_TEST_CORE_JARS := $(addsuffix -testdex,$(TEST_CORE_JARS))
+HOST_CORE_DEX_LOCATIONS   := $(foreach jar,$(HOST_TEST_CORE_JARS),  $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
 ifeq ($(ART_TEST_ANDROID_ROOT),)
-TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
 else
-TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
+TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
 endif
 
-HOST_CORE_DEX_FILES   := $(foreach jar,$(HOST_CORE_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
-TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+HOST_CORE_DEX_FILES   := $(foreach jar,$(HOST_TEST_CORE_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
+TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
 
-ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
-ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_TEST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
 
 ART_CORE_SHARED_LIBRARIES := libjavacore libopenjdk libopenjdkjvm libopenjdkjvmti
 ART_CORE_SHARED_DEBUG_LIBRARIES := libopenjdkd libopenjdkjvmd libopenjdkjvmtid
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 17d0232..d8014bd 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -129,7 +129,7 @@
     LOCAL_DEX_PREOPT := false
     LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4)
     LOCAL_MODULE_TAGS := tests
-    LOCAL_JAVA_LIBRARIES := $(TARGET_CORE_JARS)
+    LOCAL_JAVA_LIBRARIES := $(TARGET_TEST_CORE_JARS)
     LOCAL_MODULE_PATH := $(3)
     LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
     ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
@@ -145,7 +145,7 @@
     LOCAL_NO_STANDARD_LIBRARIES := true
     LOCAL_DEX_PREOPT := false
     LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4)
-    LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS)
+    LOCAL_JAVA_LIBRARIES := $(HOST_TEST_CORE_JARS)
     LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
     ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
       LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9a5e26b..6885946 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -454,7 +454,10 @@
     $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
     $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
     $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
-    $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
+    $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
 
   ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
 
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index 159e5c1..88178a0 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -49,10 +49,13 @@
 ]
 
 // Modules listed in LOCAL_REQUIRED_MODULES for module art-tools in art/Android.mk.
-art_tools_binaries = [
+art_tools_common_binaries = [
     "dexdiag",
     "dexdump",
     "dexlist",
+]
+
+art_tools_device_binaries = [
     "oatdump",
 ]
 
@@ -66,12 +69,21 @@
     // ...
 ]
 
+art_tools_binaries = art_tools_common_binaries + art_tools_device_binaries
+
 apex_key {
     name: "com.android.runtime.key",
     public_key: "com.android.runtime.avbpubkey",
     private_key: "com.android.runtime.pem",
 }
 
+prebuilt_etc {
+    name: "com.android.runtime.ld.config.txt",
+    src: "ld.config.txt",
+    filename: "ld.config.txt",
+    installable: false,
+}
+
 // TODO: Introduce `apex_defaults` to factor common parts of `apex`
 // module definitions below?
 
@@ -97,9 +109,8 @@
             binaries: [],
         }
     },
+    prebuilts: ["com.android.runtime.ld.config.txt"],
     key: "com.android.runtime.key",
-    // TODO: Also package a `ld.config.txt` config file (to be placed in `etc/`).
-    // ...
 }
 
 // "Debug" version of the Runtime APEX module (containing both release and
@@ -126,7 +137,42 @@
             binaries: art_tools_binaries,
         }
     },
+    prebuilts: ["com.android.runtime.ld.config.txt"],
     key: "com.android.runtime.key",
-    // TODO: Also package a `ld.config.txt` config file (to be placed in `etc/`).
-    // ...
+}
+
+// TODO: Do this better. art_apex will disable host builds when
+// HOST_PREFER_32_BIT is set. We cannot simply use com.android.runtime.debug
+// because binaries have different multilib classes and 'multilib: {}' isn't
+// supported by target: { ... }.
+// See b/120617876 for more information.
+art_apex {
+    name: "com.android.runtime.host",
+    compile_multilib: "both",
+    payload_type: "zip",
+    host_supported: true,
+    device_supported: false,
+    manifest: "manifest.json",
+    native_shared_libs: art_runtime_base_native_shared_libs
+        + art_runtime_fake_native_shared_libs
+        + art_runtime_debug_native_shared_libs,
+    multilib: {
+        both: {
+            // TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
+            // (see `symlink_preferred_arch` in art/dalvikvm/Android.bp).
+            binaries: art_runtime_base_binaries_both,
+        },
+        first: {
+            // TODO: oatdump cannot link with host linux_bionic due to not using clang ld
+            binaries: art_tools_common_binaries
+                + art_runtime_base_binaries_prefer32
+                + art_runtime_debug_binaries_prefer32,
+        }
+    },
+    key: "com.android.runtime.key",
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
 }
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
new file mode 100644
index 0000000..ac4d1eb
--- /dev/null
+++ b/build/apex/ld.config.txt
@@ -0,0 +1 @@
+# TODO: Write me.
diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh
index 86cd8cb..c19c7bd 100755
--- a/build/apex/runtests.sh
+++ b/build/apex/runtests.sh
@@ -69,51 +69,6 @@
 work_dir=$(mktemp -d)
 mount_point="$work_dir/image"
 
-# Garbage collection.
-function finish {
-  # Don't fail early during cleanup.
-  set +e
-  guestunmount "$mount_point"
-  rm -rf "$work_dir"
-}
-
-trap finish EXIT
-
-# TODO: Also exercise the Release Runtime APEX (`com.android.runtime.release`).
-apex_module="com.android.runtime.debug"
-
-# Build the Android Runtime APEX package (optional).
-$build_apex_p && say "Building package" && make "$apex_module"
-
-system_apexdir="$ANDROID_PRODUCT_OUT/system/apex"
-apex_package="$system_apexdir/$apex_module.apex"
-
-say "Extracting and mounting image"
-
-# Extract the image from the Android Runtime APEX.
-image_filename="image.img"
-unzip -q "$apex_package" "$image_filename" -d "$work_dir"
-mkdir "$mount_point"
-image_file="$work_dir/$image_filename"
-
-# Check filesystems in the image.
-image_filesystems="$work_dir/image_filesystems"
-virt-filesystems -a "$image_file" >"$image_filesystems"
-# We expect a single partition (/dev/sda) in the image.
-partition="/dev/sda"
-echo "$partition" | cmp "$image_filesystems" -
-
-# Mount the image from the Android Runtime APEX.
-guestmount -a "$image_file" -m "$partition" "$mount_point"
-
-# List the contents of the mounted image (optional).
-$list_image_files_p && say "Listing image files" && ls -ld "$mount_point" && tree -ap "$mount_point"
-
-say "Running tests"
-
-# Check that the mounted image contains a manifest.
-[[ -f "$mount_point/manifest.json" ]]
-
 function check_binary {
   [[ -x "$mount_point/bin/$1" ]] || die "Cannot find binary '$1' in mounted image"
 }
@@ -136,65 +91,178 @@
     || die "Cannot find library '$1' in mounted image"
 }
 
-# Check that the mounted image contains ART base binaries.
-check_multilib_binary dalvikvm
-# TODO: Does not work yet.
-: check_binary_symlink dalvikvm
-check_binary dex2oat
-check_binary dexoptanalyzer
-check_binary profman
+function build_apex {
+  if $build_apex_p; then
+    say "Building package $1" && make "$1" || die "Cannot build $1"
+  fi
+}
 
-# Check that the mounted image contains ART tools binaries.
-check_binary dexdiag
-check_binary dexdump
-check_binary dexlist
+function check_contents {
+
+  # Check that the mounted image contains a manifest.
+  [[ -f "$mount_point/apex_manifest.json" ]] || die "no manifest"
+
+  # Check that the mounted image contains ART base binaries.
+  check_multilib_binary dalvikvm
+  # TODO: Does not work yet.
+  : check_binary_symlink dalvikvm
+  check_binary dex2oat
+  check_binary dexoptanalyzer
+  check_binary profman
+
+  # Check that the mounted image contains ART tools binaries.
+  check_binary dexdiag
+  check_binary dexdump
+  check_binary dexlist
+  # oatdump is only in device APEXes due to build rules
+  # check_binary oatdump
+
+  # Check that the mounted image contains ART debug binaries.
+  check_binary dex2oatd
+  check_binary dexoptanalyzerd
+  check_binary profmand
+
+  # Check that the mounted image contains ART libraries.
+  check_library libart-compiler.so
+  check_library libart.so
+  check_library libopenjdkjvm.so
+  check_library libopenjdkjvmti.so
+  check_library libadbconnection.so
+  # TODO: Should we check for these libraries too, even if they are not explicitly
+  # listed as dependencies in the Android Runtime APEX module rule?
+  check_library libartbase.so
+  check_library libart-dexlayout.so
+  check_library libdexfile.so
+  check_library libprofile.so
+
+  # Check that the mounted image contains ART debug libraries.
+  check_library libartd-compiler.so
+  check_library libartd.so
+  check_library libopenjdkd.so
+  check_library libopenjdkjvmd.so
+  check_library libopenjdkjvmtid.so
+  check_library libadbconnectiond.so
+  # TODO: Should we check for these libraries too, even if they are not explicitly
+  # listed as dependencies in the Android Runtime APEX module rule?
+  check_library libdexfiled.so
+  check_library libartbased.so
+  check_library libartd-dexlayout.so
+  check_library libprofiled.so
+
+  # TODO: Should we check for other libraries, such as:
+  #
+  #   libbacktrace.so
+  #   libbase.so
+  #   liblog.so
+  #   libsigchain.so
+  #   libtombstoned_client.so
+  #   libunwindstack.so
+  #   libvixl.so
+  #   libvixld.so
+  #   ...
+  #
+  # ?
+}
+
+
+# *****************************************
+# * Testing for com.android.runtime.debug *
+# *****************************************
+
+# Garbage collection.
+function finish_device_debug {
+  # Don't fail early during cleanup.
+  set +e
+  guestunmount "$mount_point"
+  rm -rf "$work_dir"
+}
+
+trap finish_device_debug EXIT
+
+# TODO: Also exercise the Release Runtime APEX (`com.android.runtime.release`).
+apex_module="com.android.runtime.debug"
+
+# Build the Android Runtime APEX package (optional).
+build_apex $apex_module
+
+system_apexdir="$ANDROID_PRODUCT_OUT/system/apex"
+apex_package="$system_apexdir/$apex_module.apex"
+
+say "Extracting and mounting image"
+
+# Extract the payload from the Android Runtime APEX.
+image_filename="apex_payload.img"
+unzip -q "$apex_package" "$image_filename" -d "$work_dir"
+mkdir "$mount_point"
+image_file="$work_dir/$image_filename"
+
+# Check filesystems in the image.
+image_filesystems="$work_dir/image_filesystems"
+virt-filesystems -a "$image_file" >"$image_filesystems"
+# We expect a single partition (/dev/sda) in the image.
+partition="/dev/sda"
+echo "$partition" | cmp "$image_filesystems" -
+
+# Mount the image from the Android Runtime APEX.
+guestmount -a "$image_file" -m "$partition" "$mount_point"
+
+# List the contents of the mounted image (optional).
+$list_image_files_p && say "Listing image files" && ls -ld "$mount_point" && tree -ap "$mount_point"
+
+say "Running tests"
+
+check_contents
+
+# Check for files pulled in from device-only oatdump.
 check_binary oatdump
-
-# Check that the mounted image contains ART debug binaries.
-check_binary dex2oatd
-check_binary dexoptanalyzerd
-check_binary profmand
-
-# Check that the mounted image contains ART libraries.
-check_library libart-compiler.so
-check_library libart.so
-check_library libopenjdkjvm.so
-check_library libopenjdkjvmti.so
-check_library libadbconnection.so
-# TODO: Should we check for these libraries too, even if they are not explicitly
-# listed as dependencies in the Android Runtime APEX module rule?
-check_library libartbase.so
-check_library libart-dexlayout.so
 check_library libart-disassembler.so
-check_library libdexfile.so
-check_library libprofile.so
 
-# Check that the mounted image contains ART debug libraries.
-check_library libartd-compiler.so
-check_library libartd.so
-check_library libdexfiled.so
-check_library libopenjdkd.so
-check_library libopenjdkjvmd.so
-check_library libopenjdkjvmtid.so
-check_library libadbconnectiond.so
-# TODO: Should we check for these libraries too, even if they are not explicitly
-# listed as dependencies in the Android Runtime APEX module rule?
-check_library libartbased.so
-check_library libartd-dexlayout.so
-check_library libprofiled.so
+# Cleanup
+trap - EXIT
+guestunmount "$mount_point"
+rm -rf "$work_dir"
 
-# TODO: Should we check for other libraries, such as:
-#
-#   libbacktrace.so
-#   libbase.so
-#   liblog.so
-#   libsigchain.so
-#   libtombstoned_client.so
-#   libunwindstack.so
-#   libvixl.so
-#   libvixld.so
-#   ...
-#
-# ?
+say "$apex_module Tests passed"
+
+# ****************************************
+# * Testing for com.android.runtime.host *
+# ****************************************
+
+# Garbage collection.
+function finish_host {
+  # Don't fail early during cleanup.
+  set +e
+  rm -rf "$work_dir"
+}
+
+work_dir=$(mktemp -d)
+mount_point="$work_dir/zip"
+
+trap finish_host EXIT
+
+apex_module="com.android.runtime.host"
+
+# Build the Android Runtime APEX package (optional).
+build_apex $apex_module
+
+system_apexdir="$ANDROID_HOST_OUT/apex"
+apex_package="$system_apexdir/$apex_module.zipapex"
+
+say "Extracting payload"
+
+# Extract the payload from the Android Runtime APEX.
+image_filename="apex_payload.zip"
+unzip -q "$apex_package" "$image_filename" -d "$work_dir"
+mkdir "$mount_point"
+image_file="$work_dir/$image_filename"
+
+# Unzip the payload.
+unzip -q "$image_file" -d "$mount_point"
+
+say "Running tests"
+
+check_contents
+
+say "$apex_module Tests passed"
 
 say "Tests passed"
diff --git a/build/art.go b/build/art.go
index 1c8be0f..01848c8 100644
--- a/build/art.go
+++ b/build/art.go
@@ -16,8 +16,10 @@
 
 import (
 	"android/soong/android"
+	"android/soong/apex"
 	"android/soong/cc"
 	"fmt"
+	"log"
 	"sync"
 
 	"github.com/google/blueprint/proptools"
@@ -289,6 +291,36 @@
 	android.RegisterModuleType("libart_static_cc_defaults", libartStaticDefaultsFactory)
 	android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
 	android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
+
+	// TODO: This makes the module disable itself for host if HOST_PREFER_32_BIT is
+	// set. We need this because the multilib types of binaries listed in the apex
+	// rule must match the declared type. This is normally not difficult but HOST_PREFER_32_BIT
+	// changes this to 'prefer32' on all host binaries. Since HOST_PREFER_32_BIT is
+	// only used for testing we can just disable the module.
+	// See b/120617876 for more information.
+	android.RegisterModuleType("art_apex", artApexBundleFactory)
+}
+
+func artApexBundleFactory() android.Module {
+	module := apex.ApexBundleFactory()
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) {
+		if envTrue(ctx, "HOST_PREFER_32_BIT") {
+			type props struct {
+				Target struct {
+					Host struct {
+						Enabled *bool
+					}
+				}
+			}
+
+			p := &props{}
+			p.Target.Host.Enabled = proptools.BoolPtr(false)
+			ctx.AppendProperties(p)
+			log.Print("Disabling host build of " + ctx.ModuleName() + " for HOST_PREFER_32_BIT=true")
+		}
+	})
+
+	return module
 }
 
 func artGlobalDefaultsFactory() android.Module {
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 97daafa..101e5c4 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -63,6 +63,12 @@
     return expected == actual;
   }
 
+  template <char Separator>
+  bool UsuallyEquals(const std::vector<std::string>& expected,
+                     const ParseStringList<Separator>& actual) {
+    return expected == static_cast<std::vector<std::string>>(actual);
+  }
+
   // Try to use memcmp to compare simple plain-old-data structs.
   //
   // This should *not* generate false positives, but it can generate false negatives.
@@ -218,8 +224,13 @@
   }
 
   EXPECT_SINGLE_PARSE_EXISTS("-Xzygote", M::Zygote);
-  EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
-  EXPECT_SINGLE_PARSE_VALUE("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE(std::vector<std::string>({"/hello/world"}),
+                            "-Xbootclasspath:/hello/world",
+                            M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE(std::vector<std::string>({"/hello", "/world"}),
+                            "-Xbootclasspath:/hello:/world",
+                            M::BootClassPath);
+  EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-classpath /hello/world", M::ClassPath);
   EXPECT_SINGLE_PARSE_VALUE(Memory<1>(234), "-Xss234", M::StackSize);
   EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(1234*MB), "-Xms1234m", M::MemoryInitialSize);
   EXPECT_SINGLE_PARSE_VALUE(true, "-XX:EnableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM);
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 71422d4..baf8643 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -16,8 +16,9 @@
 
 #include "elf_debug_writer.h"
 
-#include <vector>
+#include <type_traits>
 #include <unordered_map>
+#include <vector>
 
 #include "base/array_ref.h"
 #include "debug/dwarf/dwarf_constants.h"
@@ -29,6 +30,7 @@
 #include "debug/elf_symtab_writer.h"
 #include "debug/method_debug_info.h"
 #include "debug/xz_utils.h"
+#include "elf.h"
 #include "linker/elf_builder.h"
 #include "linker/vector_output_stream.h"
 #include "oat.h"
@@ -36,6 +38,8 @@
 namespace art {
 namespace debug {
 
+using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
+
 template <typename ElfTypes>
 void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder,
                     const DebugInfo& debug_info,
@@ -165,22 +169,16 @@
   }
 }
 
-template <typename ElfTypes>
-static std::vector<uint8_t> MakeElfFileForJITInternal(
+std::vector<uint8_t> MakeElfFileForJIT(
     InstructionSet isa,
     const InstructionSetFeatures* features,
     bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos) {
-  CHECK_GT(method_infos.size(), 0u);
-  uint64_t min_address = std::numeric_limits<uint64_t>::max();
-  uint64_t max_address = 0;
-  for (const MethodDebugInfo& mi : method_infos) {
-    CHECK_EQ(mi.is_code_address_text_relative, false);
-    min_address = std::min(min_address, mi.code_address);
-    max_address = std::max(max_address, mi.code_address + mi.code_size);
-  }
+    const MethodDebugInfo& method_info) {
+  using ElfTypes = ElfRuntimeTypes;
+  CHECK_EQ(sizeof(ElfTypes::Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+  CHECK_EQ(method_info.is_code_address_text_relative, false);
   DebugInfo debug_info{};
-  debug_info.compiled_methods = method_infos;
+  debug_info.compiled_methods = ArrayRef<const MethodDebugInfo>(&method_info, 1);
   std::vector<uint8_t> buffer;
   buffer.reserve(KB);
   linker::VectorOutputStream out("Debug ELF file", &buffer);
@@ -188,28 +186,16 @@
       new linker::ElfBuilder<ElfTypes>(isa, features, &out));
   // No program headers since the ELF file is not linked and has no allocated sections.
   builder->Start(false /* write_program_headers */);
+  builder->GetText()->AllocateVirtualMemory(method_info.code_address, method_info.code_size);
   if (mini_debug_info) {
-    if (method_infos.size() > 1) {
-      std::vector<uint8_t> mdi = MakeMiniDebugInfo(isa,
-                                                   features,
-                                                   min_address,
-                                                   max_address - min_address,
-                                                   /* dex_section_address */ 0,
-                                                   /* dex_section_size */ 0,
-                                                   debug_info);
-      builder->WriteSection(".gnu_debugdata", &mdi);
-    } else {
-      // The compression is great help for multiple methods but it is not worth it for a
-      // single method due to the overheads so skip the compression here for performance.
-      builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
-      WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
-      WriteCFISection(builder.get(),
-                      debug_info.compiled_methods,
-                      dwarf::DW_DEBUG_FRAME_FORMAT,
-                      false /* write_oat_paches */);
-    }
+    // The compression is a great help for multiple methods, but it is not worth it for a
+    // single method due to the overhead, so skip the compression here for performance.
+    WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
+    WriteCFISection(builder.get(),
+                    debug_info.compiled_methods,
+                    dwarf::DW_DEBUG_FRAME_FORMAT,
+                    false /* write_oat_patches */);
   } else {
-    builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
     WriteDebugInfo(builder.get(),
                    debug_info,
                    dwarf::DW_DEBUG_FRAME_FORMAT,
@@ -220,24 +206,13 @@
   return buffer;
 }
 
-std::vector<uint8_t> MakeElfFileForJIT(
-    InstructionSet isa,
-    const InstructionSetFeatures* features,
-    bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos) {
-  if (Is64BitInstructionSet(isa)) {
-    return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_infos);
-  } else {
-    return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_infos);
-  }
-}
-
-template <typename ElfTypes>
-static std::vector<uint8_t> WriteDebugElfFileForClassesInternal(
+std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
     const InstructionSetFeatures* features,
     const ArrayRef<mirror::Class*>& types)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  using ElfTypes = ElfRuntimeTypes;
+  CHECK_EQ(sizeof(ElfTypes::Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
   std::vector<uint8_t> buffer;
   buffer.reserve(KB);
   linker::VectorOutputStream out("Debug ELF file", &buffer);
@@ -256,16 +231,6 @@
   return buffer;
 }
 
-std::vector<uint8_t> WriteDebugElfFileForClasses(InstructionSet isa,
-                                                 const InstructionSetFeatures* features,
-                                                 const ArrayRef<mirror::Class*>& types) {
-  if (Is64BitInstructionSet(isa)) {
-    return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, features, types);
-  } else {
-    return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, features, types);
-  }
-}
-
 // Explicit instantiations
 template void WriteDebugInfo<ElfTypes32>(
     linker::ElfBuilder<ElfTypes32>* builder,
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index e442e00..8ad0c42 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -54,7 +54,7 @@
     InstructionSet isa,
     const InstructionSetFeatures* features,
     bool mini_debug_info,
-    ArrayRef<const MethodDebugInfo> method_infos);
+    const MethodDebugInfo& method_info);
 
 std::vector<uint8_t> WriteDebugElfFileForClasses(
     InstructionSet isa,
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 7536c31..7253488 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -22,6 +22,7 @@
 #include <unordered_set>
 
 #include "base/bit_vector.h"
+#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "dex/invoke_type.h"
 #include "dex/method_reference.h"
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 81ecc17..44f3296 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_LINKER_ELF_BUILDER_H_
 
 #include <vector>
+#include <deque>
 
 #include "arch/instruction_set.h"
 #include "arch/mips/instruction_set_features_mips.h"
@@ -357,57 +358,49 @@
     }
 
     // Buffer symbol for this section.  It will be written later.
-    // If the symbol's section is null, it will be considered absolute (SHN_ABS).
-    // (we use this in JIT to reference code which is stored outside the debug ELF file)
     void Add(Elf_Word name,
              const Section* section,
              Elf_Addr addr,
              Elf_Word size,
              uint8_t binding,
              uint8_t type) {
-      Elf_Word section_index;
-      if (section != nullptr) {
-        DCHECK_LE(section->GetAddress(), addr);
-        DCHECK_LE(addr, section->GetAddress() + section->header_.sh_size);
-        section_index = section->GetSectionIndex();
-      } else {
-        section_index = static_cast<Elf_Word>(SHN_ABS);
-      }
-      Add(name, section_index, addr, size, binding, type);
-    }
-
-    // Buffer symbol for this section.  It will be written later.
-    void Add(Elf_Word name,
-             Elf_Word section_index,
-             Elf_Addr addr,
-             Elf_Word size,
-             uint8_t binding,
-             uint8_t type) {
       Elf_Sym sym = Elf_Sym();
       sym.st_name = name;
       sym.st_value = addr;
       sym.st_size = size;
       sym.st_other = 0;
-      sym.st_shndx = section_index;
       sym.st_info = (binding << 4) + (type & 0xf);
-      syms_.push_back(sym);
+      Add(sym, section);
+    }
+
+    // Buffer symbol for this section.  It will be written later.
+    void Add(Elf_Sym sym, const Section* section) {
+      DCHECK(section != nullptr);
+      DCHECK_LE(section->GetAddress(), sym.st_value);
+      DCHECK_LE(sym.st_value, section->GetAddress() + section->header_.sh_size);
+      sym.st_shndx = section->GetSectionIndex();
 
       // The sh_info file must be set to index one-past the last local symbol.
-      if (binding == STB_LOCAL) {
-        this->header_.sh_info = syms_.size();
+      if (sym.getBinding() == STB_LOCAL) {
+        DCHECK_EQ(syms_.back().getBinding(), STB_LOCAL);
+        this->header_.sh_info = syms_.size() + 1;
       }
+
+      syms_.push_back(sym);
     }
 
     Elf_Word GetCacheSize() { return syms_.size() * sizeof(Elf_Sym); }
 
     void WriteCachedSection() {
       this->Start();
-      this->WriteFully(syms_.data(), syms_.size() * sizeof(Elf_Sym));
+      for (; !syms_.empty(); syms_.pop_front()) {
+        this->WriteFully(&syms_.front(), sizeof(Elf_Sym));
+      }
       this->End();
     }
 
    private:
-    std::vector<Elf_Sym> syms_;  // Buffered/cached content of the whole section.
+    std::deque<Elf_Sym> syms_;  // Buffered/cached content of the whole section.
   };
 
   class AbiflagsSection final : public Section {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6108522..13c8684 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -26,6 +26,7 @@
 #include "base/arena_object.h"
 #include "base/array_ref.h"
 #include "base/iteration_range.h"
+#include "base/mutex.h"
 #include "base/quasi_atomic.h"
 #include "base/stl_util.h"
 #include "base/transform_array_ref.h"
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 52335d3..92aaa19 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1469,7 +1469,7 @@
       compiler_options.GetInstructionSet(),
       compiler_options.GetInstructionSetFeatures(),
       mini_debug_info,
-      ArrayRef<const debug::MethodDebugInfo>(&info, 1));
+      info);
   MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
   AddNativeDebugInfoForJit(reinterpret_cast<const void*>(info.code_address), elf_file);
 
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index f729934..a5bba9b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -103,6 +103,7 @@
 
 using android::base::StringAppendV;
 using android::base::StringPrintf;
+using gc::space::ImageSpace;
 
 static constexpr size_t kDefaultMinDexFilesForSwap = 2;
 static constexpr size_t kDefaultMinDexFileCumulativeSizeForSwap = 20 * MB;
@@ -964,89 +965,22 @@
   }
 
   void ExpandOatAndImageFilenames() {
-    std::string base_oat = oat_filenames_[0];
-    size_t last_oat_slash = base_oat.rfind('/');
-    if (last_oat_slash == std::string::npos) {
-      Usage("Unusable boot image oat filename %s", base_oat.c_str());
+    if (image_filenames_[0].rfind('/') == std::string::npos) {
+      Usage("Unusable boot image filename %s", image_filenames_[0].c_str());
     }
-    // We also need to honor path components that were encoded through '@'. Otherwise the loading
-    // code won't be able to find the images.
-    if (base_oat.find('@', last_oat_slash) != std::string::npos) {
-      last_oat_slash = base_oat.rfind('@');
-    }
-    base_oat = base_oat.substr(0, last_oat_slash + 1);
+    image_filenames_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, image_filenames_[0]);
 
-    std::string base_img = image_filenames_[0];
-    size_t last_img_slash = base_img.rfind('/');
-    if (last_img_slash == std::string::npos) {
-      Usage("Unusable boot image filename %s", base_img.c_str());
+    if (oat_filenames_[0].rfind('/') == std::string::npos) {
+      Usage("Unusable boot image oat filename %s", oat_filenames_[0].c_str());
     }
-    // We also need to honor path components that were encoded through '@'. Otherwise the loading
-    // code won't be able to find the images.
-    if (base_img.find('@', last_img_slash) != std::string::npos) {
-      last_img_slash = base_img.rfind('@');
-    }
+    oat_filenames_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, oat_filenames_[0]);
 
-    // Get the prefix, which is the primary image name (without path components). Strip the
-    // extension.
-    std::string prefix = base_img.substr(last_img_slash + 1);
-    if (prefix.rfind('.') != std::string::npos) {
-      prefix = prefix.substr(0, prefix.rfind('.'));
-    }
-    if (!prefix.empty()) {
-      prefix = prefix + "-";
-    }
-
-    base_img = base_img.substr(0, last_img_slash + 1);
-
-    std::string base_symbol_oat;
     if (!oat_unstripped_.empty()) {
-      base_symbol_oat = oat_unstripped_[0];
-      size_t last_symbol_oat_slash = base_symbol_oat.rfind('/');
-      if (last_symbol_oat_slash == std::string::npos) {
-        Usage("Unusable boot image symbol filename %s", base_symbol_oat.c_str());
+      if (oat_unstripped_[0].rfind('/') == std::string::npos) {
+        Usage("Unusable boot image symbol filename %s", oat_unstripped_[0].c_str());
       }
-      base_symbol_oat = base_symbol_oat.substr(0, last_symbol_oat_slash + 1);
+      oat_unstripped_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, oat_unstripped_[0]);
     }
-
-    // Now create the other names. Use a counted loop to skip the first one.
-    for (size_t i = 1; i < dex_locations_.size(); ++i) {
-      // TODO: Make everything properly std::string.
-      std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, ".art");
-      char_backing_storage_.push_front(base_img + image_name);
-      image_filenames_.push_back(char_backing_storage_.front().c_str());
-
-      std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, ".oat");
-      char_backing_storage_.push_front(base_oat + oat_name);
-      oat_filenames_.push_back(char_backing_storage_.front().c_str());
-
-      if (!base_symbol_oat.empty()) {
-        char_backing_storage_.push_front(base_symbol_oat + oat_name);
-        oat_unstripped_.push_back(char_backing_storage_.front().c_str());
-      }
-    }
-  }
-
-  // Modify the input string in the following way:
-  //   0) Assume input is /a/b/c.d
-  //   1) Strip the path  -> c.d
-  //   2) Inject prefix p -> pc.d
-  //   3) Replace suffix with s if it's "jar"  -> d == "jar" -> pc.s
-  static std::string CreateMultiImageName(std::string in,
-                                          const std::string& prefix,
-                                          const char* replace_suffix) {
-    size_t last_dex_slash = in.rfind('/');
-    if (last_dex_slash != std::string::npos) {
-      in = in.substr(last_dex_slash + 1);
-    }
-    if (!prefix.empty()) {
-      in = prefix + in;
-    }
-    if (android::base::EndsWith(in, ".jar")) {
-      in = in.substr(0, in.length() - strlen(".jar")) +
-          (replace_suffix != nullptr ? replace_suffix : "");
-    }
-    return in;
   }
 
   void InsertCompileOptions(int argc, char** argv) {
@@ -1497,11 +1431,8 @@
 
     if (IsBootImage()) {
       // If we're compiling the boot image, store the boot classpath into the Key-Value store.
-      // We need this for the multi-image case.
-      key_value_store_->Put(OatHeader::kBootClassPathKey,
-                            gc::space::ImageSpace::GetMultiImageBootClassPath(dex_locations_,
-                                                                              oat_filenames_,
-                                                                              image_filenames_));
+      // We use this when loading the boot image.
+      key_value_store_->Put(OatHeader::kBootClassPathKey, android::base::Join(dex_locations_, ':'));
     }
 
     if (!IsBootImage()) {
@@ -1513,8 +1444,7 @@
 
       if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) {
         TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
-        std::vector<gc::space::ImageSpace*> image_spaces =
-            Runtime::Current()->GetHeap()->GetBootImageSpaces();
+        std::vector<ImageSpace*> image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
         boot_image_checksum_ = image_spaces[0]->GetImageHeader().GetImageChecksum();
       } else {
         boot_image_checksum_ = 0u;
@@ -1953,7 +1883,7 @@
     if (IsImage()) {
       if (IsAppImage() && image_base_ == 0) {
         gc::Heap* const heap = Runtime::Current()->GetHeap();
-        for (gc::space::ImageSpace* image_space : heap->GetBootImageSpaces()) {
+        for (ImageSpace* image_space : heap->GetBootImageSpaces()) {
           image_base_ = std::max(image_base_, RoundUp(
               reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
               kPageSize));
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 97a5f24..92dd932 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -192,24 +192,12 @@
   }
 
   int Dex2Oat(const std::vector<std::string>& dex2oat_args, std::string* error_msg) {
-    Runtime* runtime = Runtime::Current();
-
-    const std::vector<gc::space::ImageSpace*>& image_spaces =
-        runtime->GetHeap()->GetBootImageSpaces();
-    if (image_spaces.empty()) {
-      *error_msg = "No image location found for Dex2Oat.";
+    std::vector<std::string> argv;
+    if (!CommonRuntimeTest::StartDex2OatCommandLine(&argv, error_msg)) {
       return false;
     }
-    std::string image_location = image_spaces[0]->GetImageLocation();
 
-    std::vector<std::string> argv;
-    argv.push_back(runtime->GetCompilerExecutable());
-
-    if (runtime->IsJavaDebuggable()) {
-      argv.push_back("--debuggable");
-    }
-    runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
+    Runtime* runtime = Runtime::Current();
     if (!runtime->IsVerificationEnabled()) {
       argv.push_back("--compiler-filter=assume-verified");
     }
@@ -226,11 +214,6 @@
       argv.push_back("--host");
     }
 
-    argv.push_back("--boot-image=" + image_location);
-
-    std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
-    argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
     argv.insert(argv.end(), dex2oat_args.begin(), dex2oat_args.end());
 
     // We must set --android-root.
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index bdb5291..bd8cf5a 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -169,10 +169,11 @@
   {
     // Create a generic tmp file, to be the base of the .art and .oat temporary files.
     ScratchFile location;
-    for (int i = 0; i < static_cast<int>(class_path.size()); ++i) {
-      std::string cur_location =
-          android::base::StringPrintf("%s-%d.art", location.GetFilename().c_str(), i);
-      out_helper.image_locations.push_back(ScratchFile(cur_location));
+    std::vector<std::string> image_locations =
+        gc::space::ImageSpace::ExpandMultiImageLocations(out_helper.dex_file_locations,
+                                                         location.GetFilename() + ".art");
+    for (size_t i = 0u; i != class_path.size(); ++i) {
+      out_helper.image_locations.push_back(ScratchFile(image_locations[i]));
     }
   }
   std::vector<std::string> image_filenames;
@@ -223,10 +224,7 @@
       TimingLogger::ScopedTiming t("WriteElf", &timings);
       SafeMap<std::string, std::string> key_value_store;
       key_value_store.Put(OatHeader::kBootClassPathKey,
-                          gc::space::ImageSpace::GetMultiImageBootClassPath(
-                              out_helper.dex_file_locations,
-                              oat_filenames,
-                              image_filenames));
+                          android::base::Join(out_helper.dex_file_locations, ':'));
 
       std::vector<std::unique_ptr<ElfWriter>> elf_writers;
       std::vector<std::unique_ptr<OatWriter>> oat_writers;
@@ -440,6 +438,9 @@
   MemMap::Init();
 
   RuntimeOptions options;
+  options.emplace_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()), nullptr);
+  options.emplace_back(
+      GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()), nullptr);
   std::string image("-Ximage:");
   image.append(helper.image_locations[0].GetFilename());
   options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
diff --git a/dexoptanalyzer/dexoptanalyzer_test.cc b/dexoptanalyzer/dexoptanalyzer_test.cc
index b9116f0..f6fd1fb 100644
--- a/dexoptanalyzer/dexoptanalyzer_test.cc
+++ b/dexoptanalyzer/dexoptanalyzer_test.cc
@@ -46,6 +46,10 @@
     if (assume_profile_changed) {
       argv_str.push_back("--assume-profile-changed");
     }
+    argv_str.push_back("--runtime-arg");
+    argv_str.push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+    argv_str.push_back("--runtime-arg");
+    argv_str.push_back(GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
     argv_str.push_back("--image=" + GetImageLocation());
     argv_str.push_back("--android-data=" + android_data_);
 
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 73df2a2..739d9b8 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -34,12 +34,8 @@
 
 namespace art {
 
-static const char* kImgDiagDiffPid = "--image-diff-pid";
-static const char* kImgDiagBootImage = "--boot-image";
 static const char* kImgDiagBinaryName = "imgdiag";
 
-static const char* kImgDiagZygoteDiffPid = "--zygote-diff-pid";
-
 // from kernel <include/linux/threads.h>
 #define PID_MAX_LIMIT (4*1024*1024)  // Upper bound. Most kernel configs will have smaller max pid.
 
@@ -93,25 +89,15 @@
     EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
 
     // Run imgdiag --image-diff-pid=$image_diff_pid and wait until it's done with a 0 exit code.
-    std::string diff_pid_args;
-    std::string zygote_diff_pid_args;
-    {
-      std::stringstream diff_pid_args_ss;
-      diff_pid_args_ss << kImgDiagDiffPid << "=" << image_diff_pid;
-      diff_pid_args = diff_pid_args_ss.str();
-    }
-    {
-      std::stringstream zygote_pid_args_ss;
-      zygote_pid_args_ss << kImgDiagZygoteDiffPid << "=" << image_diff_pid;
-      zygote_diff_pid_args = zygote_pid_args_ss.str();
-    }
-    std::string boot_image_args = std::string(kImgDiagBootImage) + "=" + boot_image;
-
     std::vector<std::string> exec_argv = {
         file_path,
-        diff_pid_args,
-        zygote_diff_pid_args,
-        boot_image_args
+        "--image-diff-pid=" + std::to_string(image_diff_pid),
+        "--zygote-diff-pid=" + std::to_string(image_diff_pid),
+        "--runtime-arg",
+        GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()),
+        "--runtime-arg",
+        GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()),
+        "--boot-image=" + boot_image
     };
 
     return ::art::Exec(exec_argv, error_msg);
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 5a0b425..278203d 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -24,6 +24,7 @@
 #include "nativehelper/scoped_local_ref.h"
 
 #include "android-base/stringprintf.h"
+#include "android-base/strings.h"
 #include "android-base/unique_fd.h"
 #include <unicode/uvernum.h>
 
@@ -328,9 +329,48 @@
 }
 
 std::vector<std::string> CommonArtTestImpl::GetLibCoreDexFileNames() {
-  return std::vector<std::string>({GetDexFileName("core-oj", IsHost()),
-                                   GetDexFileName("core-libart", IsHost()),
-                                   GetDexFileName("core-simple", IsHost())});
+  // Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+  // because that's what we use for compiling the core.art image.
+  static const char* const kLibcoreModules[] = {
+      "core-oj",
+      "core-libart",
+      "core-simple",
+      "conscrypt",
+      "okhttp",
+      "bouncycastle",
+  };
+
+  std::vector<std::string> result;
+  result.reserve(arraysize(kLibcoreModules));
+  for (const char* module : kLibcoreModules) {
+    result.push_back(GetDexFileName(module, IsHost()));
+  }
+  return result;
+}
+
+std::vector<std::string> CommonArtTestImpl::GetLibCoreDexLocations() {
+  std::vector<std::string> result = GetLibCoreDexFileNames();
+  if (IsHost()) {
+    // Strip the ANDROID_BUILD_TOP directory including the directory separator '/'.
+    const char* host_dir = getenv("ANDROID_BUILD_TOP");
+    CHECK(host_dir != nullptr);
+    std::string prefix = host_dir;
+    CHECK(!prefix.empty());
+    if (prefix.back() != '/') {
+      prefix += '/';
+    }
+    for (std::string& location : result) {
+      CHECK_GT(location.size(), prefix.size());
+      CHECK_EQ(location.compare(0u, prefix.size(), prefix), 0);
+      location.erase(0u, prefix.size());
+    }
+  }
+  return result;
+}
+
+std::string CommonArtTestImpl::GetClassPathOption(const char* option,
+                                                  const std::vector<std::string>& class_path) {
+  return option + android::base::Join(class_path, ':');
 }
 
 std::string CommonArtTestImpl::GetTestAndroidRoot() {
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 0f4800d..3e2340f 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -99,6 +99,12 @@
   // Gets the paths of the libcore dex files.
   static std::vector<std::string> GetLibCoreDexFileNames();
 
+  // Gets the locations of the libcore dex files.
+  static std::vector<std::string> GetLibCoreDexLocations();
+
+  static std::string GetClassPathOption(const char* option,
+                                        const std::vector<std::string>& class_path);
+
   // Returns bin directory which contains host's prebuild tools.
   static std::string GetAndroidHostToolsDir();
 
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 4ee5101..728939f 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -122,6 +122,10 @@
         "-Xmx512m",
         "--runtime-arg",
         "-Xnorelocate",
+        "--runtime-arg",
+        GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()),
+        "--runtime-arg",
+        GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()),
         "--boot-image=" + GetCoreArtLocation(),
         "--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)),
         "--dex-file=" + GetTestDexFileName(GetAppBaseName().c_str()),
@@ -181,6 +185,11 @@
         expected_prefixes.push_back("IMAGE BEGIN:");
         expected_prefixes.push_back("kDexCaches:");
       } else if (mode == kModeOatWithBootImage) {
+        exec_argv.push_back("--runtime-arg");
+        exec_argv.push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+        exec_argv.push_back("--runtime-arg");
+        exec_argv.push_back(
+            GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
         exec_argv.push_back("--boot-image=" + GetCoreArtLocation());
         exec_argv.push_back("--instruction-set=" + std::string(
             GetInstructionSetString(kRuntimeISA)));
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 2131120..051db4c 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -91,7 +91,8 @@
         self->GetThreadName(name);
         if (name != "JDWP" &&
             name != "Signal Catcher" &&
-            !android::base::StartsWith(name, "Jit thread pool")) {
+            !android::base::StartsWith(name, "Jit thread pool") &&
+            !android::base::StartsWith(name, "Runtime worker thread")) {
           LOG(FATAL) << "Unexpected thread before start: " << name << " id: "
                      << self->GetThreadId();
         }
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 410901e..b03ef60 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -33,6 +33,7 @@
         "art_method.cc",
         "backtrace_helper.cc",
         "barrier.cc",
+        "base/locks.cc",
         "base/mem_map_arena_pool.cc",
         "base/mutex.cc",
         "base/quasi_atomic.cc",
@@ -454,7 +455,7 @@
     cmd: "$(location generate_operator_out) art/runtime $(in) > $(out)",
     tools: ["generate_operator_out"],
     srcs: [
-        "base/mutex.h",
+        "base/locks.h",
         "class_loader_context.h",
         "class_status.h",
         "debugger.h",
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index e97d2cb..58ae394 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -54,7 +54,8 @@
       "cortex-a76",
       "exynos-m1",
       "denver",
-      "kryo"
+      "kryo",
+      "kryo385",
   };
   bool has_armv8a = FindVariantInArray(arm_variants_with_armv8a,
                                        arraysize(arm_variants_with_armv8a),
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 7796ca7..963c207 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -57,10 +57,6 @@
   static const char* arm64_variants_with_crc[] = {
       "default",
       "generic",
-      "kryo",
-      "exynos-m1",
-      "exynos-m2",
-      "exynos-m3",
       "cortex-a35",
       "cortex-a53",
       "cortex-a53.a57",
@@ -71,18 +67,25 @@
       "cortex-a55",
       "cortex-a75",
       "cortex-a76",
+      "exynos-m1",
+      "exynos-m2",
+      "exynos-m3",
+      "kryo",
+      "kryo385",
   };
 
   static const char* arm64_variants_with_lse[] = {
       "cortex-a55",
       "cortex-a75",
       "cortex-a76",
+      "kryo385",
   };
 
   static const char* arm64_variants_with_fp16[] = {
       "cortex-a55",
       "cortex-a75",
       "cortex-a76",
+      "kryo385",
   };
 
   static const char* arm64_variants_with_dotprod[] = {
@@ -124,7 +127,8 @@
         "exynos-m2",
         "exynos-m3",
         "denver64",
-        "kryo"
+        "kryo",
+        "kryo385",
     };
     if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) {
       std::ostringstream os;
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index d067f66..5980b03 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -21,7 +21,6 @@
 #include <stdint.h>
 
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
new file mode 100644
index 0000000..cfc9f1d
--- /dev/null
+++ b/runtime/base/locks.cc
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "locks.h"
+
+#include <errno.h>
+#include <sys/time.h>
+
+#include "android-base/logging.h"
+
+#include "base/atomic.h"
+#include "base/logging.h"
+#include "base/systrace.h"
+#include "base/time_utils.h"
+#include "base/value_object.h"
+#include "mutex-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+
+static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);
+
+Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::alloc_tracker_lock_ = nullptr;
+Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
+Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
+ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
+ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
+Mutex* Locks::custom_tls_lock_ = nullptr;
+Mutex* Locks::deoptimization_lock_ = nullptr;
+ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
+Mutex* Locks::intern_table_lock_ = nullptr;
+Mutex* Locks::jni_function_table_lock_ = nullptr;
+Mutex* Locks::jni_libraries_lock_ = nullptr;
+Mutex* Locks::logging_lock_ = nullptr;
+Mutex* Locks::modify_ldt_lock_ = nullptr;
+MutatorMutex* Locks::mutator_lock_ = nullptr;
+Mutex* Locks::profiler_lock_ = nullptr;
+ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
+ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
+Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
+Mutex* Locks::reference_processor_lock_ = nullptr;
+Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
+Mutex* Locks::runtime_shutdown_lock_ = nullptr;
+Mutex* Locks::cha_lock_ = nullptr;
+Mutex* Locks::subtype_check_lock_ = nullptr;
+Mutex* Locks::thread_list_lock_ = nullptr;
+ConditionVariable* Locks::thread_exit_cond_ = nullptr;
+Mutex* Locks::thread_suspend_count_lock_ = nullptr;
+Mutex* Locks::trace_lock_ = nullptr;
+Mutex* Locks::unexpected_signal_lock_ = nullptr;
+Mutex* Locks::user_code_suspension_lock_ = nullptr;
+Uninterruptible Roles::uninterruptible_;
+ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
+Mutex* Locks::jni_weak_globals_lock_ = nullptr;
+ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+Mutex* Locks::native_debug_interface_lock_ = nullptr;
+std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
+Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
+
+// Wait for an amount of time that roughly increases in the argument i.
+// Spin for small arguments and yield/sleep for longer ones.
+static void BackOff(uint32_t i) {
+  static constexpr uint32_t kSpinMax = 10;
+  static constexpr uint32_t kYieldMax = 20;
+  if (i <= kSpinMax) {
+    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
+    // test-and-test-and-set loop in the caller.  Possibly skip entirely on a uniprocessor.
+    volatile uint32_t x = 0;
+    const uint32_t spin_count = 10 * i;
+    for (uint32_t spin = 0; spin < spin_count; ++spin) {
+      ++x;  // Volatile; hence should not be optimized away.
+    }
+    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
+  } else if (i <= kYieldMax) {
+    sched_yield();
+  } else {
+    NanoSleep(1000ull * (i - kYieldMax));
+  }
+}
+
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
+ public:
+  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
+    for (uint32_t i = 0;
+         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
+                                                                                     mutex);
+         ++i) {
+      BackOff(i);
+    }
+  }
+
+  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
+    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
+              mutex_);
+    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
+  }
+
+ private:
+  const BaseMutex* const mutex_;
+};
+
+void Locks::Init() {
+  if (logging_lock_ != nullptr) {
+    // Already initialized.
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+      DCHECK(modify_ldt_lock_ != nullptr);
+    } else {
+      DCHECK(modify_ldt_lock_ == nullptr);
+    }
+    DCHECK(abort_lock_ != nullptr);
+    DCHECK(alloc_tracker_lock_ != nullptr);
+    DCHECK(allocated_monitor_ids_lock_ != nullptr);
+    DCHECK(allocated_thread_ids_lock_ != nullptr);
+    DCHECK(breakpoint_lock_ != nullptr);
+    DCHECK(classlinker_classes_lock_ != nullptr);
+    DCHECK(custom_tls_lock_ != nullptr);
+    DCHECK(deoptimization_lock_ != nullptr);
+    DCHECK(heap_bitmap_lock_ != nullptr);
+    DCHECK(oat_file_manager_lock_ != nullptr);
+    DCHECK(verifier_deps_lock_ != nullptr);
+    DCHECK(host_dlopen_handles_lock_ != nullptr);
+    DCHECK(intern_table_lock_ != nullptr);
+    DCHECK(jni_function_table_lock_ != nullptr);
+    DCHECK(jni_libraries_lock_ != nullptr);
+    DCHECK(logging_lock_ != nullptr);
+    DCHECK(mutator_lock_ != nullptr);
+    DCHECK(profiler_lock_ != nullptr);
+    DCHECK(cha_lock_ != nullptr);
+    DCHECK(subtype_check_lock_ != nullptr);
+    DCHECK(thread_list_lock_ != nullptr);
+    DCHECK(thread_suspend_count_lock_ != nullptr);
+    DCHECK(trace_lock_ != nullptr);
+    DCHECK(unexpected_signal_lock_ != nullptr);
+    DCHECK(user_code_suspension_lock_ != nullptr);
+    DCHECK(dex_lock_ != nullptr);
+    DCHECK(native_debug_interface_lock_ != nullptr);
+  } else {
+    // Create global locks in level order from highest lock level to lowest.
+    LockLevel current_lock_level = kInstrumentEntrypointsLock;
+    DCHECK(instrument_entrypoints_lock_ == nullptr);
+    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
+
+    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
+      if ((new_level) >= current_lock_level) { \
+        /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
+        fprintf(stderr, "New lock level %d is not less than current level %d\n", \
+                new_level, current_lock_level); \
+        exit(1); \
+      } \
+      current_lock_level = new_level;
+
+    UPDATE_CURRENT_LOCK_LEVEL(kUserCodeSuspensionLock);
+    DCHECK(user_code_suspension_lock_ == nullptr);
+    user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
+    DCHECK(mutator_lock_ == nullptr);
+    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
+    DCHECK(heap_bitmap_lock_ == nullptr);
+    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+    DCHECK(trace_lock_ == nullptr);
+    trace_lock_ = new Mutex("trace lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
+    DCHECK(runtime_shutdown_lock_ == nullptr);
+    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
+    DCHECK(profiler_lock_ == nullptr);
+    profiler_lock_ = new Mutex("profiler lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
+    DCHECK(deoptimization_lock_ == nullptr);
+    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
+    DCHECK(alloc_tracker_lock_ == nullptr);
+    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
+    DCHECK(thread_list_lock_ == nullptr);
+    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
+    DCHECK(jni_libraries_lock_ == nullptr);
+    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
+    DCHECK(breakpoint_lock_ == nullptr);
+    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
+    DCHECK(subtype_check_lock_ == nullptr);
+    subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
+    DCHECK(classlinker_classes_lock_ == nullptr);
+    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
+                                                      current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
+    DCHECK(allocated_monitor_ids_lock_ == nullptr);
+    allocated_monitor_ids_lock_ =  new Mutex("allocated monitor ids lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
+    DCHECK(allocated_thread_ids_lock_ == nullptr);
+    allocated_thread_ids_lock_ =  new Mutex("allocated thread ids lock", current_lock_level);
+
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
+      DCHECK(modify_ldt_lock_ == nullptr);
+      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
+    }
+
+    UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
+    DCHECK(dex_lock_ == nullptr);
+    dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
+    DCHECK(oat_file_manager_lock_ == nullptr);
+    oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
+    DCHECK(verifier_deps_lock_ == nullptr);
+    verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
+    DCHECK(host_dlopen_handles_lock_ == nullptr);
+    host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
+    DCHECK(intern_table_lock_ == nullptr);
+    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
+    DCHECK(reference_processor_lock_ == nullptr);
+    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
+    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
+    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
+    DCHECK(reference_queue_weak_references_lock_ == nullptr);
+    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue weak references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
+    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
+    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
+    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
+    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
+    DCHECK(reference_queue_soft_references_lock_ == nullptr);
+    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
+    DCHECK(jni_globals_lock_ == nullptr);
+    jni_globals_lock_ =
+        new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
+    DCHECK(jni_weak_globals_lock_ == nullptr);
+    jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
+    DCHECK(jni_function_table_lock_ == nullptr);
+    jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
+    DCHECK(custom_tls_lock_ == nullptr);
+    custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
+    DCHECK(cha_lock_ == nullptr);
+    cha_lock_ = new Mutex("CHA lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
+    DCHECK(native_debug_interface_lock_ == nullptr);
+    native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
+    DCHECK(abort_lock_ == nullptr);
+    abort_lock_ = new Mutex("abort lock", current_lock_level, true);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
+    DCHECK(thread_suspend_count_lock_ == nullptr);
+    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
+    DCHECK(unexpected_signal_lock_ == nullptr);
+    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
+    DCHECK(logging_lock_ == nullptr);
+    logging_lock_ = new Mutex("logging lock", current_lock_level, true);
+
+    #undef UPDATE_CURRENT_LOCK_LEVEL
+
+    // List of mutexes that we may hold when accessing a weak ref.
+    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
+    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
+
+    InitConditions();
+  }
+}
+
+void Locks::InitConditions() {
+  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
+}
+
+void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
+  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
+}
+
+// Helper to allow checking shutdown while ignoring locking requirements.
+bool Locks::IsSafeToCallAbortRacy() {
+  Locks::ClientCallback* safe_to_call_abort_cb =
+      safe_to_call_abort_callback.load(std::memory_order_acquire);
+  return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
+}
+
+void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+  if (need_lock) {
+    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+    expected_mutexes_on_weak_ref_access_.push_back(mutex);
+  } else {
+    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+    expected_mutexes_on_weak_ref_access_.push_back(mutex);
+  }
+}
+
+void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+  if (need_lock) {
+    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+    auto it = std::find(list.begin(), list.end(), mutex);
+    DCHECK(it != list.end());
+    list.erase(it);
+  } else {
+    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+    auto it = std::find(list.begin(), list.end(), mutex);
+    DCHECK(it != list.end());
+    list.erase(it);
+  }
+}
+
+bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
+  ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+  std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+  return std::find(list.begin(), list.end(), mutex) != list.end();
+}
+
+}  // namespace art
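Usage note (illustrative, not part of this change): a runtime subsystem that may hold one of its own locks while reading weak references would register that lock roughly as follows, so that Thread::CheckEmptyCheckpointFromWeakRefAccess expects it. The lock name, level, and helper function are placeholders.

    // Hypothetical registration; need_lock defaults to true, so the scoped guard above is taken.
    void RegisterMyWeakTableLock() {
      static Mutex* my_weak_table_lock = new Mutex("my weak table lock", kDefaultMutexLevel);
      Locks::AddToExpectedMutexesOnWeakRefAccess(my_weak_table_lock);
      // On teardown the lock would be removed again:
      // Locks::RemoveFromExpectedMutexesOnWeakRefAccess(my_weak_table_lock);
    }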
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
new file mode 100644
index 0000000..8cbe372
--- /dev/null
+++ b/runtime/base/locks.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_LOCKS_H_
+#define ART_RUNTIME_BASE_LOCKS_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <vector>
+
+#include "base/atomic.h"
+#include "base/macros.h"
+
+namespace art {
+
+class BaseMutex;
+class ConditionVariable;
+class SHARED_LOCKABLE ReaderWriterMutex;
+class SHARED_LOCKABLE MutatorMutex;
+class LOCKABLE Mutex;
+class Thread;
+
+// LockLevel is used to impose a lock hierarchy [1]: acquiring a Mutex at a level higher than or
+// equal to that of any lock the thread already holds is invalid. The lock hierarchy achieves a
+// cycle-free partial ordering and thereby causes would-be deadlocks to fail these checks instead
+// of hanging (see the illustrative sketch after the enum below).
+//
+// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
+enum LockLevel : uint8_t {
+  kLoggingLock = 0,
+  kSwapMutexesLock,
+  kUnexpectedSignalLock,
+  kThreadSuspendCountLock,
+  kAbortLock,
+  kNativeDebugInterfaceLock,
+  kSignalHandlingLock,
+  // A generic lock level for mutexes that should not allow any additional mutexes to be acquired
+  // while they are held.
+  kGenericBottomLock,
+  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
+  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
+  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
+  // the second lock acquisition does not result in deadlock. This is implemented in the lock
+  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
+  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
+  // is here near the bottom of the hierarchy because other locks should not be
+  // acquired while it is held. kThreadWaitLock cannot be moved here because GC
+  // activity acquires locks while holding the wait lock.
+  kThreadWaitWakeLock,
+  kJdwpAdbStateLock,
+  kJdwpSocketLock,
+  kRegionSpaceRegionLock,
+  kMarkSweepMarkStackLock,
+  // Can be held while GC related work is done, and thus must be above kMarkSweepMarkStackLock
+  kThreadWaitLock,
+  kCHALock,
+  kJitCodeCacheLock,
+  kRosAllocGlobalLock,
+  kRosAllocBracketLock,
+  kRosAllocBulkFreeLock,
+  kTaggingLockLevel,
+  kTransactionLogLock,
+  kCustomTlsLock,
+  kJniFunctionTableLock,
+  kJniWeakGlobalsLock,
+  kJniGlobalsLock,
+  kReferenceQueueSoftReferencesLock,
+  kReferenceQueuePhantomReferencesLock,
+  kReferenceQueueFinalizerReferencesLock,
+  kReferenceQueueWeakReferencesLock,
+  kReferenceQueueClearedReferencesLock,
+  kReferenceProcessorLock,
+  kJitDebugInterfaceLock,
+  kAllocSpaceLock,
+  kBumpPointerSpaceBlockLock,
+  kArenaPoolLock,
+  kInternTableLock,
+  kOatFileSecondaryLookupLock,
+  kHostDlOpenHandlesLock,
+  kVerifierDepsLock,
+  kOatFileManagerLock,
+  kTracingUniqueMethodsLock,
+  kTracingStreamingLock,
+  kClassLoaderClassesLock,
+  kDefaultMutexLevel,
+  kDexLock,
+  kMarkSweepLargeObjectLock,
+  kJdwpObjectRegistryLock,
+  kModifyLdtLock,
+  kAllocatedThreadIdsLock,
+  kMonitorPoolLock,
+  kClassLinkerClassesLock,  // TODO rename.
+  kDexToDexCompilerLock,
+  kSubtypeCheckLock,
+  kBreakpointLock,
+  kMonitorLock,
+  kMonitorListLock,
+  kJniLoadLibraryLock,
+  kThreadListLock,
+  kAllocTrackerLock,
+  kDeoptimizationLock,
+  kProfilerLock,
+  kJdwpShutdownLock,
+  kJdwpEventListLock,
+  kJdwpAttachLock,
+  kJdwpStartLock,
+  kRuntimeShutdownLock,
+  kTraceLock,
+  kHeapBitmapLock,
+  kMutatorLock,
+  kUserCodeSuspensionLock,
+  kInstrumentEntrypointsLock,
+  kZygoteCreationLock,
+
+  // The highest valid lock level. Use this if there is code that should only be called with no
+  // other locks held. Since this is the highest lock level we also allow it to be held even if the
+  // runtime or current thread is not fully set up yet (for example during thread attach). Note
+  // that this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is
+  // not really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusively.
+  // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is
+  // held. Since holding the mutator_lock_ exclusively means that all other threads are suspended,
+  // this prevents deadlocks while still allowing this lock level to function as a "highest" level.
+  kTopLockLevel,
+
+  kLockLevelCount  // Must come last.
+};
+std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
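An illustrative sketch of how the hierarchy plays out in practice, assuming two hypothetical mutexes placed at existing levels (Mutex and MutexLock come from mutex.h); in debug builds the lock-level bookkeeping flags the second function.

    // Names and level choices are made up for the example.
    Mutex outer_lock("outer lock", kThreadListLock);      // higher level
    Mutex inner_lock("inner lock", kJniLoadLibraryLock);  // lower level

    void LevelsDescend(Thread* self) {
      MutexLock outer(self, outer_lock);
      MutexLock inner(self, inner_lock);  // OK: levels strictly decrease.
    }

    void LevelViolation(Thread* self) {
      MutexLock inner(self, inner_lock);
      MutexLock outer(self, outer_lock);  // Invalid: flagged by the debug-build level checks.
    }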
+
+// For StartNoThreadSuspension and EndNoThreadSuspension.
+class CAPABILITY("role") Role {
+ public:
+  void Acquire() ACQUIRE() {}
+  void Release() RELEASE() {}
+  const Role& operator!() const { return *this; }
+};
+
+class Uninterruptible : public Role {
+};
+
+// Global mutexes corresponding to the levels above.
+class Locks {
+ public:
+  static void Init();
+  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
+
+  // Destroying various lock types can emit errors that vary depending upon
+  // whether the client (art::Runtime) is currently active.  Allow the client
+  // to set a callback that is used to check when it is acceptable to call
+  // Abort.  The default behavior is that the client *is not* able to call
+  // Abort if no callback is established.
+  using ClientCallback = bool();
+  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
+  // Checks for whether it is safe to call Abort() without using locks.
+  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
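A minimal sketch (not the real art::Runtime wiring) of how a client might install the callback described above; the callback body is a placeholder for whatever state the client keeps.

    static bool ClientThinksAbortIsSafe() {
      return true;  // Placeholder: e.g. "runtime fully started and not shutting down".
    }

    void InstallAbortSafetyCallback() {
      Locks::SetClientCallback(&ClientThinksAbortIsSafe);
      // From here on, IsSafeToCallAbortRacy() consults the callback rather than returning false.
    }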
+
+  // Add a mutex to expected_mutexes_on_weak_ref_access_.
+  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
+  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
+  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
+
+  // Guards allocation entrypoint instrumenting.
+  static Mutex* instrument_entrypoints_lock_;
+
+  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
+  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
+  // only if the suspension is not due to SuspendReason::kForUserCode.
+  static Mutex* user_code_suspension_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
+
+  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
+  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
+  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
+  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
+  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
+  //
+  // Thread suspension:
+  // mutator thread                                | GC/Debugger
+  //   .. running ..                               |   .. running ..
+  //   .. running ..                               | Request thread suspension by:
+  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
+  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
+  //   .. running ..                               |     all mutator threads
+  //   .. running ..                               |   - releasing thread_suspend_count_lock_
+  //   .. running ..                               | Block wait for all threads to pass a barrier
+  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
+  // suspend code.                                 |   .. blocked ..
+  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
+  // x: Acquire thread_suspend_count_lock_         |   .. running ..
+  // while Thread::suspend_count_ > 0              |   .. running ..
+  //   - wait on Thread::resume_cond_              |   .. running ..
+  //     (releases thread_suspend_count_lock_)     |   .. running ..
+  //   .. waiting ..                               | Request thread resumption by:
+  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
+  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
+  //   .. waiting ..                               |     all mutator threads
+  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
+  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
+  // Release thread_suspend_count_lock_            |  .. running ..
+  // Change to kRunnable                           |  .. running ..
+  //  - this uses a CAS operation to ensure the    |  .. running ..
+  //    suspend request flag isn't raised as the   |  .. running ..
+  //    state is changed                           |  .. running ..
+  //  - if the CAS operation fails then goto x     |  .. running ..
+  //  .. running ..                                |  .. running ..
+  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);
+
+  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
+  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
+
+  // Guards shutdown of the runtime.
+  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+
+  // Guards background profiler global state.
+  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
+  // Guards trace (ie traceview) requests.
+  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+
+  // Guards debugger recent allocation records.
+  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
+
+  // Guards updates to instrumentation to ensure mutual exclusion of
+  // events like deoptimization requests.
+  // TODO: improve name, perhaps instrumentation_update_lock_.
+  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
+
+  // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
+  // This lock is used in SubtypeCheck methods which are the interface for
+  // any SubtypeCheck-mutating methods.
+  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
+  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+
+  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
+  // attaching and detaching.
+  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);
+
+  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
+  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
+
+  // Guards maintaining loading library data structures.
+  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
+  // Guards breakpoints.
+  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
+
+  // Guards lists of classes within the class linker.
+  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
+
+  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
+  // doesn't try to hold a higher level Mutex.
+  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
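For example (hypothetical class and member, using Mutex from mutex.h), a newly introduced lock would typically pick up this ordering annotation as follows; the constructor supplies the name and the default kDefaultMutexLevel.

    class MySubsystem {
     public:
      MySubsystem() : my_subsystem_lock_("my subsystem lock") {}

     private:
      // Annotated as acquired after classlinker_classes_lock_ via the macro above.
      Mutex my_subsystem_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
    };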
+
+  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+  // Guard the allocation/deallocation of thread ids.
+  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);
+
+  // Guards modification of the LDT on x86.
+  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+
+  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+
+  // Guards opened oat files in OatFileManager.
+  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);
+
+  // Guards extra string entries for VerifierDeps.
+  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
+
+  // Guards dlopen_handles_ in DlOpenOatFile.
+  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);
+
+  // Guards intern table.
+  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);
+
+  // Guards reference processor.
+  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
+
+  // Guards cleared references queue.
+  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
+
+  // Guards weak references queue.
+  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
+
+  // Guards finalizer references queue.
+  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
+
+  // Guards phantom references queue.
+  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
+
+  // Guards soft references queue.
+  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+
+  // Guard accesses to the JNI Global Reference table.
+  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+
+  // Guard accesses to the JNI Weak Global Reference table.
+  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
+
+  // Guard accesses to the JNI function table override.
+  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
+
+  // Guard accesses to the Thread::custom_tls_. We use this to allow the TLS of other threads to be
+  // read (though the reader must hold the ThreadListLock or have some other way of ensuring the
+  // thread will not die in the meantime). This is useful for (e.g.) the implementation of
+  // GetThreadLocalStorage.
+  static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
+
+  // Guards Class Hierarchy Analysis (CHA).
+  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
+  // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check that the
+  // code doesn't try to acquire a higher level Mutex. NB: due to the way annotalysis works, this
+  // actually only encodes the mutex as being below cha_lock_, even though the kGenericBottomLock
+  // level itself is lower than that.
+  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
+
+  // Have an exclusive aborting thread.
+  static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
+  // Allow mutual exclusion when manipulating Thread::suspend_count_.
+  // TODO: Does the trade-off of a per-thread lock make sense?
+  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
+
+  // One unexpected signal at a time lock.
+  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+
+  // Guards the magic global variables used by native tools (e.g. libunwind).
+  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
+  // Have an exclusive logging thread.
+  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
+
+  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
+  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
+  // encounter an unexpected mutex on accessing weak refs,
+  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
+  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
+  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
+  class ScopedExpectedMutexesOnWeakRefAccessLock;
+};
+
+class Roles {
+ public:
+  // Uninterruptible means that the thread may not become suspended.
+  static Uninterruptible uninterruptible_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_LOCKS_H_
diff --git a/runtime/base/mutator_locked_dumpable.h b/runtime/base/mutator_locked_dumpable.h
index cf2199c..afbd732 100644
--- a/runtime/base/mutator_locked_dumpable.h
+++ b/runtime/base/mutator_locked_dumpable.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_BASE_MUTATOR_LOCKED_DUMPABLE_H_
 #define ART_RUNTIME_BASE_MUTATOR_LOCKED_DUMPABLE_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "thread-current-inl.h"
 
 namespace art {
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index ca2ed80..5a52818 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -34,51 +34,6 @@
 
 using android::base::StringPrintf;
 
-static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);
-
-Mutex* Locks::abort_lock_ = nullptr;
-Mutex* Locks::alloc_tracker_lock_ = nullptr;
-Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
-Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
-ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
-ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
-Mutex* Locks::custom_tls_lock_ = nullptr;
-Mutex* Locks::deoptimization_lock_ = nullptr;
-ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
-Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
-Mutex* Locks::intern_table_lock_ = nullptr;
-Mutex* Locks::jni_function_table_lock_ = nullptr;
-Mutex* Locks::jni_libraries_lock_ = nullptr;
-Mutex* Locks::logging_lock_ = nullptr;
-Mutex* Locks::modify_ldt_lock_ = nullptr;
-MutatorMutex* Locks::mutator_lock_ = nullptr;
-Mutex* Locks::profiler_lock_ = nullptr;
-ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
-ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
-Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
-Mutex* Locks::reference_processor_lock_ = nullptr;
-Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
-Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
-Mutex* Locks::runtime_shutdown_lock_ = nullptr;
-Mutex* Locks::cha_lock_ = nullptr;
-Mutex* Locks::subtype_check_lock_ = nullptr;
-Mutex* Locks::thread_list_lock_ = nullptr;
-ConditionVariable* Locks::thread_exit_cond_ = nullptr;
-Mutex* Locks::thread_suspend_count_lock_ = nullptr;
-Mutex* Locks::trace_lock_ = nullptr;
-Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::user_code_suspension_lock_ = nullptr;
-Uninterruptible Roles::uninterruptible_;
-ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
-Mutex* Locks::jni_weak_globals_lock_ = nullptr;
-ReaderWriterMutex* Locks::dex_lock_ = nullptr;
-Mutex* Locks::native_debug_interface_lock_ = nullptr;
-std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
-Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
-
 struct AllMutexData {
   // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
   Atomic<const BaseMutex*> all_mutexes_guard;
@@ -144,27 +99,6 @@
   const BaseMutex* const mutex_;
 };
 
-class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
- public:
-  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
-    for (uint32_t i = 0;
-         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
-                                                                                     mutex);
-         ++i) {
-      BackOff(i);
-    }
-  }
-
-  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
-    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
-              mutex_);
-    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
-  }
-
- private:
-  const BaseMutex* const mutex_;
-};
-
 // Scoped class that generates events at the beginning and end of lock contention.
 class ScopedContentionRecorder final : public ValueObject {
  public:
@@ -1042,266 +976,4 @@
   return timed_out;
 }
 
-void Locks::Init() {
-  if (logging_lock_ != nullptr) {
-    // Already initialized.
-    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
-      DCHECK(modify_ldt_lock_ != nullptr);
-    } else {
-      DCHECK(modify_ldt_lock_ == nullptr);
-    }
-    DCHECK(abort_lock_ != nullptr);
-    DCHECK(alloc_tracker_lock_ != nullptr);
-    DCHECK(allocated_monitor_ids_lock_ != nullptr);
-    DCHECK(allocated_thread_ids_lock_ != nullptr);
-    DCHECK(breakpoint_lock_ != nullptr);
-    DCHECK(classlinker_classes_lock_ != nullptr);
-    DCHECK(custom_tls_lock_ != nullptr);
-    DCHECK(deoptimization_lock_ != nullptr);
-    DCHECK(heap_bitmap_lock_ != nullptr);
-    DCHECK(oat_file_manager_lock_ != nullptr);
-    DCHECK(verifier_deps_lock_ != nullptr);
-    DCHECK(host_dlopen_handles_lock_ != nullptr);
-    DCHECK(intern_table_lock_ != nullptr);
-    DCHECK(jni_function_table_lock_ != nullptr);
-    DCHECK(jni_libraries_lock_ != nullptr);
-    DCHECK(logging_lock_ != nullptr);
-    DCHECK(mutator_lock_ != nullptr);
-    DCHECK(profiler_lock_ != nullptr);
-    DCHECK(cha_lock_ != nullptr);
-    DCHECK(subtype_check_lock_ != nullptr);
-    DCHECK(thread_list_lock_ != nullptr);
-    DCHECK(thread_suspend_count_lock_ != nullptr);
-    DCHECK(trace_lock_ != nullptr);
-    DCHECK(unexpected_signal_lock_ != nullptr);
-    DCHECK(user_code_suspension_lock_ != nullptr);
-    DCHECK(dex_lock_ != nullptr);
-    DCHECK(native_debug_interface_lock_ != nullptr);
-  } else {
-    // Create global locks in level order from highest lock level to lowest.
-    LockLevel current_lock_level = kInstrumentEntrypointsLock;
-    DCHECK(instrument_entrypoints_lock_ == nullptr);
-    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
-
-    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
-      if ((new_level) >= current_lock_level) { \
-        /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
-        fprintf(stderr, "New local level %d is not less than current level %d\n", \
-                new_level, current_lock_level); \
-        exit(1); \
-      } \
-      current_lock_level = new_level;
-
-    UPDATE_CURRENT_LOCK_LEVEL(kUserCodeSuspensionLock);
-    DCHECK(user_code_suspension_lock_ == nullptr);
-    user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
-    DCHECK(mutator_lock_ == nullptr);
-    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
-    DCHECK(heap_bitmap_lock_ == nullptr);
-    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
-    DCHECK(trace_lock_ == nullptr);
-    trace_lock_ = new Mutex("trace lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
-    DCHECK(runtime_shutdown_lock_ == nullptr);
-    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
-    DCHECK(profiler_lock_ == nullptr);
-    profiler_lock_ = new Mutex("profiler lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
-    DCHECK(deoptimization_lock_ == nullptr);
-    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
-    DCHECK(alloc_tracker_lock_ == nullptr);
-    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
-    DCHECK(thread_list_lock_ == nullptr);
-    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
-    DCHECK(jni_libraries_lock_ == nullptr);
-    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
-    DCHECK(breakpoint_lock_ == nullptr);
-    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
-    DCHECK(subtype_check_lock_ == nullptr);
-    subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
-    DCHECK(classlinker_classes_lock_ == nullptr);
-    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
-                                                      current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
-    DCHECK(allocated_monitor_ids_lock_ == nullptr);
-    allocated_monitor_ids_lock_ =  new Mutex("allocated monitor ids lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
-    DCHECK(allocated_thread_ids_lock_ == nullptr);
-    allocated_thread_ids_lock_ =  new Mutex("allocated thread ids lock", current_lock_level);
-
-    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
-      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
-      DCHECK(modify_ldt_lock_ == nullptr);
-      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
-    }
-
-    UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
-    DCHECK(dex_lock_ == nullptr);
-    dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
-    DCHECK(oat_file_manager_lock_ == nullptr);
-    oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
-    DCHECK(verifier_deps_lock_ == nullptr);
-    verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
-    DCHECK(host_dlopen_handles_lock_ == nullptr);
-    host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
-    DCHECK(intern_table_lock_ == nullptr);
-    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
-    DCHECK(reference_processor_lock_ == nullptr);
-    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
-    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
-    reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
-    DCHECK(reference_queue_weak_references_lock_ == nullptr);
-    reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
-    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
-    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
-    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
-    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
-    DCHECK(reference_queue_soft_references_lock_ == nullptr);
-    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
-    DCHECK(jni_globals_lock_ == nullptr);
-    jni_globals_lock_ =
-        new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
-    DCHECK(jni_weak_globals_lock_ == nullptr);
-    jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
-    DCHECK(jni_function_table_lock_ == nullptr);
-    jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
-    DCHECK(custom_tls_lock_ == nullptr);
-    custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
-    DCHECK(cha_lock_ == nullptr);
-    cha_lock_ = new Mutex("CHA lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
-    DCHECK(native_debug_interface_lock_ == nullptr);
-    native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
-    DCHECK(abort_lock_ == nullptr);
-    abort_lock_ = new Mutex("abort lock", current_lock_level, true);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
-    DCHECK(thread_suspend_count_lock_ == nullptr);
-    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
-    DCHECK(unexpected_signal_lock_ == nullptr);
-    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
-
-    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
-    DCHECK(logging_lock_ == nullptr);
-    logging_lock_ = new Mutex("logging lock", current_lock_level, true);
-
-    #undef UPDATE_CURRENT_LOCK_LEVEL
-
-    // List of mutexes that we may hold when accessing a weak ref.
-    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
-    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
-    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
-
-    InitConditions();
-  }
-}
-
-void Locks::InitConditions() {
-  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
-}
-
-void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
-  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
-}
-
-// Helper to allow checking shutdown while ignoring locking requirements.
-bool Locks::IsSafeToCallAbortRacy() {
-  Locks::ClientCallback* safe_to_call_abort_cb =
-      safe_to_call_abort_callback.load(std::memory_order_acquire);
-  return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
-}
-
-void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
-  if (need_lock) {
-    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
-    expected_mutexes_on_weak_ref_access_.push_back(mutex);
-  } else {
-    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
-    expected_mutexes_on_weak_ref_access_.push_back(mutex);
-  }
-}
-
-void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
-  if (need_lock) {
-    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
-    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-    auto it = std::find(list.begin(), list.end(), mutex);
-    DCHECK(it != list.end());
-    list.erase(it);
-  } else {
-    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
-    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-    auto it = std::find(list.begin(), list.end(), mutex);
-    DCHECK(it != list.end());
-    list.erase(it);
-  }
-}
-
-bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
-  ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
-  std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
-  return std::find(list.begin(), list.end(), mutex) != list.end();
-}
-
 }  // namespace art
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e391a15..41a47af 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -30,6 +30,7 @@
 #include "base/atomic.h"
 #include "base/globals.h"
 #include "base/macros.h"
+#include "locks.h"
 
 #if defined(__linux__)
 #define ART_USE_FUTEXES 1
@@ -50,112 +51,7 @@
 class SHARED_LOCKABLE MutatorMutex;
 class ScopedContentionRecorder;
 class Thread;
-class Mutex;
-
-// LockLevel is used to impose a lock hierarchy [1] where acquisition of a Mutex at a higher or
-// equal level to a lock a thread holds is invalid. The lock hierarchy achieves a cycle free
-// partial ordering and thereby cause deadlock situations to fail checks.
-//
-// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
-enum LockLevel : uint8_t {
-  kLoggingLock = 0,
-  kSwapMutexesLock,
-  kUnexpectedSignalLock,
-  kThreadSuspendCountLock,
-  kAbortLock,
-  kNativeDebugInterfaceLock,
-  kSignalHandlingLock,
-  // A generic lock level for mutexs that should not allow any additional mutexes to be gained after
-  // acquiring it.
-  kGenericBottomLock,
-  // Tracks the second acquisition at the same lock level for kThreadWaitLock. This is an exception
-  // to the normal lock ordering, used to implement Monitor::Wait - while holding one kThreadWait
-  // level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
-  // the second lock acquisition does not result in deadlock. This is implemented in the lock
-  // order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
-  // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
-  // is here near the bottom of the hierarchy because other locks should not be
-  // acquired while it is held. kThreadWaitLock cannot be moved here because GC
-  // activity acquires locks while holding the wait lock.
-  kThreadWaitWakeLock,
-  kJdwpAdbStateLock,
-  kJdwpSocketLock,
-  kRegionSpaceRegionLock,
-  kMarkSweepMarkStackLock,
-  // Can be held while GC related work is done, and thus must be above kMarkSweepMarkStackLock
-  kThreadWaitLock,
-  kCHALock,
-  kJitCodeCacheLock,
-  kRosAllocGlobalLock,
-  kRosAllocBracketLock,
-  kRosAllocBulkFreeLock,
-  kTaggingLockLevel,
-  kTransactionLogLock,
-  kCustomTlsLock,
-  kJniFunctionTableLock,
-  kJniWeakGlobalsLock,
-  kJniGlobalsLock,
-  kReferenceQueueSoftReferencesLock,
-  kReferenceQueuePhantomReferencesLock,
-  kReferenceQueueFinalizerReferencesLock,
-  kReferenceQueueWeakReferencesLock,
-  kReferenceQueueClearedReferencesLock,
-  kReferenceProcessorLock,
-  kJitDebugInterfaceLock,
-  kAllocSpaceLock,
-  kBumpPointerSpaceBlockLock,
-  kArenaPoolLock,
-  kInternTableLock,
-  kOatFileSecondaryLookupLock,
-  kHostDlOpenHandlesLock,
-  kVerifierDepsLock,
-  kOatFileManagerLock,
-  kTracingUniqueMethodsLock,
-  kTracingStreamingLock,
-  kClassLoaderClassesLock,
-  kDefaultMutexLevel,
-  kDexLock,
-  kMarkSweepLargeObjectLock,
-  kJdwpObjectRegistryLock,
-  kModifyLdtLock,
-  kAllocatedThreadIdsLock,
-  kMonitorPoolLock,
-  kClassLinkerClassesLock,  // TODO rename.
-  kDexToDexCompilerLock,
-  kSubtypeCheckLock,
-  kBreakpointLock,
-  kMonitorLock,
-  kMonitorListLock,
-  kJniLoadLibraryLock,
-  kThreadListLock,
-  kAllocTrackerLock,
-  kDeoptimizationLock,
-  kProfilerLock,
-  kJdwpShutdownLock,
-  kJdwpEventListLock,
-  kJdwpAttachLock,
-  kJdwpStartLock,
-  kRuntimeShutdownLock,
-  kTraceLock,
-  kHeapBitmapLock,
-  kMutatorLock,
-  kUserCodeSuspensionLock,
-  kInstrumentEntrypointsLock,
-  kZygoteCreationLock,
-
-  // The highest valid lock level. Use this if there is code that should only be called with no
-  // other locks held. Since this is the highest lock level we also allow it to be held even if the
-  // runtime or current thread is not fully set-up yet (for example during thread attach). Note that
-  // this lock also has special behavior around the mutator_lock_. Since the mutator_lock_ is not
-  // really a 'real' lock we allow this to be locked when the mutator_lock_ is held exclusive.
-  // Furthermore, the mutator_lock_ may not be acquired in any form when a lock of this level is
-  // held. Since the mutator_lock_ being held strong means that all other threads are suspended this
-  // will prevent deadlocks while still allowing this lock level to function as a "highest" level.
-  kTopLockLevel,
-
-  kLockLevelCount  // Must come last.
-};
-std::ostream& operator<<(std::ostream& os, const LockLevel& rhs);
+class LOCKABLE Mutex;
 
 constexpr bool kDebugLocking = kIsDebugBuild;
 
@@ -578,226 +474,6 @@
   DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
 };
 
-// For StartNoThreadSuspension and EndNoThreadSuspension.
-class CAPABILITY("role") Role {
- public:
-  void Acquire() ACQUIRE() {}
-  void Release() RELEASE() {}
-  const Role& operator!() const { return *this; }
-};
-
-class Uninterruptible : public Role {
-};
-
-// Global mutexes corresponding to the levels above.
-class Locks {
- public:
-  static void Init();
-  static void InitConditions() NO_THREAD_SAFETY_ANALYSIS;  // Condition variables.
-
-  // Destroying various lock types can emit errors that vary depending upon
-  // whether the client (art::Runtime) is currently active.  Allow the client
-  // to set a callback that is used to check when it is acceptable to call
-  // Abort.  The default behavior is that the client *is not* able to call
-  // Abort if no callback is established.
-  using ClientCallback = bool();
-  static void SetClientCallback(ClientCallback* is_safe_to_call_abort_cb) NO_THREAD_SAFETY_ANALYSIS;
-  // Checks for whether it is safe to call Abort() without using locks.
-  static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
-
-  // Add a mutex to expected_mutexes_on_weak_ref_access_.
-  static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
-  // Remove a mutex from expected_mutexes_on_weak_ref_access_.
-  static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
-  // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
-  static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
-
-  // Guards allocation entrypoint instrumenting.
-  static Mutex* instrument_entrypoints_lock_;
-
-  // Guards code that deals with user-code suspension. This mutex must be held when suspending or
-  // resuming threads with SuspendReason::kForUserCode. It may be held by a suspended thread, but
-  // only if the suspension is not due to SuspendReason::kForUserCode.
-  static Mutex* user_code_suspension_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_);
-
-  // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger
-  // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass
-  // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger
-  // thread; threads in the runnable state will pass the barrier when they transit to the suspended
-  // state. GC/Debugger thread will be woken up when all mutator threads are suspended.
-  //
-  // Thread suspension:
-  // mutator thread                                | GC/Debugger
-  //   .. running ..                               |   .. running ..
-  //   .. running ..                               | Request thread suspension by:
-  //   .. running ..                               |   - acquiring thread_suspend_count_lock_
-  //   .. running ..                               |   - incrementing Thread::suspend_count_ on
-  //   .. running ..                               |     all mutator threads
-  //   .. running ..                               |   - releasing thread_suspend_count_lock_
-  //   .. running ..                               | Block wait for all threads to pass a barrier
-  // Poll Thread::suspend_count_ and enter full    |   .. blocked ..
-  // suspend code.                                 |   .. blocked ..
-  // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier
-  // x: Acquire thread_suspend_count_lock_         |   .. running ..
-  // while Thread::suspend_count_ > 0              |   .. running ..
-  //   - wait on Thread::resume_cond_              |   .. running ..
-  //     (releases thread_suspend_count_lock_)     |   .. running ..
-  //   .. waiting ..                               | Request thread resumption by:
-  //   .. waiting ..                               |   - acquiring thread_suspend_count_lock_
-  //   .. waiting ..                               |   - decrementing Thread::suspend_count_ on
-  //   .. waiting ..                               |     all mutator threads
-  //   .. waiting ..                               |   - notifying on Thread::resume_cond_
-  //    - re-acquire thread_suspend_count_lock_    |   - releasing thread_suspend_count_lock_
-  // Release thread_suspend_count_lock_            |  .. running ..
-  // Change to kRunnable                           |  .. running ..
-  //  - this uses a CAS operation to ensure the    |  .. running ..
-  //    suspend request flag isn't raised as the   |  .. running ..
-  //    state is changed                           |  .. running ..
-  //  - if the CAS operation fails then goto x     |  .. running ..
-  //  .. running ..                                |  .. running ..
-  static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(user_code_suspension_lock_);
-
-  // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
-  static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
-
-  // Guards shutdown of the runtime.
-  static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
-
-  // Guards background profiler global state.
-  static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
-
-  // Guards trace (ie traceview) requests.
-  static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
-
-  // Guards debugger recent allocation records.
-  static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
-
-  // Guards updates to instrumentation to ensure mutual exclusion of
-  // events like deoptimization requests.
-  // TODO: improve name, perhaps instrumentation_update_lock_.
-  static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
-
-  // Guard the update of the SubtypeCheck data stores in each Class::status_ field.
-  // This lock is used in SubtypeCheck methods which are the interface for
-  // any SubtypeCheck-mutating methods.
-  // In Class::IsSubClass, the lock is not required since it does not update the SubtypeCheck data.
-  static Mutex* subtype_check_lock_ ACQUIRED_AFTER(deoptimization_lock_);
-
-  // The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
-  // attaching and detaching.
-  static Mutex* thread_list_lock_ ACQUIRED_AFTER(subtype_check_lock_);
-
-  // Signaled when threads terminate. Used to determine when all non-daemons have terminated.
-  static ConditionVariable* thread_exit_cond_ GUARDED_BY(Locks::thread_list_lock_);
-
-  // Guards maintaining loading library data structures.
-  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
-
-  // Guards breakpoints.
-  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
-
-  // Guards lists of classes within the class linker.
-  static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
-
-  // When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
-  // doesn't try to hold a higher level Mutex.
-  #define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::classlinker_classes_lock_)
-
-  static Mutex* allocated_monitor_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
-
-  // Guard the allocation/deallocation of thread ids.
-  static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(allocated_monitor_ids_lock_);
-
-  // Guards modification of the LDT on x86.
-  static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
-
-  static ReaderWriterMutex* dex_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
-
-  // Guards opened oat files in OatFileManager.
-  static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(dex_lock_);
-
-  // Guards extra string entries for VerifierDeps.
-  static ReaderWriterMutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
-
-  // Guards dlopen_handles_ in DlOpenOatFile.
-  static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_);
-
-  // Guards intern table.
-  static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_);
-
-  // Guards reference processor.
-  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
-
-  // Guards cleared references queue.
-  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
-
-  // Guards weak references queue.
-  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
-
-  // Guards finalizer references queue.
-  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
-
-  // Guards phantom references queue.
-  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
-
-  // Guards soft references queue.
-  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
-
-  // Guard accesses to the JNI Global Reference table.
-  static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
-
-  // Guard accesses to the JNI Weak Global Reference table.
-  static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
-
-  // Guard accesses to the JNI function table override.
-  static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
-
-  // Guard accesses to the Thread::custom_tls_. We use this to allow the TLS of other threads to be
-  // read (the reader must hold the ThreadListLock or have some other way of ensuring the thread
-  // will not die in that case though). This is useful for (eg) the implementation of
-  // GetThreadLocalStorage.
-  static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
-
-  // Guards Class Hierarchy Analysis (CHA).
-  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
-
-  // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
-  // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
-  // actually only encodes the mutex being below jni_function_table_lock_ although having
-  // kGenericBottomLock level is lower than this.
-  #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::cha_lock_)
-
-  // Have an exclusive aborting thread.
-  static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
-
-  // Allow mutual exclusion when manipulating Thread::suspend_count_.
-  // TODO: Does the trade-off of a per-thread lock make sense?
-  static Mutex* thread_suspend_count_lock_ ACQUIRED_AFTER(abort_lock_);
-
-  // One unexpected signal at a time lock.
-  static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
-
-  // Guards the magic global variables used by native tools (e.g. libunwind).
-  static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
-
-  // Have an exclusive logging thread.
-  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
-
-  // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
-  // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
-  // encounter an unexpected mutex on accessing weak refs,
-  // Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
-  static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
-  static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
-  class ScopedExpectedMutexesOnWeakRefAccessLock;
-};
-
-class Roles {
- public:
-  // Uninterruptible means that the thread may not become suspended.
-  static Uninterruptible uninterruptible_;
-};
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_MUTEX_H_
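
The lock declarations removed above, together with the DEFAULT_MUTEX_ACQUIRED_AFTER and BOTTOM_MUTEX_ACQUIRED_AFTER macros, encode the runtime's lock-ordering hierarchy for annotalysis. The convention they describe is simply to annotate each newly declared lock with the level it sits at, for example (hypothetical lock name, shown here only as an illustration of the macro usage):

// Hypothetical lock placed at the default level of the hierarchy; annotalysis
// then checks that code holding it does not try to acquire a higher-level mutex.
Mutex my_feature_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
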
diff --git a/runtime/cha.cc b/runtime/cha.cc
index 8e06fda..5110b7a 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -18,6 +18,7 @@
 
 #include "art_method-inl.h"
 #include "base/logging.h"  // For VLOG
+#include "base/mutex.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "linear_alloc.h"
diff --git a/runtime/cha.h b/runtime/cha.h
index d1a1b7c..a07ee91 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -21,7 +21,7 @@
 #include <unordered_set>
 
 #include "base/enums.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "handle.h"
 #include "mirror/class.h"
 #include "oat_quick_method_header.h"
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 6703205..43f3ed3 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -21,6 +21,7 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/mutex.h"
 #include "class_linker.h"
 #include "gc_root-inl.h"
 #include "handle_scope-inl.h"
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 9b2e1a1..6196612 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1037,13 +1037,20 @@
   runtime->SetSentinel(heap->AllocNonMovableObject<true>(
       self, java_lang_Object, java_lang_Object->GetObjectSize(), VoidFunctor()));
 
-  for (gc::space::ImageSpace* image_space : spaces) {
+  const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
+  if (boot_class_path.size() != spaces.size()) {
+    *error_msg = StringPrintf("Boot class path has %zu components but there are %zu image spaces.",
+                              boot_class_path.size(),
+                              spaces.size());
+    return false;
+  }
+  for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
     // Boot class loader, use a null handle.
     std::vector<std::unique_ptr<const DexFile>> dex_files;
-    if (!AddImageSpace(image_space,
+    if (!AddImageSpace(spaces[i],
                        ScopedNullHandle<mirror::ClassLoader>(),
-                       /*dex_elements=*/nullptr,
-                       /*dex_location=*/nullptr,
+                       /*dex_elements=*/ nullptr,
+                       /*dex_location=*/ boot_class_path[i].c_str(),
                        /*out*/&dex_files,
                        error_msg)) {
       return false;
@@ -1981,13 +1988,7 @@
     std::string dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
     // TODO: Only store qualified paths.
     // If non qualified, qualify it.
-    if (dex_file_location.find('/') == std::string::npos) {
-      std::string dex_location_path = dex_location;
-      const size_t pos = dex_location_path.find_last_of('/');
-      CHECK_NE(pos, std::string::npos);
-      dex_location_path = dex_location_path.substr(0, pos + 1);  // Keep trailing '/'
-      dex_file_location = dex_location_path + dex_file_location;
-    }
+    dex_file_location = OatFile::ResolveRelativeEncodedDexLocation(dex_location, dex_file_location);
     std::unique_ptr<const DexFile> dex_file = OpenOatDexFile(oat_file,
                                                              dex_file_location.c_str(),
                                                              error_msg);
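
The hunk above also delegates the inline path qualification to OatFile::ResolveRelativeEncodedDexLocation. For orientation, a minimal standalone sketch of what the removed inline block did; QualifyRelativeDexLocation is an illustrative name, not part of the OatFile API, and the real helper may handle more cases:

#include <string>

// Qualify an encoded dex-file location that lacks a directory component by
// prepending the directory of the anchor location (keeping the trailing '/'),
// mirroring the removed inline block above.
static std::string QualifyRelativeDexLocation(const std::string& anchor,
                                              const std::string& encoded) {
  if (encoded.find('/') != std::string::npos) {
    return encoded;  // Already qualified.
  }
  const size_t pos = anchor.find_last_of('/');
  if (pos == std::string::npos) {
    return encoded;  // No directory to borrow from; the removed code CHECKed this.
  }
  return anchor.substr(0, pos + 1) + encoded;  // Keep the trailing '/'.
}

// Example: QualifyRelativeDexLocation("/system/framework/boot.jar", "classes2.dex")
// yields "/system/framework/classes2.dex".
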
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index dd5f911..d3eab7c 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -25,8 +25,8 @@
 #include <vector>
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/class_accessor.h"
 #include "dex/dex_cache_resolved_classes.h"
 #include "dex/dex_file.h"
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index 945d659..69476df 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_CLASS_LOADER_UTILS_H_
 
 #include "art_field-inl.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "handle_scope.h"
 #include "jni/jni_internal.h"
 #include "mirror/class_loader.h"
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 15ab5f0..5c5431d 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -20,8 +20,8 @@
 #include "android-base/logging.h"
 #include "art_field.h"
 #include "art_method.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "class_linker.h"
 #include "dex/code_item_accessors.h"
 #include "dex/primitive.h"
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 0300fa1..a101976 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -44,6 +44,7 @@
 #include "dex/dex_file_loader.h"
 #include "dex/primitive.h"
 #include "gc/heap.h"
+#include "gc/space/image_space.h"
 #include "gc_root-inl.h"
 #include "gtest/gtest.h"
 #include "handle_scope-inl.h"
@@ -111,15 +112,14 @@
   std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
   std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
 
-
   RuntimeOptions options;
-  std::string boot_class_path_string = "-Xbootclasspath";
-  for (const std::string &core_dex_file_name : GetLibCoreDexFileNames()) {
-    boot_class_path_string += ":";
-    boot_class_path_string += core_dex_file_name;
-  }
+  std::string boot_class_path_string =
+      GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames());
+  std::string boot_class_path_locations_string =
+      GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations());
 
   options.push_back(std::make_pair(boot_class_path_string, nullptr));
+  options.push_back(std::make_pair(boot_class_path_locations_string, nullptr));
   options.push_back(std::make_pair("-Xcheck:jni", nullptr));
   options.push_back(std::make_pair(min_heap_string, nullptr));
   options.push_back(std::make_pair(max_heap_string, nullptr));
@@ -386,6 +386,38 @@
   }
 }
 
+bool CommonRuntimeTestImpl::StartDex2OatCommandLine(/*out*/std::vector<std::string>* argv,
+                                                    /*out*/std::string* error_msg) {
+  DCHECK(argv != nullptr);
+  DCHECK(argv->empty());
+
+  Runtime* runtime = Runtime::Current();
+  const std::vector<gc::space::ImageSpace*>& image_spaces =
+      runtime->GetHeap()->GetBootImageSpaces();
+  if (image_spaces.empty()) {
+    *error_msg = "No image location found for Dex2Oat.";
+    return false;
+  }
+  std::string image_location = image_spaces[0]->GetImageLocation();
+
+  argv->push_back(runtime->GetCompilerExecutable());
+  if (runtime->IsJavaDebuggable()) {
+    argv->push_back("--debuggable");
+  }
+  runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(argv);
+
+  argv->push_back("--runtime-arg");
+  argv->push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+  argv->push_back("--runtime-arg");
+  argv->push_back(GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
+
+  argv->push_back("--boot-image=" + image_location);
+
+  std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
+  argv->insert(argv->end(), compiler_options.begin(), compiler_options.end());
+  return true;
+}
+
 CheckJniAbortCatcher::CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
   vm_->SetCheckJniAbortHook(Hook, &actual_);
 }
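
StartDex2OatCommandLine() assembles only the common dex2oat prefix: the compiler executable, the current runtime features, the boot class path and its locations, the boot image, and the runtime's compiler options. Callers append their own inputs and outputs and then run the command, as the rewritten DexoptTest::Dex2Oat() below does. A hedged usage sketch; GetTestDexFileName(), odex_location, and Exec() here stand in for whatever helpers the calling test already has:

std::vector<std::string> argv;
std::string error_msg;
if (!CommonRuntimeTestImpl::StartDex2OatCommandLine(&argv, &error_msg)) {
  LOG(ERROR) << error_msg;
  return false;
}
// Test-specific arguments go after the common prefix (names below are assumptions).
argv.push_back("--dex-file=" + GetTestDexFileName("Main"));
argv.push_back("--oat-file=" + odex_location);
return Exec(argv, &error_msg);  // Assumed helper that forks and waits for dex2oat.
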
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 0fee797..29b7813 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -27,7 +27,7 @@
 #include "arch/instruction_set.h"
 #include "base/common_art_test.h"
 #include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/os.h"
 #include "base/unix_file/fd_file.h"
 #include "dex/art_dex_file_loader.h"
@@ -97,6 +97,9 @@
     return true;
   }
 
+  static bool StartDex2OatCommandLine(/*out*/std::vector<std::string>* argv,
+                                      /*out*/std::string* error_msg);
+
  protected:
   // Allow subclasses such as CommonCompilerTest to add extra options.
   virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 6acff6f..ca9c96a 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_COMMON_THROWS_H_
 #define ART_RUNTIME_COMMON_THROWS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "obj_ptr.h"
 
 namespace art {
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index b29eb70..18632dc 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_COMPILER_CALLBACKS_H_
 #define ART_RUNTIME_COMPILER_CALLBACKS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/class_reference.h"
 #include "class_status.h"
 
diff --git a/runtime/debug_print.h b/runtime/debug_print.h
index df00f06..e2990d4 100644
--- a/runtime/debug_print.h
+++ b/runtime/debug_print.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_DEBUG_PRINT_H_
 #define ART_RUNTIME_DEBUG_PRINT_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "mirror/object.h"
 
 // Helper functions for printing extra information for certain hard to diagnose bugs.
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index f52a0f9..b46c933 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -46,26 +46,13 @@
   ReserveImageSpace();
 }
 
-static std::string ImageLocation() {
-  Runtime* runtime = Runtime::Current();
-  const std::vector<gc::space::ImageSpace*>& image_spaces =
-      runtime->GetHeap()->GetBootImageSpaces();
-  if (image_spaces.empty()) {
-    return "";
-  }
-  return image_spaces[0]->GetImageLocation();
-}
-
 bool DexoptTest::Dex2Oat(const std::vector<std::string>& args, std::string* error_msg) {
-  Runtime* runtime = Runtime::Current();
-
   std::vector<std::string> argv;
-  argv.push_back(runtime->GetCompilerExecutable());
-  if (runtime->IsJavaDebuggable()) {
-    argv.push_back("--debuggable");
+  if (!CommonRuntimeTest::StartDex2OatCommandLine(&argv, error_msg)) {
+    return false;
   }
-  runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
 
+  Runtime* runtime = Runtime::Current();
   if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kDisabled) {
     argv.push_back("--runtime-arg");
     argv.push_back("-Xhidden-api-checks");
@@ -75,11 +62,6 @@
     argv.push_back("--host");
   }
 
-  argv.push_back("--boot-image=" + ImageLocation());
-
-  std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
-  argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
   argv.insert(argv.end(), args.begin(), args.end());
 
   std::string command_line(android::base::Join(argv, ' '));
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index c8bf6d0..e10a6e8 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -21,8 +21,8 @@
 #include <stdint.h>
 
 #include "base/callee_save_type.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file_types.h"
 #include "dex/dex_instruction.h"
 #include "gc/allocator_type.h"
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 1e30907..e555d68 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -20,7 +20,7 @@
 #include "arch/instruction_set.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "quick/quick_method_frame_info.h"
 #include "thread-inl.h"
 
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
index bd1e295..937ba8e 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
 #define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ALLOC_ENTRYPOINTS_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "gc/allocator_type.h"
 #include "quick_entrypoints.h"
 
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 795faa8..243f7ec 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -19,8 +19,8 @@
 
 #include <jni.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "deoptimization_kind.h"
 #include "offsets.h"
 
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 24ef0b1..d3be51f 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -23,7 +23,8 @@
 
 #include <vector>
 
-#include "base/mutex.h"  // For annotalysis.
+#include "base/globals.h"  // For CanDoImplicitNullCheckOn.
+#include "base/locks.h"  // For annotalysis.
 
 namespace art {
 
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index ffef566..bdc686e 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -24,8 +24,8 @@
 #include <vector>
 
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index f163898..b84f22f 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -20,8 +20,8 @@
 #include <memory>
 
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index c997f8d..e477556 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -20,8 +20,8 @@
 #include <android-base/logging.h>
 
 #include "base/allocator.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "space_bitmap.h"
 
 namespace art {
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 7eca792..2e42f8d 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -21,8 +21,8 @@
 
 #include "base/bit_utils.h"
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc/space/space.h"
 
 namespace art {
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index b96f0d3..469074f 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -19,7 +19,7 @@
 
 #include "base/allocator.h"
 #include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/safe_map.h"
 
 #include <set>
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index fcc3007..8561f06 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -24,8 +24,8 @@
 #include <vector>
 
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index 7675a22..a578252 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -20,8 +20,8 @@
 #include <list>
 #include <memory>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 6b394c7..3160422 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -242,15 +242,33 @@
   // Use load-acquire on the read barrier pointer to ensure that we never see a black (non-gray)
   // read barrier state with an unmarked bit due to reordering.
   DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
-  if (kEnableGenerationalConcurrentCopyingCollection
-      && young_gen_
-      && !done_scanning_.load(std::memory_order_acquire)) {
-    return from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState();
-  }
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
+  } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+             || done_scanning_.load(std::memory_order_acquire)) {
+    // If the card table scanning is not finished yet, then only the read-barrier
+    // state should be checked. Checking the mark bitmap is unreliable, because
+    // some objects whose corresponding card is dirty may already be set in the
+    // mark bitmap but cannot be considered marked until their read-barrier state
+    // is set to gray.
+    //
+    // Why read the read-barrier state before checking done_scanning_?
+    // If the read-barrier state were read *after* done_scanning_, there would
+    // be a race in which this function could return false even though the
+    // object has already been marked. The following scenario may cause the
+    // race:
+    //
+    // 1. Mutator thread reads done_scanning_ and upon finding it false, gets
+    // suspended before reading the object's read-barrier state.
+    // 2. GC thread finishes card-table scan and then sets done_scanning_ to
+    // true.
+    // 3. GC thread grays the object, scans it, marks in the bitmap, and then
+    // changes its read-barrier state back to non-gray.
+    // 4. Mutator thread resumes, reads the object's read-barrier state and
+    // returns false.
+    return region_space_bitmap_->Test(from_ref);
   }
-  return region_space_bitmap_->Test(from_ref);
+  return false;
 }
 
 }  // namespace collector
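
The ordering argument in the comment above can be restated with a small self-contained model: once the mutator's acquire load observes the final non-gray state written by the GC's release store, the GC's earlier done_scanning_ and bitmap updates are visible as well, so an already-marked object cannot be reported unmarked. This is only a model of the interleaving described in the comment, with plain std::atomic<bool> flags standing in for the read-barrier state, the mark bitmap, and done_scanning_:

#include <atomic>

std::atomic<bool> done_scanning{false};

struct ModelObject {
  std::atomic<bool> rb_state_gray{false};
  std::atomic<bool> marked_in_bitmap{false};
};

// Mutator-side check in the order used above: read-barrier state first,
// done_scanning second, bitmap last.
bool IsMarkedModel(const ModelObject& obj) {
  if (obj.rb_state_gray.load(std::memory_order_acquire)) {
    return true;  // Gray counts as marked regardless of the scan phase.
  }
  if (done_scanning.load(std::memory_order_acquire)) {
    return obj.marked_in_bitmap.load(std::memory_order_acquire);
  }
  return false;  // Card scan still running: only gray is conclusive.
}

// GC-side order from the comment: finish the card scan, then gray, mark, and
// un-gray the object. If the mutator's first load sees the final (non-gray)
// release store, its later loads see done_scanning == true and the bitmap bit,
// so it reports the object as marked.
void GcMarkModel(ModelObject& obj) {
  done_scanning.store(true, std::memory_order_release);
  obj.rb_state_gray.store(true, std::memory_order_release);
  obj.marked_in_bitmap.store(true, std::memory_order_release);
  obj.rb_state_gray.store(false, std::memory_order_release);
}
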
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index fefe9ab..7736568 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -332,11 +332,6 @@
               << reinterpret_cast<void*>(region_space_->Limit());
   }
   CheckEmptyMarkStack();
-  if (kIsDebugBuild) {
-    MutexLock mu(Thread::Current(), mark_stack_lock_);
-    CHECK(false_gray_stack_.empty());
-  }
-
   rb_mark_bit_stack_full_ = false;
   mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
   if (measure_read_barrier_slow_path_) {
@@ -1056,9 +1051,6 @@
     Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
     // Marking is done. Disable marking.
     DisableMarking();
-    if (kUseBakerReadBarrier) {
-      ProcessFalseGrayStack();
-    }
     CheckEmptyMarkStack();
   }
 
@@ -1170,32 +1162,6 @@
   mark_stack_mode_.store(kMarkStackModeOff, std::memory_order_seq_cst);
 }
 
-void ConcurrentCopying::PushOntoFalseGrayStack(Thread* const self, mirror::Object* ref) {
-  CHECK(kUseBakerReadBarrier);
-  DCHECK(ref != nullptr);
-  MutexLock mu(self, mark_stack_lock_);
-  false_gray_stack_.push_back(ref);
-}
-
-void ConcurrentCopying::ProcessFalseGrayStack() {
-  CHECK(kUseBakerReadBarrier);
-  // Change the objects on the false gray stack from gray to non-gray (conceptually black).
-  MutexLock mu(Thread::Current(), mark_stack_lock_);
-  for (mirror::Object* obj : false_gray_stack_) {
-    DCHECK(IsMarked(obj));
-    // The object could be non-gray (conceptually black) here if a thread got preempted after a
-    // success at the AtomicSetReadBarrierState in MarkNonMoving(), GC started marking through it
-    // (but not finished so still gray), the thread ran to register it onto the false gray stack,
-    // and then GC eventually marked it black (non-gray) after it finished scanning it.
-    if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
-      bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
-                                                    ReadBarrier::NonGrayState());
-      DCHECK(success);
-    }
-  }
-  false_gray_stack_.clear();
-}
-
 void ConcurrentCopying::IssueEmptyCheckpoint() {
   Thread* self = Thread::Current();
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
@@ -1418,24 +1384,6 @@
 }
 
 // The following visitors are used to assert the to-space invariant.
-class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
- public:
-  explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
-      : collector_(collector) {}
-
-  void operator()(mirror::Object* ref) const
-      REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
-    if (ref == nullptr) {
-      // OK.
-      return;
-    }
-    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
-  }
-
- private:
-  ConcurrentCopying* const collector_;
-};
-
 class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
  public:
   explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
@@ -1447,8 +1395,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
-    AssertToSpaceInvariantRefsVisitor visitor(collector_);
-    visitor(ref);
+    collector_->AssertToSpaceInvariant(obj.Ptr(), offset, ref);
   }
   void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
@@ -1464,8 +1411,8 @@
 
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    AssertToSpaceInvariantRefsVisitor visitor(collector_);
-    visitor(root->AsMirrorPtr());
+    mirror::Object* ref = root->AsMirrorPtr();
+    collector_->AssertToSpaceInvariant(/* obj */ nullptr, MemberOffset(0), ref);
   }
 
  private:
@@ -1671,30 +1618,69 @@
   // Invariant: There should be no object from a newly-allocated
   // region (either large or non-large) on the mark stack.
   DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref)) << to_ref;
-  if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
-    // Mark the bitmap only in the GC thread here so that we don't need a CAS.
-    if (!kUseBakerReadBarrier ||
-        !region_space_bitmap_->Set(to_ref)) {
-      // It may be already marked if we accidentally pushed the same object twice due to the racy
-      // bitmap read in MarkUnevacFromSpaceRegion.
-      if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
-        CHECK(region_space_->IsLargeObject(to_ref));
-        region_space_->ZeroLiveBytesForLargeObject(to_ref);
-        Scan<true>(to_ref);
-      } else {
-        Scan<false>(to_ref);
+  bool perform_scan = false;
+  switch (rtype) {
+    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
+      // Mark the bitmap only in the GC thread here so that we don't need a CAS.
+      if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
+        // It may be already marked if we accidentally pushed the same object twice due to the racy
+        // bitmap read in MarkUnevacFromSpaceRegion.
+        if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+          CHECK(region_space_->IsLargeObject(to_ref));
+          region_space_->ZeroLiveBytesForLargeObject(to_ref);
+        }
+        perform_scan = true;
+        // Only add to the live bytes if the object was not already marked and we are not the young
+        // GC.
+        add_to_live_bytes = true;
       }
-      // Only add to the live bytes if the object was not already marked and we are not the young
-      // GC.
-      add_to_live_bytes = true;
-    }
-  } else {
-    if (kEnableGenerationalConcurrentCopyingCollection) {
-      if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
+      break;
+    case space::RegionSpace::RegionType::kRegionTypeToSpace:
+      if (kEnableGenerationalConcurrentCopyingCollection) {
         // Copied to to-space, set the bit so that the next GC can scan objects.
         region_space_bitmap_->Set(to_ref);
       }
-    }
+      perform_scan = true;
+      break;
+    default:
+      DCHECK(!region_space_->HasAddress(to_ref)) << to_ref;
+      DCHECK(!immune_spaces_.ContainsObject(to_ref));
+      // Non-moving or large-object space.
+      if (kUseBakerReadBarrier) {
+        accounting::ContinuousSpaceBitmap* mark_bitmap =
+            heap_->GetNonMovingSpace()->GetMarkBitmap();
+        const bool is_los = !mark_bitmap->HasAddress(to_ref);
+        if (is_los) {
+          if (!IsAligned<kPageSize>(to_ref)) {
+            // Ref is a large object that is not aligned, it must be heap
+            // corruption. Remove memory protection and dump data before
+            // AtomicSetReadBarrierState since it will fault if the address is not
+            // valid.
+            region_space_->Unprotect();
+            heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+                                                        MemberOffset(0),
+                                                        to_ref,
+                                                        /* fatal */ true);
+          }
+          DCHECK(heap_->GetLargeObjectsSpace())
+              << "ref=" << to_ref
+              << " doesn't belong to non-moving space and large object space doesn't exist";
+          accounting::LargeObjectBitmap* los_bitmap =
+              heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+          DCHECK(los_bitmap->HasAddress(to_ref));
+          // Only the GC thread can set the LOS bitmap, so this does not need
+          // to be done atomically.
+          perform_scan = !los_bitmap->Set(to_ref);
+        } else {
+          // Only the GC thread can set the non-moving space bitmap, so this
+          // does not need to be done atomically.
+          perform_scan = !mark_bitmap->Set(to_ref);
+        }
+      } else {
+        perform_scan = true;
+      }
+  }
+  if (perform_scan) {
     if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
       Scan<true>(to_ref);
     } else {
@@ -2079,6 +2065,9 @@
       LOG(INFO) << "(after) num_bytes_allocated="
                 << heap_->num_bytes_allocated_.load();
     }
+
+    float reclaimed_bytes_ratio = static_cast<float>(freed_bytes) / num_bytes_allocated_before_gc_;
+    reclaimed_bytes_ratio_sum_ += reclaimed_bytes_ratio;
   }
 
   {
@@ -2094,11 +2083,6 @@
 
   CheckEmptyMarkStack();
 
-  int64_t num_bytes_allocated_after_gc = static_cast<int64_t>(heap_->GetBytesAllocated());
-  int64_t diff = num_bytes_allocated_before_gc_ - num_bytes_allocated_after_gc;
-  auto ratio = static_cast<float>(diff) / num_bytes_allocated_before_gc_;
-  reclaimed_bytes_ratio_sum_ += ratio;
-
   if (kVerboseMode) {
     LOG(INFO) << "GC end of ReclaimPhase";
   }
@@ -2145,7 +2129,10 @@
                                                mirror::Object* ref) {
   CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   if (is_asserting_to_space_invariant_) {
-    if (region_space_->HasAddress(ref)) {
+    if (ref == nullptr) {
+      // OK.
+      return;
+    } else if (region_space_->HasAddress(ref)) {
       // Check to-space invariant in region space (moving space).
       using RegionType = space::RegionSpace::RegionType;
       space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
@@ -2248,7 +2235,10 @@
                                                mirror::Object* ref) {
   CHECK_EQ(heap_->collector_type_, kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
   if (is_asserting_to_space_invariant_) {
-    if (region_space_->HasAddress(ref)) {
+    if (ref == nullptr) {
+      // OK.
+      return;
+    } else if (region_space_->HasAddress(ref)) {
       // Check to-space invariant in region space (moving space).
       using RegionType = space::RegionSpace::RegionType;
       space::RegionSpace::RegionType type = region_space_->GetRegionTypeUnsafe(ref);
@@ -2326,14 +2316,17 @@
       LOG(INFO) << "holder is in an immune image or the zygote space.";
     } else {
       LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
-      accounting::LargeObjectBitmap* los_bitmap =
-          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
-      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
-      bool is_los = mark_bitmap == nullptr;
+      accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+      accounting::LargeObjectBitmap* los_bitmap = nullptr;
+      const bool is_los = !mark_bitmap->HasAddress(obj);
+      if (is_los) {
+        DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(obj))
+            << "obj=" << obj
+            << " LOS bit map covers the entire lower 4GB address range";
+        los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+      }
       if (!is_los && mark_bitmap->Test(obj)) {
-        LOG(INFO) << "holder is marked in the mark bit map.";
+        LOG(INFO) << "holder is marked in the non-moving space mark bit map.";
       } else if (is_los && los_bitmap->Test(obj)) {
         LOG(INFO) << "holder is marked in the los bit map.";
       } else {
@@ -2350,6 +2343,30 @@
   LOG(INFO) << "offset=" << offset.SizeValue();
 }
 
+bool ConcurrentCopying::IsMarkedInNonMovingSpace(mirror::Object* from_ref) {
+  DCHECK(!region_space_->HasAddress(from_ref)) << "ref=" << from_ref;
+  DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
+  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
+    return true;
+  } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+             || done_scanning_.load(std::memory_order_acquire)) {
+    // See the comment in IsMarkedInUnevacFromSpace().
+    accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+    accounting::LargeObjectBitmap* los_bitmap = nullptr;
+    const bool is_los = !mark_bitmap->HasAddress(from_ref);
+    if (is_los) {
+      DCHECK(heap_->GetLargeObjectsSpace() && heap_->GetLargeObjectsSpace()->Contains(from_ref))
+          << "ref=" << from_ref
+          << " doesn't belong to non-moving space and large object space doesn't exist";
+      los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+    }
+    if (is_los ? los_bitmap->Test(from_ref) : mark_bitmap->Test(from_ref)) {
+      return true;
+    }
+  }
+  return IsOnAllocStack(from_ref);
+}
+
 void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                                mirror::Object* ref) {
   CHECK(ref != nullptr);
@@ -2371,62 +2388,14 @@
     }
   } else {
     // Non-moving space and large-object space (LOS) cases.
-    accounting::ContinuousSpaceBitmap* mark_bitmap =
-        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
-    accounting::LargeObjectBitmap* los_bitmap =
-        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
-    bool is_los = (mark_bitmap == nullptr);
-
-    bool marked_in_non_moving_space_or_los =
-        (kUseBakerReadBarrier
-         && kEnableGenerationalConcurrentCopyingCollection
-         && young_gen_
-         && !done_scanning_.load(std::memory_order_acquire))
-        // Don't use the mark bitmap to ensure `ref` is marked: check that the
-        // read barrier state is gray instead. This is to take into account a
-        // potential race between two read barriers on the same reference when the
-        // young-generation collector is still scanning the dirty cards.
-        //
-        // For instance consider two concurrent read barriers on the same GC root
-        // reference during the dirty-card-scanning step of a young-generation
-        // collection. Both threads would call ReadBarrier::BarrierForRoot, which
-        // would:
-        // a. mark the reference (leading to a call to
-        //    ConcurrentCopying::MarkNonMoving); then
-        // b. check the to-space invariant (leading to a call this
-        //    ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace -- this
-        //    method).
-        //
-        // In this situation, the following race could happen:
-        // 1. Thread A successfully changes `ref`'s read barrier state from
-        //    non-gray (white) to gray (with AtomicSetReadBarrierState) in
-        //    ConcurrentCopying::MarkNonMoving, then gets preempted.
-        // 2. Thread B also tries to change `ref`'s read barrier state with
-        //    AtomicSetReadBarrierState from non-gray to gray in
-        //    ConcurrentCopying::MarkNonMoving, but fails, as Thread A already
-        //    changed it.
-        // 3. Because Thread B failed the previous CAS, it does *not* set the
-        //    bit in the mark bitmap for `ref`.
-        // 4. Thread B checks the to-space invariant and calls
-        //    ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace: the bit
-        //    is not set in the mark bitmap for `ref`; checking that this bit is
-        //    set to check the to-space invariant is therefore not a reliable
-        //    test.
-        // 5. (Note that eventually, Thread A will resume its execution and set
-        //    the bit for `ref` in the mark bitmap.)
-        ? (ref->GetReadBarrierState() == ReadBarrier::GrayState())
-        // It is safe to use the heap mark bitmap otherwise.
-        : (!is_los && mark_bitmap->Test(ref)) || (is_los && los_bitmap->Test(ref));
-
     // If `ref` is on the allocation stack, then it may not be
     // marked live, but considered marked/alive (but not
     // necessarily on the live stack).
-    CHECK(marked_in_non_moving_space_or_los || IsOnAllocStack(ref))
+    CHECK(IsMarkedInNonMovingSpace(ref))
         << "Unmarked ref that's not on the allocation stack."
         << " obj=" << obj
         << " ref=" << ref
         << " rb_state=" << ref->GetReadBarrierState()
-        << " is_los=" << std::boolalpha << is_los << std::noboolalpha
         << " is_marking=" << std::boolalpha << is_marking_ << std::noboolalpha
         << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
         << " done_scanning="
@@ -2780,12 +2749,6 @@
         LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
       }
       bytes_allocated = non_moving_space_bytes_allocated;
-      // Mark it in the mark bitmap.
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
-      CHECK(mark_bitmap != nullptr);
-      bool previously_marked_in_bitmap = mark_bitmap->AtomicTestAndSet(to_ref);
-      CHECK(!previously_marked_in_bitmap);
     }
   }
   DCHECK(to_ref != nullptr);
@@ -2832,10 +2795,6 @@
         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
         // Free the non-moving-space chunk.
-        accounting::ContinuousSpaceBitmap* mark_bitmap =
-            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
-        CHECK(mark_bitmap != nullptr);
-        CHECK(mark_bitmap->Clear(to_ref));
         heap_->non_moving_space_->Free(self, to_ref);
       }
 
@@ -2884,6 +2843,14 @@
       } else {
         DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
         DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
+        if (!kEnableGenerationalConcurrentCopyingCollection || !young_gen_) {
+          // Mark it in the live bitmap.
+          CHECK(!heap_->non_moving_space_->GetLiveBitmap()->AtomicTestAndSet(to_ref));
+        }
+        if (!kUseBakerReadBarrier) {
+          // Mark it in the mark bitmap.
+          CHECK(!heap_->non_moving_space_->GetMarkBitmap()->AtomicTestAndSet(to_ref));
+        }
       }
       if (kUseBakerReadBarrier) {
         DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
@@ -2928,34 +2895,11 @@
       to_ref = from_ref;
     } else {
       // Non-immune non-moving space. Use the mark bitmap.
-      accounting::ContinuousSpaceBitmap* mark_bitmap =
-          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
-      bool is_los = mark_bitmap == nullptr;
-      if (!is_los && mark_bitmap->Test(from_ref)) {
+      if (IsMarkedInNonMovingSpace(from_ref)) {
         // Already marked.
         to_ref = from_ref;
       } else {
-        accounting::LargeObjectBitmap* los_bitmap =
-            heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
-        // We may not have a large object space for dex2oat, don't assume it exists.
-        if (los_bitmap == nullptr) {
-          CHECK(heap_->GetLargeObjectsSpace() == nullptr)
-              << "LOS bitmap covers the entire address range " << from_ref
-              << " " << heap_->DumpSpaces();
-        }
-        if (los_bitmap != nullptr && is_los && los_bitmap->Test(from_ref)) {
-          // Already marked in LOS.
-          to_ref = from_ref;
-        } else {
-          // Not marked.
-          if (IsOnAllocStack(from_ref)) {
-            // If on the allocation stack, it's considered marked.
-            to_ref = from_ref;
-          } else {
-            // Not marked.
-            to_ref = nullptr;
-          }
-        }
+        to_ref = nullptr;
       }
     }
   }
@@ -2977,11 +2921,24 @@
   DCHECK(!region_space_->HasAddress(ref)) << ref;
   DCHECK(!immune_spaces_.ContainsObject(ref));
   // Use the mark bitmap.
-  accounting::ContinuousSpaceBitmap* mark_bitmap =
-      heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
-  accounting::LargeObjectBitmap* los_bitmap =
-      heap_mark_bitmap_->GetLargeObjectBitmap(ref);
-  bool is_los = mark_bitmap == nullptr;
+  accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+  accounting::LargeObjectBitmap* los_bitmap = nullptr;
+  const bool is_los = !mark_bitmap->HasAddress(ref);
+  if (is_los) {
+    if (!IsAligned<kPageSize>(ref)) {
+      // Ref is a large object that is not aligned, it must be heap
+      // corruption. Remove memory protection and dump data before
+      // AtomicSetReadBarrierState since it will fault if the address is not
+      // valid.
+      region_space_->Unprotect();
+      heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
+    }
+    DCHECK(heap_->GetLargeObjectsSpace())
+        << "ref=" << ref
+        << " doesn't belong to non-moving space and large object space doesn't exist";
+    los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+    DCHECK(los_bitmap->HasAddress(ref));
+  }
   if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
     // The sticky-bit CC collector is only compatible with Baker-style read barriers.
     DCHECK(kUseBakerReadBarrier);
@@ -2999,12 +2956,6 @@
           ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
         // TODO: We don't actually need to scan this object later, we just need to clear the gray
         // bit.
-        // Also make sure the object is marked.
-        if (is_los) {
-          los_bitmap->AtomicTestAndSet(ref);
-        } else {
-          mark_bitmap->AtomicTestAndSet(ref);
-        }
         // We don't need to mark newly allocated objects (those in allocation stack) as they can
         // only point to to-space objects. Also, they are considered live till the next GC cycle.
         PushOntoMarkStack(self, ref);
@@ -3016,65 +2967,34 @@
     // Already marked.
   } else if (is_los && los_bitmap->Test(ref)) {
     // Already marked in LOS.
-  } else {
-    // Not marked.
-    if (IsOnAllocStack(ref)) {
-      // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
-      // Objects on the allocation stack need not be marked.
-      if (!is_los) {
-        DCHECK(!mark_bitmap->Test(ref));
-      } else {
-        DCHECK(!los_bitmap->Test(ref));
-      }
-      if (kUseBakerReadBarrier) {
-        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
-      }
+  } else if (IsOnAllocStack(ref)) {
+    // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
+    // Objects on the allocation stack need not be marked.
+    if (!is_los) {
+      DCHECK(!mark_bitmap->Test(ref));
     } else {
-      // For the baker-style RB, we need to handle 'false-gray' cases. See the
-      // kRegionTypeUnevacFromSpace-case comment in Mark().
+      DCHECK(!los_bitmap->Test(ref));
+    }
+    if (kUseBakerReadBarrier) {
+      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
+    }
+  } else {
+    // Not marked nor on the allocation stack. Try to mark it.
+    // This may or may not succeed, which is ok.
+    bool success = false;
+    if (kUseBakerReadBarrier) {
+      success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
+                                               ReadBarrier::GrayState());
+    } else {
+      success = is_los ?
+          !los_bitmap->AtomicTestAndSet(ref) :
+          !mark_bitmap->AtomicTestAndSet(ref);
+    }
+    if (success) {
       if (kUseBakerReadBarrier) {
-        // Test the bitmap first to reduce the chance of false gray cases.
-        if ((!is_los && mark_bitmap->Test(ref)) ||
-            (is_los && los_bitmap->Test(ref))) {
-          return ref;
-        }
+        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
       }
-      if (is_los && !IsAligned<kPageSize>(ref)) {
-        // Ref is a large object that is not aligned, it must be heap
-        // corruption. Remove memory protection and dump data before
-        // AtomicSetReadBarrierState since it will fault if the address is not
-        // valid.
-        region_space_->Unprotect();
-        heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true);
-      }
-      // Not marked nor on the allocation stack. Try to mark it.
-      // This may or may not succeed, which is ok.
-      bool cas_success = false;
-      if (kUseBakerReadBarrier) {
-        cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
-                                                     ReadBarrier::GrayState());
-      }
-      if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
-        // Already marked.
-        if (kUseBakerReadBarrier &&
-            cas_success &&
-            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
-          PushOntoFalseGrayStack(self, ref);
-        }
-      } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
-        // Already marked in LOS.
-        if (kUseBakerReadBarrier &&
-            cas_success &&
-            ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
-          PushOntoFalseGrayStack(self, ref);
-        }
-      } else {
-        // Newly marked.
-        if (kUseBakerReadBarrier) {
-          DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
-        }
-        PushOntoMarkStack(self, ref);
-      }
+      PushOntoMarkStack(self, ref);
     }
   }
   return ref;
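
The rewritten MarkNonMoving() now enqueues each non-moving or large object at most once: with Baker read barriers the non-gray-to-gray CAS decides the winner, otherwise the bitmap's AtomicTestAndSet does, and only the winner pushes the reference onto the mark stack. A minimal self-contained sketch of that first-marker-wins idiom, with a plain std::atomic<bool> standing in for the read-barrier state or bitmap bit:

#include <atomic>

struct SketchObject {
  std::atomic<bool> marked{false};
};

// Returns true for exactly one caller per object; that caller is responsible
// for pushing the object onto the (separately locked) mark stack, mirroring
// how AtomicSetReadBarrierState / AtomicTestAndSet gate PushOntoMarkStack above.
bool TryMark(SketchObject* obj) {
  bool expected = false;
  return obj->marked.compare_exchange_strong(expected, true);
}
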
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 6535b11..237e070 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -218,6 +218,8 @@
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                    bool do_atomic_update) override
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -283,11 +285,6 @@
   ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                 mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
-  void PushOntoFalseGrayStack(Thread* const self, mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
-  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
   void ScanImmuneObject(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
   mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
@@ -315,7 +312,6 @@
   // (see use case in ConcurrentCopying::MarkFromReadBarrier).
   bool rb_mark_bit_stack_full_;
 
-  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
   Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::vector<accounting::ObjectStack*> revoked_mark_stacks_
       GUARDED_BY(mark_stack_lock_);
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 8477c9d..46ff7dc 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -90,6 +90,7 @@
   Thread* self = Thread::Current();
   uint64_t start_time = NanoTime();
   uint64_t thread_cpu_start_time = ThreadCpuNanoTime();
+  GetHeap()->CalculateWeightedAllocatedBytes();
   Iteration* current_iteration = GetCurrentIteration();
   current_iteration->Reset(gc_cause, clear_soft_references);
   // Note transaction mode is single-threaded and there's no asynchronous GC and this flag doesn't
diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h
index c9ac435..80ee44c 100644
--- a/runtime/gc/collector/immune_region.h
+++ b/runtime/gc/collector/immune_region.h
@@ -18,7 +18,6 @@
 #define ART_RUNTIME_GC_COLLECTOR_IMMUNE_REGION_H_
 
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 namespace mirror {
diff --git a/runtime/gc/collector/immune_spaces.h b/runtime/gc/collector/immune_spaces.h
index 72cb60d..5a8441a 100644
--- a/runtime/gc/collector/immune_spaces.h
+++ b/runtime/gc/collector/immune_spaces.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_GC_COLLECTOR_IMMUNE_SPACES_H_
 #define ART_RUNTIME_GC_COLLECTOR_IMMUNE_SPACES_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc/space/space.h"
 #include "immune_region.h"
 
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index bb42be6..6fab371 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -20,8 +20,8 @@
 #include <memory>
 
 #include "base/atomic.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "garbage_collector.h"
 #include "gc/accounting/heap_bitmap.h"
 #include "gc_root.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f767360..2f36d02 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -173,6 +173,8 @@
            double foreground_heap_growth_multiplier,
            size_t capacity,
            size_t non_moving_space_capacity,
+           const std::vector<std::string>& boot_class_path,
+           const std::vector<std::string>& boot_class_path_locations,
            const std::string& image_file_name,
            const InstructionSet image_instruction_set,
            CollectorType foreground_collector_type,
@@ -210,6 +212,9 @@
       low_memory_mode_(low_memory_mode),
       long_pause_log_threshold_(long_pause_log_threshold),
       long_gc_log_threshold_(long_gc_log_threshold),
+      process_cpu_start_time_ns_(ProcessCpuNanoTime()),
+      last_process_cpu_time_ns_(process_cpu_start_time_ns_),
+      weighted_allocated_bytes_(0u),
       ignore_max_footprint_(ignore_max_footprint),
       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
       zygote_space_(nullptr),
@@ -347,7 +352,9 @@
   // Load image space(s).
   std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
   MemMap heap_reservation;
-  if (space::ImageSpace::LoadBootImage(image_file_name,
+  if (space::ImageSpace::LoadBootImage(boot_class_path,
+                                       boot_class_path_locations,
+                                       image_file_name,
                                        image_instruction_set,
                                        heap_reservation_size,
                                        &boot_image_spaces,
@@ -1062,6 +1069,14 @@
   }
 }
 
+void Heap::CalculateWeightedAllocatedBytes() {
+  uint64_t current_process_cpu_time = ProcessCpuNanoTime();
+  uint64_t bytes_allocated = GetBytesAllocated();
+  uint64_t weight = current_process_cpu_time - last_process_cpu_time_ns_;
+  weighted_allocated_bytes_ += weight * bytes_allocated;
+  last_process_cpu_time_ns_ = current_process_cpu_time;
+}
+
 uint64_t Heap::GetTotalGcCpuTime() {
   uint64_t sum = 0;
   for (auto* collector : garbage_collectors_) {
@@ -1139,6 +1154,11 @@
   for (auto* collector : garbage_collectors_) {
     collector->ResetMeasurements();
   }
+
+  process_cpu_start_time_ns_ = ProcessCpuNanoTime();
+  last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
+  weighted_allocated_bytes_ = 0u;
+
   total_bytes_freed_ever_ = 0;
   total_objects_freed_ever_ = 0;
   total_wait_time_ = 0;
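
CalculateWeightedAllocatedBytes() is called at the start of every collection (see the garbage_collector.cc hunk above): the bytes allocated at that point are weighted by the process CPU time elapsed since the previous call and added to a running sum. A minimal sketch of the accumulator; presumably the sum is later divided by total process CPU time to obtain a CPU-time-weighted average heap size, though that consumer is not part of this change:

#include <cstdint>

struct WeightedBytesSketch {
  uint64_t last_cpu_ns = 0;      // last_process_cpu_time_ns_ in the hunks below.
  uint64_t weighted_bytes = 0;   // weighted_allocated_bytes_.

  // Called at GC start with the current process CPU time and allocated bytes.
  void OnGcStart(uint64_t now_cpu_ns, uint64_t bytes_allocated) {
    weighted_bytes += (now_cpu_ns - last_cpu_ns) * bytes_allocated;
    last_cpu_ns = now_cpu_ns;
  }
};
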
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index a43f315..8d81c11 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -174,7 +174,9 @@
        double foreground_heap_growth_multiplier,
        size_t capacity,
        size_t non_moving_space_capacity,
-       const std::string& original_image_file_name,
+       const std::vector<std::string>& boot_class_path,
+       const std::vector<std::string>& boot_class_path_locations,
+       const std::string& image_file_name,
        InstructionSet image_instruction_set,
        CollectorType foreground_collector_type,
        CollectorType background_collector_type,
@@ -395,8 +397,17 @@
     REQUIRES(!Locks::heap_bitmap_lock_)
     REQUIRES(Locks::mutator_lock_);
 
+  uint64_t GetWeightedAllocatedBytes() const {
+    return weighted_allocated_bytes_;
+  }
+
+  void CalculateWeightedAllocatedBytes();
   uint64_t GetTotalGcCpuTime();
 
+  uint64_t GetProcessCpuStartTime() const {
+    return process_cpu_start_time_ns_;
+  }
+
   // Set target ideal heap utilization ratio, implements
   // dalvik.system.VMRuntime.setTargetHeapUtilization.
   void SetTargetHeapUtilization(float target);
@@ -1161,6 +1172,15 @@
   // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
   const size_t long_gc_log_threshold_;
 
+  // Starting time of the new process; meant to be used for measuring total process CPU time.
+  uint64_t process_cpu_start_time_ns_;
+
+  // Last time GC started; meant to be used to measure the duration between two GCs.
+  uint64_t last_process_cpu_time_ns_;
+
+  // Accumulated allocated_bytes * (current_process_cpu_time - last_process_cpu_time).
+  uint64_t weighted_allocated_bytes_;
+
   // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
   // useful for benchmarking since it reduces time spent in GC to a low %.
   const bool ignore_max_footprint_;
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index d4af117..4944639 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -17,6 +17,7 @@
 #include "reference_processor.h"
 
 #include "art_field-inl.h"
+#include "base/mutex.h"
 #include "base/time_utils.h"
 #include "base/utils.h"
 #include "class_root.h"
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index c6c7836..17b546a 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
 
 #include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 #include "reference_queue.h"
 
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 5c11e50..95871da 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -17,6 +17,7 @@
 #include "reference_queue.h"
 
 #include "accounting/card_table-inl.h"
+#include "base/mutex.h"
 #include "collector/concurrent_copying.h"
 #include "heap.h"
 #include "mirror/class-inl.h"
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 09ab51a..53518cc 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -23,7 +23,7 @@
 
 #include "base/atomic.h"
 #include "base/globals.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/timing_logger.h"
 #include "jni.h"
 #include "obj_ptr.h"
@@ -31,6 +31,9 @@
 #include "thread_pool.h"
 
 namespace art {
+
+class Mutex;
+
 namespace mirror {
 class Reference;
 }  // namespace mirror
diff --git a/runtime/gc/scoped_gc_critical_section.h b/runtime/gc/scoped_gc_critical_section.h
index 864bf87..8ad0158 100644
--- a/runtime/gc/scoped_gc_critical_section.h
+++ b/runtime/gc/scoped_gc_critical_section.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_GC_SCOPED_GC_CRITICAL_SECTION_H_
 #define ART_RUNTIME_GC_SCOPED_GC_CRITICAL_SECTION_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "collector_type.h"
 #include "gc_cause.h"
 
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 383bf7a..6d9fd04 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -19,6 +19,8 @@
 
 #include "space.h"
 
+#include "base/mutex.h"
+
 namespace art {
 
 namespace mirror {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index c772bda..02ab50b 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -103,9 +103,8 @@
 static bool GenerateImage(const std::string& image_filename,
                           InstructionSet image_isa,
                           std::string* error_msg) {
-  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
-  std::vector<std::string> boot_class_path;
-  Split(boot_class_path_string, ':', &boot_class_path);
+  Runtime* runtime = Runtime::Current();
+  const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
   if (boot_class_path.empty()) {
     *error_msg = "Failed to generate image because no boot class path specified";
     return false;
@@ -125,8 +124,11 @@
   image_option_string += image_filename;
   arg_vector.push_back(image_option_string);
 
-  for (size_t i = 0; i < boot_class_path.size(); i++) {
+  const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
+  DCHECK_EQ(boot_class_path.size(), boot_class_path_locations.size());
+  for (size_t i = 0u; i < boot_class_path.size(); i++) {
     arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]);
+    arg_vector.push_back(std::string("--dex-location=") + boot_class_path_locations[i]);
   }
 
   std::string oat_file_option_string("--oat-file=");
@@ -627,16 +629,36 @@
         return MemMap::Invalid();
       }
       memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
+
       const uint64_t start = NanoTime();
+      ThreadPool* pool = Runtime::Current()->GetThreadPool();
+      Thread* const self = Thread::Current();
+      const size_t kMinBlocks = 2;
+      const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
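+      // Decompress the image blocks on the runtime thread pool when one is available and the
+      // image has enough blocks to benefit; otherwise decompress inline on this thread.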
       for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
-        TimingLogger::ScopedTiming timing2("LZ4 decompress image", logger);
-        if (!block.Decompress(/*out_ptr=*/map.Begin(), /*in_ptr=*/temp_map.Begin(), error_msg)) {
-          if (error_msg != nullptr) {
-            *error_msg = "Failed to decompress image block " + *error_msg;
+        auto function = [&](Thread*) {
+          const uint64_t start2 = NanoTime();
+          ScopedTrace trace("LZ4 decompress block");
+          if (!block.Decompress(/*out_ptr=*/map.Begin(),
+                                /*in_ptr=*/temp_map.Begin(),
+                                error_msg)) {
+            if (error_msg != nullptr) {
+              *error_msg = "Failed to decompress image block " + *error_msg;
+            }
           }
-          return MemMap::Invalid();
+          VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
+                      << block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
+        };
+        if (use_parallel) {
+          pool->AddTask(self, new FunctionTask(std::move(function)));
+        } else {
+          function(self);
         }
       }
+      if (use_parallel) {
+        ScopedTrace trace("Waiting for workers");
+        pool->Wait(self, true, false);
+      }
       const uint64_t time = NanoTime() - start;
       // Add 1 ns to prevent possible divide by 0.
       VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
@@ -1185,8 +1207,13 @@
 
 class ImageSpace::BootImageLoader {
  public:
-  BootImageLoader(const std::string& image_location, InstructionSet image_isa)
-      : image_location_(image_location),
+  BootImageLoader(const std::vector<std::string>& boot_class_path,
+                  const std::vector<std::string>& boot_class_path_locations,
+                  const std::string& image_location,
+                  InstructionSet image_isa)
+      : boot_class_path_(boot_class_path),
+        boot_class_path_locations_(boot_class_path_locations),
+        image_location_(image_location),
         image_isa_(image_isa),
         is_zygote_(Runtime::Current()->IsZygote()),
         has_system_(false),
@@ -1234,10 +1261,8 @@
                       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
     std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
-    std::vector<std::string> locations;
-    if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
-      return false;
-    }
+    std::vector<std::string> locations =
+        ExpandMultiImageLocations(boot_class_path_locations_, image_location_);
     uint32_t image_start;
     uint32_t image_end;
     if (!GetBootImageAddressRange(filename, &image_start, &image_end, error_msg)) {
@@ -1270,9 +1295,16 @@
         return false;
       }
     }
-    for (std::unique_ptr<ImageSpace>& space : spaces) {
-      static constexpr bool kValidateOatFile = false;
-      if (!OpenOatFile(space.get(), kValidateOatFile, &logger, &image_reservation, error_msg)) {
+    for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
+      std::string expected_boot_class_path =
+          (i == 0u) ? android::base::Join(boot_class_path_locations_, ':') : std::string();
+      if (!OpenOatFile(spaces[i].get(),
+                       boot_class_path_[i],
+                       expected_boot_class_path,
+                       /*validate_oat_file=*/ false,
+                       &logger,
+                       &image_reservation,
+                       error_msg)) {
         return false;
       }
     }
@@ -1301,10 +1333,8 @@
       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
     DCHECK(DalvikCacheExists());
-    std::vector<std::string> locations;
-    if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
-      return false;
-    }
+    std::vector<std::string> locations =
+        ExpandMultiImageLocations(boot_class_path_locations_, image_location_);
     uint32_t image_start;
     uint32_t image_end;
     if (!GetBootImageAddressRange(cache_filename_, &image_start, &image_end, error_msg)) {
@@ -1346,8 +1376,16 @@
         return false;
       }
     }
-    for (std::unique_ptr<ImageSpace>& space : spaces) {
-      if (!OpenOatFile(space.get(), validate_oat_file, &logger, &image_reservation, error_msg)) {
+    for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
+      std::string expected_boot_class_path =
+          (i == 0u) ? android::base::Join(boot_class_path_locations_, ':') : std::string();
+      if (!OpenOatFile(spaces[i].get(),
+                       boot_class_path_[i],
+                       expected_boot_class_path,
+                       validate_oat_file,
+                       &logger,
+                       &image_reservation,
+                       error_msg)) {
         return false;
       }
     }
@@ -1867,8 +1905,6 @@
     DCHECK(!spaces.empty());
     ImageSpace* space = spaces[0].get();
     const ImageHeader& image_header = space->GetImageHeader();
-    // Use oat_file_non_owned_ from the `space` to set the runtime methods.
-    runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
     runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod));
     runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod));
     runtime->SetImtUnimplementedMethod(
@@ -1932,6 +1968,8 @@
   }
 
   bool OpenOatFile(ImageSpace* space,
+                   const std::string& dex_filename,
+                   const std::string& expected_boot_class_path,
                    bool validate_oat_file,
                    TimingLogger* logger,
                    /*inout*/MemMap* image_reservation,
@@ -1947,13 +1985,15 @@
       TimingLogger::ScopedTiming timing("OpenOatFile", logger);
       std::string oat_filename =
           ImageHeader::GetOatLocationFromImageLocation(space->GetImageFilename());
+      std::string oat_location =
+          ImageHeader::GetOatLocationFromImageLocation(space->GetImageLocation());
 
       oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
                                    oat_filename,
-                                   oat_filename,
+                                   oat_location,
                                    !Runtime::Current()->IsAotCompiler(),
                                    /*low_4gb=*/ false,
-                                   /*abs_dex_location=*/ nullptr,
+                                   /*abs_dex_location=*/ dex_filename.c_str(),
                                    image_reservation,
                                    error_msg));
       if (oat_file == nullptr) {
@@ -1974,6 +2014,17 @@
                                   space->GetName());
         return false;
       }
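+      // Reject the oat file if the boot class path recorded in its header does not match
+      // the expected value passed in by the caller.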
+      const char* oat_boot_class_path =
+          oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
+      oat_boot_class_path = (oat_boot_class_path != nullptr) ? oat_boot_class_path : "";
+      if (expected_boot_class_path != oat_boot_class_path) {
+        *error_msg = StringPrintf("Failed to match oat boot class path %s to expected "
+                                  "boot class path %s in image %s",
+                                  oat_boot_class_path,
+                                  expected_boot_class_path.c_str(),
+                                  space->GetName());
+        return false;
+      }
       ptrdiff_t relocation_diff = space->Begin() - image_header.GetImageBegin();
       CHECK(image_header.GetOatDataBegin() != nullptr);
       uint8_t* oat_data_begin = image_header.GetOatDataBegin() + relocation_diff;
@@ -1999,37 +2050,6 @@
     return true;
   }
 
-  // Extract boot class path from oat file associated with `image_filename`
-  // and list all associated image locations.
-  static bool GetBootClassPathImageLocations(const std::string& image_location,
-                                             const std::string& image_filename,
-                                             /*out*/ std::vector<std::string>* all_locations,
-                                             /*out*/ std::string* error_msg) {
-    std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_filename);
-    std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
-                                                    oat_filename,
-                                                    oat_filename,
-                                                    /*executable=*/ false,
-                                                    /*low_4gb=*/ false,
-                                                    /*abs_dex_location=*/ nullptr,
-                                                    /*reservation=*/ nullptr,
-                                                    error_msg));
-    if (oat_file == nullptr) {
-      *error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
-                                oat_filename.c_str(),
-                                image_filename.c_str(),
-                                error_msg->c_str());
-      return false;
-    }
-    const OatHeader& oat_header = oat_file->GetOatHeader();
-    const char* boot_classpath = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
-    all_locations->push_back(image_location);
-    if (boot_classpath != nullptr && boot_classpath[0] != 0) {
-      ExtractMultiImageLocations(image_location, boot_classpath, all_locations);
-    }
-    return true;
-  }
-
   bool GetBootImageAddressRange(const std::string& filename,
                                 /*out*/uint32_t* start,
                                 /*out*/uint32_t* end,
@@ -2096,6 +2116,8 @@
     return true;
   }
 
+  const std::vector<std::string>& boot_class_path_;
+  const std::vector<std::string>& boot_class_path_locations_;
   const std::string& image_location_;
   InstructionSet image_isa_;
   bool is_zygote_;
@@ -2143,6 +2165,8 @@
 }
 
 bool ImageSpace::LoadBootImage(
+    const std::vector<std::string>& boot_class_path,
+    const std::vector<std::string>& boot_class_path_locations,
     const std::string& image_location,
     const InstructionSet image_isa,
     size_t extra_reservation_size,
@@ -2160,7 +2184,7 @@
     return false;
   }
 
-  BootImageLoader loader(image_location, image_isa);
+  BootImageLoader loader(boot_class_path, boot_class_path_locations, image_location, image_isa);
 
   // Step 0: Extra zygote work.
 
@@ -2321,57 +2345,6 @@
       << ",name=\"" << GetName() << "\"]";
 }
 
-std::string ImageSpace::GetMultiImageBootClassPath(
-    const std::vector<std::string>& dex_locations,
-    const std::vector<std::string>& oat_filenames,
-    const std::vector<std::string>& image_filenames) {
-  DCHECK_GT(oat_filenames.size(), 1u);
-  // If the image filename was adapted (e.g., for our tests), we need to change this here,
-  // too, but need to strip all path components (they will be re-established when loading).
-  // For example, dex location
-  //    /system/framework/core-libart.art
-  // with image name
-  //    out/target/product/taimen/dex_bootjars/system/framework/arm64/boot-core-libart.art
-  // yields boot class path component
-  //    /system/framework/boot-core-libart.art .
-  std::ostringstream bootcp_oss;
-  bool first_bootcp = true;
-  for (size_t i = 0; i < dex_locations.size(); ++i) {
-    if (!first_bootcp) {
-      bootcp_oss << ":";
-    }
-
-    std::string dex_loc = dex_locations[i];
-    std::string image_filename = image_filenames[i];
-
-    // Use the dex_loc path, but the image_filename name (without path elements).
-    size_t dex_last_slash = dex_loc.rfind('/');
-
-    // npos is max(size_t). That makes this a bit ugly.
-    size_t image_last_slash = image_filename.rfind('/');
-    size_t image_last_at = image_filename.rfind('@');
-    size_t image_last_sep = (image_last_slash == std::string::npos)
-                                ? image_last_at
-                                : (image_last_at == std::string::npos)
-                                      ? image_last_slash
-                                      : std::max(image_last_slash, image_last_at);
-    // Note: whenever image_last_sep == npos, +1 overflow means using the full string.
-
-    if (dex_last_slash == std::string::npos) {
-      dex_loc = image_filename.substr(image_last_sep + 1);
-    } else {
-      dex_loc = dex_loc.substr(0, dex_last_slash + 1) +
-          image_filename.substr(image_last_sep + 1);
-    }
-
-    // Image filenames already end with .art, no need to replace.
-
-    bootcp_oss << dex_loc;
-    first_bootcp = false;
-  }
-  return bootcp_oss.str();
-}
-
 bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
   const ArtDexFileLoader dex_file_loader;
   for (const OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
@@ -2432,46 +2405,55 @@
   return true;
 }
 
-void ImageSpace::ExtractMultiImageLocations(const std::string& input_image_file_name,
-                                            const std::string& boot_classpath,
-                                            std::vector<std::string>* image_file_names) {
-  DCHECK(image_file_names != nullptr);
+std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
+    const std::vector<std::string>& dex_locations,
+    const std::string& image_location) {
+  DCHECK(!dex_locations.empty());
 
-  std::vector<std::string> images;
-  Split(boot_classpath, ':', &images);
+  // Find the path.
+  size_t last_slash = image_location.rfind('/');
+  CHECK_NE(last_slash, std::string::npos);
 
-  // Add the rest into the list. We have to adjust locations, possibly:
-  //
-  // For example, image_file_name is /a/b/c/d/e.art
-  //              images[0] is          f/c/d/e.art
-  // ----------------------------------------------
-  //              images[1] is          g/h/i/j.art  -> /a/b/h/i/j.art
-  const std::string& first_image = images[0];
-  // Length of common suffix.
-  size_t common = 0;
-  while (common < input_image_file_name.size() &&
-         common < first_image.size() &&
-         *(input_image_file_name.end() - common - 1) == *(first_image.end() - common - 1)) {
-    ++common;
+  // We also need to honor path components that were encoded through '@'. Otherwise the loading
+  // code won't be able to find the images.
+  if (image_location.find('@', last_slash) != std::string::npos) {
+    last_slash = image_location.rfind('@');
   }
-  // We want to replace the prefix of the input image with the prefix of the boot class path.
-  // This handles the case where the image file contains @ separators.
-  // Example image_file_name is oats/system@framework@boot.art
-  // images[0] is .../arm/boot.art
-  // means that the image name prefix will be oats/system@framework@
-  // so that the other images are openable.
-  const size_t old_prefix_length = first_image.size() - common;
-  const std::string new_prefix = input_image_file_name.substr(
-      0,
-      input_image_file_name.size() - common);
 
-  // Apply pattern to images[1] .. images[n].
-  for (size_t i = 1; i < images.size(); ++i) {
-    const std::string& image = images[i];
-    CHECK_GT(image.length(), old_prefix_length);
-    std::string suffix = image.substr(old_prefix_length);
-    image_file_names->push_back(new_prefix + suffix);
+  // Find the dot separating the primary image name from the extension.
+  size_t last_dot = image_location.rfind('.');
+  // Extract the extension and base (the path and primary image name).
+  std::string extension;
+  std::string base = image_location;
+  if (last_dot != std::string::npos && last_dot > last_slash) {
+    extension = image_location.substr(last_dot);  // Including the dot.
+    base.resize(last_dot);
   }
+  // For non-empty primary image name, add '-' to the `base`.
+  if (last_slash + 1u != base.size()) {
+    base += '-';
+  }
+
+  std::vector<std::string> locations;
+  locations.reserve(dex_locations.size());
+  locations.push_back(image_location);
+
+  // Now create the other names. Use a counted loop to skip the first one.
+  for (size_t i = 1u; i < dex_locations.size(); ++i) {
+    // Replace path with `base` (i.e. image path and prefix) and replace the original
+    // extension (if any) with `extension`.
+    std::string name = dex_locations[i];
+    size_t last_dex_slash = name.rfind('/');
+    if (last_dex_slash != std::string::npos) {
+      name = name.substr(last_dex_slash + 1);
+    }
+    size_t last_dex_dot = name.rfind('.');
+    if (last_dex_dot != std::string::npos) {
+      name.resize(last_dex_dot);
+    }
+    locations.push_back(base + name + extension);
+  }
+  return locations;
 }
 
 void ImageSpace::DumpSections(std::ostream& os) const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index aa45ed3..05e7fa5 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -39,9 +39,11 @@
   // Load boot image spaces from a primary image file for a specified instruction set.
   //
   // On successful return, the loaded spaces are added to boot_image_spaces (which must be
-  // empty on entry) and oat_file_end is updated with the (page-aligned) end of the last
-  // oat file.
+  // empty on entry) and `extra_reservation` is set to the requested reservation located
+  // after the end of the last loaded oat file.
   static bool LoadBootImage(
+      const std::vector<std::string>& boot_class_path,
+      const std::vector<std::string>& boot_class_path_locations,
       const std::string& image_location,
       const InstructionSet image_isa,
       size_t extra_reservation_size,
@@ -122,15 +124,10 @@
                                 bool* has_data,
                                 bool *is_global_cache);
 
-  // Use the input image filename to adapt the names in the given boot classpath to establish
-  // complete locations for secondary images.
-  static void ExtractMultiImageLocations(const std::string& input_image_file_name,
-                                        const std::string& boot_classpath,
-                                        std::vector<std::string>* image_filenames);
-
-  static std::string GetMultiImageBootClassPath(const std::vector<std::string>& dex_locations,
-                                                const std::vector<std::string>& oat_filenames,
-                                                const std::vector<std::string>& image_filenames);
+  // Expand a single image location to multi-image locations based on the dex locations.
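+  // For example (hypothetical inputs), image location "/system/framework/boot.art" with dex
+  // locations "/system/framework/core-oj.jar" and "/system/framework/core-libart.jar" expands
+  // to "/system/framework/boot.art" and "/system/framework/boot-core-libart.art".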
+  static std::vector<std::string> ExpandMultiImageLocations(
+      const std::vector<std::string>& dex_locations,
+      const std::string& image_location);
 
   // Returns true if the dex checksums in the given oat file match the
   // checksums of the original dex files on disk. This is intended to be used
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 5dd8136..7d28516 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -21,6 +21,7 @@
 
 #include <ostream>
 #include "base/memory_tool.h"
+#include "base/mutex.h"
 
 namespace art {
 namespace gc {
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 545e3d8..dd5451b 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -22,9 +22,9 @@
 
 #include "base/atomic.h"
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc/accounting/space_bitmap.h"
 #include "gc/collector/object_byte_pair.h"
 
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0bd43f9..32af62d 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_GC_ROOT_H_
 #define ART_RUNTIME_GC_ROOT_H_
 
+#include "base/locks.h"       // For Locks::mutator_lock_.
 #include "base/macros.h"
-#include "base/mutex.h"       // For Locks::mutator_lock_.
 #include "mirror/object_reference.h"
 #include "read_barrier_option.h"
 
diff --git a/runtime/handle.h b/runtime/handle.h
index b13c43e..0c9c029 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -20,8 +20,8 @@
 #include <android-base/logging.h>
 
 #include "base/casts.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/value_object.h"
 #include "jni.h"
 #include "obj_ptr.h"
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 9eaf1ec..1a1c92f 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -22,8 +22,8 @@
 #include <android-base/logging.h>
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "handle.h"
 #include "stack_reference.h"
 #include "verify_object.h"
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 614154c..d9d81f0 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -20,7 +20,7 @@
 #include "art_field.h"
 #include "art_method.h"
 #include "base/hiddenapi_flags.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "intrinsics_enum.h"
 #include "mirror/class-inl.h"
 #include "reflection.h"
diff --git a/runtime/image.h b/runtime/image.h
index 76fb3b7..9d98431 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -116,6 +116,14 @@
       return storage_mode_;
     }
 
+    uint32_t GetDataSize() const {
+      return data_size_;
+    }
+
+    uint32_t GetImageSize() const {
+      return image_size_;
+    }
+
    private:
     // Storage method for the image, the image may be compressed.
     StorageMode storage_mode_ = kDefaultStorageMode;
diff --git a/runtime/imtable.h b/runtime/imtable.h
index 3c52fb8..48a8643 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -23,8 +23,8 @@
 
 #include "base/casts.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 8c63c00..eb07035 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -26,9 +26,9 @@
 #include <android-base/logging.h>
 
 #include "base/bit_utils.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 #include "offsets.h"
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index d7e69a6..e92d195 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index bf84227..a633a63 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -32,8 +32,8 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
 #include "common_dex_operations.h"
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index d8a764f..177b0fd 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "jvalue.h"
 #include "obj_ptr.h"
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index 9fc4239..d4dca11 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "dex/code_item_accessors.h"
 #include "jvalue.h"
diff --git a/runtime/interpreter/lock_count_data.h b/runtime/interpreter/lock_count_data.h
index 3098d4f..efa14c5 100644
--- a/runtime/interpreter/lock_count_data.h
+++ b/runtime/interpreter/lock_count_data.h
@@ -20,7 +20,7 @@
 #include <memory>
 #include <vector>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 
 namespace art {
 
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 6609021..ca98999 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -21,8 +21,8 @@
 #include <cstring>
 #include <string>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "lock_count_data.h"
 #include "read_barrier.h"
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index e292a76..4fa7271 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -571,12 +571,9 @@
 
   Runtime* runtime = Runtime::Current();
 
-  std::vector<std::string> split;
-  Split(runtime->GetBootClassPathString(), ':', &split);
-  if (split.empty()) {
-    AbortTransactionOrFail(self,
-                           "Boot classpath not set or split error:: %s",
-                           runtime->GetBootClassPathString().c_str());
+  const std::vector<std::string>& boot_class_path = Runtime::Current()->GetBootClassPath();
+  if (boot_class_path.empty()) {
+    AbortTransactionOrFail(self, "Boot classpath not set");
     return;
   }
 
@@ -584,7 +581,7 @@
   size_t map_size;
   std::string last_error_msg;  // Only store the last message (we could concatenate).
 
-  for (const std::string& jar_file : split) {
+  for (const std::string& jar_file : boot_class_path) {
     mem_map = FindAndExtractEntry(jar_file, resource_cstr, &map_size, &last_error_msg);
     if (mem_map.IsValid()) {
       break;
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 452a76b..8141ea2 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -19,8 +19,8 @@
 
 #include <iosfwd>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "gc_root.h"
 
 namespace art {
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index d31f166..37365ff 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -292,8 +292,7 @@
     expandBufAddUtf8String(pReply, str);
   }
 
-  std::vector<std::string> boot_class_path;
-  Split(Runtime::Current()->GetBootClassPathString(), ':', &boot_class_path);
+  std::vector<std::string> boot_class_path = Runtime::Current()->GetBootClassPath();
   expandBufAdd4BE(pReply, boot_class_path.size());
   for (const std::string& str : boot_class_path) {
     expandBufAddUtf8String(pReply, str);
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 3d25910..fb5e81b 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -22,7 +22,7 @@
 #include <vector>
 
 #include "base/array_ref.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 
 namespace art {
 
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 4a3ef07..e43d771 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -291,22 +291,6 @@
   return success;
 }
 
-void Jit::CreateThreadPool() {
-  if (Runtime::Current()->IsSafeMode()) {
-    // Never create the pool in safe mode.
-    return;
-  }
-  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
-  // is not null when we instrument.
-
-  // We need peers as we may report the JIT thread, e.g., in the debugger.
-  constexpr bool kJitPoolNeedsPeers = true;
-  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
-
-  thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
-  Start();
-}
-
 void Jit::DeleteThreadPool() {
   Thread* self = Thread::Current();
   DCHECK(Runtime::Current()->IsShuttingDown(self));
@@ -562,10 +546,10 @@
 
 class JitCompileTask final : public Task {
  public:
-  enum TaskKind {
+  enum class TaskKind {
     kAllocateProfile,
     kCompile,
-    kCompileOsr
+    kCompileOsr,
   };
 
   JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
@@ -582,14 +566,20 @@
 
   void Run(Thread* self) override {
     ScopedObjectAccess soa(self);
-    if (kind_ == kCompile) {
-      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ false);
-    } else if (kind_ == kCompileOsr) {
-      Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ true);
-    } else {
-      DCHECK(kind_ == kAllocateProfile);
-      if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
-        VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+    switch (kind_) {
+      case TaskKind::kCompile:
+      case TaskKind::kCompileOsr: {
+        Runtime::Current()->GetJit()->CompileMethod(
+            method_,
+            self,
+            /* osr= */ (kind_ == TaskKind::kCompileOsr));
+        break;
+      }
+      case TaskKind::kAllocateProfile: {
+        if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
+          VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+        }
+        break;
       }
     }
     ProfileSaver::NotifyJitActivity();
@@ -607,6 +597,18 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
 };
 
+void Jit::CreateThreadPool() {
+  // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
+  // is not null when we instrument.
+
+  // We need peers as we may report the JIT thread, e.g., in the debugger.
+  constexpr bool kJitPoolNeedsPeers = true;
+  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
+
+  thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
+  Start();
+}
+
 static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (method->IsClassInitializer() || !method->IsCompilable()) {
     // We do not want to compile such methods.
@@ -630,11 +632,10 @@
 
 void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
   if (thread_pool_ == nullptr) {
-    // Should only see this when shutting down, starting up, or in zygote, which doesn't
-    // have a thread pool.
+    // Should only see this when shutting down, starting up, or in safe mode.
     DCHECK(Runtime::Current()->IsShuttingDown(self) ||
            !Runtime::Current()->IsFinishedStarting() ||
-           Runtime::Current()->IsZygote());
+           Runtime::Current()->IsSafeMode());
     return;
   }
   if (IgnoreSamplesForMethod(method)) {
@@ -675,7 +676,8 @@
       if (!success) {
         // We failed allocating. Instead of doing the collection on the Java thread, we push
         // an allocation to a compiler thread, that will do the collection.
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kAllocateProfile));
+        thread_pool_->AddTask(
+            self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
       }
     }
     // Avoid jumping more than one state at a time.
@@ -685,7 +687,7 @@
       if ((new_count >= HotMethodThreshold()) &&
           !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
         DCHECK(thread_pool_ != nullptr);
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
+        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
       }
       // Avoid jumping more than one state at a time.
       new_count = std::min(new_count, static_cast<uint32_t>(OSRMethodThreshold() - 1));
@@ -697,7 +699,8 @@
       DCHECK(!method->IsNative());  // No back edges reported for native methods.
       if ((new_count >= OSRMethodThreshold()) &&  !code_cache_->IsOsrCompiled(method)) {
         DCHECK(thread_pool_ != nullptr);
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
+        thread_pool_->AddTask(
+            self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
       }
     }
   }
@@ -730,7 +733,7 @@
         // The compiler requires a ProfilingInfo object for non-native methods.
         ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
       }
-      JitCompileTask compile_task(method, JitCompileTask::kCompile);
+      JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile);
       // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
       ScopedSetRuntimeThread ssrt(thread);
       compile_task.Run(thread);
@@ -798,7 +801,16 @@
   }
 }
 
-void Jit::PostForkChildAction() {
+void Jit::PostForkChildAction(bool is_zygote) {
+  if (is_zygote) {
+    // Don't transition if this is for a child zygote.
+    return;
+  }
+  if (Runtime::Current()->IsSafeMode()) {
+    // Delete the thread pool, we are not going to JIT.
+    thread_pool_.reset(nullptr);
+    return;
+  }
   // At this point, the compiler options have been adjusted to the particular configuration
   // of the forked child. Parse them again.
   jit_update_options_(jit_compiler_handle_);
@@ -806,6 +818,28 @@
   // Adjust the status of code cache collection: the status from zygote was to not collect.
   code_cache_->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
       !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+
+  if (thread_pool_ != nullptr) {
+    // Remove potential tasks that have been inherited from the zygote.
+    thread_pool_->RemoveAllTasks(Thread::Current());
+
+    // Resume JIT compilation.
+    thread_pool_->CreateThreads();
+  }
+}
+
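+// Tear down the JIT worker threads before the zygote forks; PostZygoteFork() recreates them.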
+void Jit::PreZygoteFork() {
+  if (thread_pool_ == nullptr) {
+    return;
+  }
+  thread_pool_->DeleteThreads();
+}
+
+void Jit::PostZygoteFork() {
+  if (thread_pool_ == nullptr) {
+    return;
+  }
+  thread_pool_->CreateThreads();
 }
 
 }  // namespace jit
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e12b032..7ce5f07 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -285,8 +285,14 @@
   // Start JIT threads.
   void Start();
 
-  // Transition to a zygote child state.
-  void PostForkChildAction();
+  // Transition to a child state.
+  void PostForkChildAction(bool is_zygote);
+
+  // Prepare for forking.
+  void PreZygoteFork();
+
+  // Adjust state after forking.
+  void PostZygoteFork();
 
  private:
   Jit(JitCodeCache* code_cache, JitOptions* options);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 97887cc..1d53a58 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -436,6 +436,12 @@
   initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
   max_capacity = RoundDown(max_capacity, 2 * kPageSize);
 
+  used_memory_for_data_ = 0;
+  used_memory_for_code_ = 0;
+  number_of_compilations_ = 0;
+  number_of_osr_compilations_ = 0;
+  number_of_collections_ = 0;
+
   data_pages_ = MemMap();
   exec_pages_ = MemMap();
   non_exec_pages_ = MemMap();
@@ -477,7 +483,7 @@
 JitCodeCache::~JitCodeCache() {}
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
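+  // The code may reside either in this cache's executable pages or in the executable
+  // pages inherited from the zygote.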
-  return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
+  return exec_pages_.HasAddress(ptr) || zygote_exec_pages_.HasAddress(ptr);
 }
 
 bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -1321,7 +1327,7 @@
             return true;
           }
           const void* code = method_header->GetCode();
-          if (code_cache_->ContainsPc(code)) {
+          if (code_cache_->ContainsPc(code) && !code_cache_->IsInZygoteExecSpace(code)) {
             // Use the atomic set version, as multiple threads are executing this code.
             bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
           }
@@ -1493,7 +1499,7 @@
         // interpreter will update its entry point to the compiled code and call it.
         for (ProfilingInfo* info : profiling_infos_) {
           const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-          if (ContainsPc(entry_point)) {
+          if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
             info->SetSavedEntryPoint(entry_point);
             // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
             // class of the method. We may be concurrently running a GC which makes accessing
@@ -1508,7 +1514,7 @@
         // Change entry points of native methods back to the GenericJNI entrypoint.
         for (const auto& entry : jni_stubs_map_) {
           const JniStubData& data = entry.second;
-          if (!data.IsCompiled()) {
+          if (!data.IsCompiled() || IsInZygoteExecSpace(data.GetCode())) {
             continue;
           }
           // Make sure a single invocation of the GenericJNI trampoline tries to recompile.
@@ -1540,7 +1546,9 @@
     // Iterate over all compiled code and remove entries that are not marked.
     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
       JniStubData* data = &it->second;
-      if (!data->IsCompiled() || GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
+      if (IsInZygoteExecSpace(data->GetCode()) ||
+          !data->IsCompiled() ||
+          GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
         ++it;
       } else {
         method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
@@ -1550,7 +1558,7 @@
     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
       const void* code_ptr = it->first;
       uintptr_t allocation = FromCodeToAllocation(code_ptr);
-      if (GetLiveBitmap()->Test(allocation)) {
+      if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
         ++it;
       } else {
         OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
@@ -1571,7 +1579,7 @@
       // Also remove the saved entry point from the ProfilingInfo objects.
       for (ProfilingInfo* info : profiling_infos_) {
         const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-        if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
+        if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
           info->GetMethod()->SetProfilingInfo(nullptr);
         }
 
@@ -1596,6 +1604,9 @@
     for (const auto& entry : jni_stubs_map_) {
       const JniStubData& data = entry.second;
       const void* code_ptr = data.GetCode();
+      if (IsInZygoteExecSpace(code_ptr)) {
+        continue;
+      }
       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       for (ArtMethod* method : data.GetMethods()) {
         if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
@@ -1607,6 +1618,9 @@
     for (const auto& it : method_code_map_) {
       ArtMethod* method = it.second;
       const void* code_ptr = it.first;
+      if (IsInZygoteExecSpace(code_ptr)) {
+        continue;
+      }
       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
@@ -1953,6 +1967,7 @@
         instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
       }
       if (collection_in_progress_) {
+        CHECK(!IsInZygoteExecSpace(data->GetCode()));
         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
       }
     }
@@ -2057,6 +2072,7 @@
 }
 
 void JitCodeCache::FreeCode(uint8_t* code) {
+  CHECK(!IsInZygoteExecSpace(code));
   used_memory_for_code_ -= mspace_usable_size(code);
   mspace_free(exec_mspace_, code);
 }
@@ -2068,6 +2084,7 @@
 }
 
 void JitCodeCache::FreeData(uint8_t* data) {
+  CHECK(!IsInZygoteDataSpace(data));
   used_memory_for_data_ -= mspace_usable_size(data);
   mspace_free(data_mspace_, data);
 }
@@ -2091,13 +2108,11 @@
 }
 
 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
+  if (is_zygote) {
+    // Don't transition if this is for a child zygote.
+    return;
+  }
   MutexLock mu(Thread::Current(), lock_);
-  // Currently, we don't expect any compilations from zygote.
-  CHECK_EQ(number_of_compilations_, 0u);
-  CHECK_EQ(number_of_osr_compilations_, 0u);
-  CHECK(jni_stubs_map_.empty());
-  CHECK(method_code_map_.empty());
-  CHECK(osr_code_map_.empty());
 
   zygote_data_pages_ = std::move(data_pages_);
   zygote_exec_pages_ = std::move(exec_pages_);
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 7a838fd..e2f3357 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,6 +71,7 @@
 
 namespace jit {
 
+class MarkCodeClosure;
 class ScopedCodeCacheWrite;
 
 // Alignment in bits that will suit all architectures.
@@ -387,6 +388,14 @@
 
   const MemMap* GetUpdatableCodeMapping() const;
 
+  bool IsInZygoteDataSpace(const void* ptr) const {
+    return zygote_data_pages_.HasAddress(ptr);
+  }
+
+  bool IsInZygoteExecSpace(const void* ptr) const {
+    return zygote_exec_pages_.HasAddress(ptr);
+  }
+
   bool IsWeakAccessEnabled(Thread* self) const;
   void WaitUntilInlineCacheAccessible(Thread* self)
       REQUIRES(!lock_)
@@ -487,6 +496,7 @@
 
   friend class art::JitJniStubTestHelper;
   friend class ScopedCodeCacheWrite;
+  friend class MarkCodeClosure;
 
   DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
 };
diff --git a/runtime/jni/jni_env_ext.cc b/runtime/jni/jni_env_ext.cc
index efe43ee..976f89b 100644
--- a/runtime/jni/jni_env_ext.cc
+++ b/runtime/jni/jni_env_ext.cc
@@ -21,6 +21,7 @@
 
 #include "android-base/stringprintf.h"
 
+#include "base/mutex.h"
 #include "base/to_str.h"
 #include "check_jni.h"
 #include "indirect_reference_table.h"
diff --git a/runtime/jni/jni_env_ext.h b/runtime/jni/jni_env_ext.h
index 3a007ad..61de074 100644
--- a/runtime/jni/jni_env_ext.h
+++ b/runtime/jni/jni_env_ext.h
@@ -19,8 +19,8 @@
 
 #include <jni.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "indirect_reference_table.h"
 #include "obj_ptr.h"
 #include "reference_table.h"
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index b42d995..d03749c 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_JVALUE_H_
 #define ART_RUNTIME_JVALUE_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 
 #include <stdint.h>
 
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index 6a0f075..3fb83ac 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -23,8 +23,8 @@
 
 #include <android-base/logging.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/bit_utils.h"
 
 namespace art {
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index e3cb12f..783ba6a 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_CLASS_LOADER_H_
 #define ART_RUNTIME_MIRROR_CLASS_LOADER_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "obj_ptr.h"
 #include "object.h"
 #include "object_reference.h"
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 58b199d..c742928 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -19,7 +19,7 @@
 
 #include "array.h"
 #include "base/bit_utils.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file_types.h"
 #include "gc_root.h"  // Note: must not use -inl here to avoid circular dependency.
 #include "object.h"
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index d6a39aa..8636928 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -19,7 +19,7 @@
 
 #include "base/atomic.h"
 #include "base/globals.h"
-#include "base/mutex.h"  // For Locks::mutator_lock_.
+#include "base/locks.h"  // For Locks::mutator_lock_.
 #include "heap_poisoning.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 63c5ae5..9ace4f7 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -18,8 +18,8 @@
 #define ART_RUNTIME_MIRROR_REFERENCE_H_
 
 #include "base/enums.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "obj_ptr.h"
 #include "object.h"
 #include "read_barrier_option.h"
diff --git a/runtime/monitor_objects_stack_visitor.h b/runtime/monitor_objects_stack_visitor.h
index c943402..3968239 100644
--- a/runtime/monitor_objects_stack_visitor.h
+++ b/runtime/monitor_objects_stack_visitor.h
@@ -20,7 +20,7 @@
 #include <android-base/logging.h>
 
 #include "art_method.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "monitor.h"
 #include "stack.h"
 #include "thread.h"
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index e213dc7..3e5003c 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -24,7 +24,8 @@
 #include <limits.h>
 #include "nativehelper/scoped_utf_chars.h"
 
-#include "android-base/stringprintf.h"
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
 
 #include "arch/instruction_set.h"
 #include "art_method-inl.h"
@@ -222,7 +223,8 @@
 }
 
 static jstring VMRuntime_bootClassPath(JNIEnv* env, jobject) {
-  return env->NewStringUTF(DefaultToDot(Runtime::Current()->GetBootClassPathString()));
+  std::string boot_class_path = android::base::Join(Runtime::Current()->GetBootClassPath(), ':');
+  return env->NewStringUTF(DefaultToDot(boot_class_path));
 }
 
 static jstring VMRuntime_classPath(JNIEnv* env, jobject) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 530371d..b7ac1e8 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -44,10 +44,6 @@
 #include "thread_list.h"
 #include "trace.h"
 
-#if defined(__linux__)
-#include <sys/prctl.h>
-#endif
-
 #include <sys/resource.h>
 
 namespace art {
@@ -59,37 +55,6 @@
 
 using android::base::StringPrintf;
 
-static void EnableDebugger() {
-#if defined(__linux__)
-  // To let a non-privileged gdbserver attach to this
-  // process, we must set our dumpable flag.
-  if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) == -1) {
-    PLOG(ERROR) << "prctl(PR_SET_DUMPABLE) failed for pid " << getpid();
-  }
-
-  // Even if Yama is on a non-privileged native debugger should
-  // be able to attach to the debuggable app.
-  if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == -1) {
-    // if Yama is off prctl(PR_SET_PTRACER) returns EINVAL - don't log in this
-    // case since it's expected behaviour.
-    if (errno != EINVAL) {
-      PLOG(ERROR) << "prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) failed for pid " << getpid();
-    }
-  }
-#endif
-  // We don't want core dumps, though, so set the soft limit on core dump size
-  // to 0 without changing the hard limit.
-  rlimit rl;
-  if (getrlimit(RLIMIT_CORE, &rl) == -1) {
-    PLOG(ERROR) << "getrlimit(RLIMIT_CORE) failed for pid " << getpid();
-  } else {
-    rl.rlim_cur = 0;
-    if (setrlimit(RLIMIT_CORE, &rl) == -1) {
-      PLOG(ERROR) << "setrlimit(RLIMIT_CORE) failed for pid " << getpid();
-    }
-  }
-}
-
 class ClassSet {
  public:
   // The number of classes we reasonably expect to have to look at. Realistically the number is more
@@ -211,9 +176,6 @@
   }
 
   Dbg::SetJdwpAllowed((runtime_flags & DEBUG_ENABLE_JDWP) != 0);
-  if ((runtime_flags & DEBUG_ENABLE_JDWP) != 0) {
-    EnableDebugger();
-  }
   runtime_flags &= ~DEBUG_ENABLE_JDWP;
 
   const bool safe_mode = (runtime_flags & DEBUG_ENABLE_SAFEMODE) != 0;
@@ -287,6 +249,13 @@
   return reinterpret_cast<jlong>(ThreadForEnv(env));
 }
 
+static void ZygoteHooks_nativePostZygoteFork(JNIEnv*, jclass) {
+  Runtime* runtime = Runtime::Current();
+  if (runtime->IsZygote()) {
+    runtime->PostZygoteFork();
+  }
+}
+
 static void ZygoteHooks_nativePostForkSystemServer(JNIEnv* env ATTRIBUTE_UNUSED,
                                                    jclass klass ATTRIBUTE_UNUSED) {
   // This JIT code cache for system server is created whilst the runtime is still single threaded.
@@ -343,7 +312,7 @@
           /* is_system_server= */ false, is_zygote);
     }
     // This must be called after EnableDebugFeatures.
-    Runtime::Current()->GetJit()->PostForkChildAction();
+    Runtime::Current()->GetJit()->PostForkChildAction(is_zygote);
   }
 
   // Update tracing.
@@ -441,6 +410,7 @@
 
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(ZygoteHooks, nativePreFork, "()J"),
+  NATIVE_METHOD(ZygoteHooks, nativePostZygoteFork, "()V"),
   NATIVE_METHOD(ZygoteHooks, nativePostForkSystemServer, "()V"),
   NATIVE_METHOD(ZygoteHooks, nativePostForkChild, "(JIZZLjava/lang/String;)V"),
   NATIVE_METHOD(ZygoteHooks, startZygoteNoThreadCreation, "()V"),
diff --git a/runtime/non_debuggable_classes.h b/runtime/non_debuggable_classes.h
index e1b5633..e2c51e6 100644
--- a/runtime/non_debuggable_classes.h
+++ b/runtime/non_debuggable_classes.h
@@ -19,7 +19,7 @@
 
 #include <vector>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 
 namespace art {
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index 71c6a82..ffec179 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -18,7 +18,7 @@
 #define ART_RUNTIME_NTH_CALLER_VISITOR_H_
 
 #include "art_method.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "stack.h"
 
 namespace art {
diff --git a/runtime/oat.h b/runtime/oat.h
index ee46f42..b09c81e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,8 +31,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: Image checksums.
-  static constexpr uint8_t kOatVersion[] = { '1', '6', '4', '\0' };
+  // Last oat version changed reason: Pass boot class path to LoadBootImage.
+  static constexpr uint8_t kOatVersion[] = { '1', '6', '5', '\0' };
 
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
   static constexpr const char* kDebuggableKey = "debuggable";
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 2c882ec..9552ca3 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -27,6 +27,7 @@
 #include "base/bit_vector-inl.h"
 #include "base/file_utils.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "class_linker.h"
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
index 7d96a7a..99e1b73 100644
--- a/runtime/oat_file_manager.h
+++ b/runtime/oat_file_manager.h
@@ -23,8 +23,8 @@
 #include <unordered_map>
 #include <vector>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "jni.h"
 
 namespace art {
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index efbb66f..9e2ee29 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -21,8 +21,8 @@
 #include <type_traits>
 
 #include "base/globals.h"
+#include "base/locks.h"  // For Locks::mutator_lock_.
 #include "base/macros.h"
-#include "base/mutex.h"  // For Locks::mutator_lock_.
 
 // Always inline ObjPtr methods even in debug builds.
 #define OBJPTR_INLINE __attribute__ ((always_inline))
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index 5916f90..15b763a 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -17,8 +17,8 @@
 #ifndef ART_RUNTIME_OBJECT_LOCK_H_
 #define ART_RUNTIME_OBJECT_LOCK_H_
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "handle.h"
 
 namespace art {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 29b5690..17ff3a2 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -20,6 +20,7 @@
 #include <sstream>
 
 #include <android-base/logging.h>
+#include <android-base/strings.h>
 
 #include "base/file_utils.h"
 #include "base/macros.h"
@@ -78,7 +79,7 @@
       .Define("-showversion")
           .IntoKey(M::ShowVersion)
       .Define("-Xbootclasspath:_")
-          .WithType<std::string>()
+          .WithType<ParseStringList<':'>>()  // std::vector<std::string>, split by :
           .IntoKey(M::BootClassPath)
       .Define("-Xbootclasspath-locations:_")
           .WithType<ParseStringList<':'>>()  // std::vector<std::string>, split by :
@@ -513,7 +514,7 @@
                  GetInstructionSetString(kRuntimeISA));
     Exit(0);
   } else if (args.Exists(M::BootClassPath)) {
-    LOG(INFO) << "setting boot class path to " << *args.Get(M::BootClassPath);
+    LOG(INFO) << "setting boot class path to " << args.Get(M::BootClassPath)->Join();
   }
 
   if (args.GetOrDefault(M::Interpret)) {
@@ -525,8 +526,9 @@
   }
 
   // Set a default boot class path if we didn't get an explicit one via command line.
-  if (getenv("BOOTCLASSPATH") != nullptr) {
-    args.SetIfMissing(M::BootClassPath, std::string(getenv("BOOTCLASSPATH")));
+  const char* env_bcp = getenv("BOOTCLASSPATH");
+  if (env_bcp != nullptr) {
+    args.SetIfMissing(M::BootClassPath, ParseStringList<':'>::Split(env_bcp));
   }
 
   // Set a default class path if we didn't get an explicit one via command line.
@@ -586,22 +588,20 @@
     args.Set(M::BackgroundGc, BackgroundGcOption { background_collector_type_ });
   }
 
-  auto boot_class_path_string = args.GetOrDefault(M::BootClassPath);
-  {
-    auto&& boot_class_path = args.GetOrDefault(M::BootClassPath);
-    auto&& boot_class_path_locations = args.GetOrDefault(M::BootClassPathLocations);
-    if (args.Exists(M::BootClassPathLocations)) {
-      size_t boot_class_path_count = ParseStringList<':'>::Split(boot_class_path).Size();
-
-      if (boot_class_path_count != boot_class_path_locations.Size()) {
-        Usage("The number of boot class path files does not match"
-            " the number of boot class path locations given\n"
-            "  boot class path files     (%zu): %s\n"
-            "  boot class path locations (%zu): %s\n",
-            boot_class_path.size(), boot_class_path_string.c_str(),
-            boot_class_path_locations.Size(), boot_class_path_locations.Join().c_str());
-        return false;
-      }
+  const ParseStringList<':'>* boot_class_path_locations = args.Get(M::BootClassPathLocations);
+  if (boot_class_path_locations != nullptr && boot_class_path_locations->Size() != 0u) {
+    const ParseStringList<':'>* boot_class_path = args.Get(M::BootClassPath);
+    if (boot_class_path == nullptr ||
+        boot_class_path_locations->Size() != boot_class_path->Size()) {
+      Usage("The number of boot class path files does not match"
+          " the number of boot class path locations given\n"
+          "  boot class path files     (%zu): %s\n"
+          "  boot class path locations (%zu): %s\n",
+          (boot_class_path != nullptr) ? boot_class_path->Size() : 0u,
+          (boot_class_path != nullptr) ? boot_class_path->Join().c_str() : "<nil>",
+          boot_class_path_locations->Size(),
+          boot_class_path_locations->Join().c_str());
+      return false;
     }
   }
 
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 705cc6c..cbb7b82 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -40,8 +40,7 @@
   boot_class_path += "-Xbootclasspath:";
 
   bool first_dex_file = true;
-  for (const std::string &dex_file_name :
-           CommonRuntimeTest::GetLibCoreDexFileNames()) {
+  for (const std::string &dex_file_name : CommonRuntimeTest::GetLibCoreDexFileNames()) {
     if (!first_dex_file) {
       class_path += ":";
     } else {
@@ -50,6 +49,8 @@
     class_path += dex_file_name;
   }
   boot_class_path += class_path;
+  std::vector<std::string> expected_boot_class_path;
+  Split(class_path, ':', &expected_boot_class_path);
 
   RuntimeOptions options;
   options.push_back(std::make_pair(boot_class_path.c_str(), nullptr));
@@ -78,9 +79,11 @@
   using Opt = RuntimeArgumentMap;
 
 #define EXPECT_PARSED_EQ(expected, actual_key) EXPECT_EQ(expected, map.GetOrDefault(actual_key))
+#define EXPECT_PARSED_EQ_AS_STRING_VECTOR(expected, actual_key) \
+  EXPECT_EQ(expected, static_cast<std::vector<std::string>>(map.GetOrDefault(actual_key)))
 #define EXPECT_PARSED_EXISTS(actual_key) EXPECT_TRUE(map.Exists(actual_key))
 
-  EXPECT_PARSED_EQ(class_path, Opt::BootClassPath);
+  EXPECT_PARSED_EQ_AS_STRING_VECTOR(expected_boot_class_path, Opt::BootClassPath);
   EXPECT_PARSED_EQ(class_path, Opt::ClassPath);
   EXPECT_PARSED_EQ(std::string("boot_image"), Opt::Image);
   EXPECT_PARSED_EXISTS(Opt::CheckJni);
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 0741da6..3b89377 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -19,8 +19,8 @@
 
 #include <android-base/logging.h>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/runtime_debug.h"
 #include "gc_root.h"
 #include "jni.h"
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 6af5ca5..6388944 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -23,7 +23,7 @@
 #include <vector>
 
 #include "base/allocator.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 74580a2..574e302 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_REFLECTION_H_
 #define ART_RUNTIME_REFLECTION_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/primitive.h"
 #include "jni.h"
 #include "obj_ptr.h"
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index e6cc471..2ffaf98 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -23,6 +23,7 @@
 #include "art_method.h"
 #include "base/callee_save_type.h"
 #include "base/casts.h"
+#include "base/mutex.h"
 #include "entrypoints/quick/callee_save_frame.h"
 #include "gc_root-inl.h"
 #include "interpreter/mterp/mterp.h"
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index ab79b9e..11e0bf4 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -34,6 +34,7 @@
 #include <cstdio>
 #include <cstdlib>
 #include <limits>
+#include <thread>
 #include <vector>
 
 #include "android-base/strings.h"
@@ -233,8 +234,7 @@
       class_linker_(nullptr),
       signal_catcher_(nullptr),
       java_vm_(nullptr),
-      fault_message_lock_("Fault message lock"),
-      fault_message_(""),
+      fault_message_(nullptr),
       threads_being_born_(0),
       shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
       shutting_down_(false),
@@ -278,7 +278,6 @@
       // Initially assume we perceive jank in case the process state is never updated.
       process_state_(kProcessStateJankPerceptible),
       zygote_no_threads_(false),
-      process_cpu_start_time_(ProcessCpuNanoTime()),
       verifier_logging_threshold_ms_(100) {
   static_assert(Runtime::kCalleeSaveSize ==
                     static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
@@ -322,20 +321,26 @@
   }
 
   if (dump_gc_performance_on_shutdown_) {
-    process_cpu_end_time_ = ProcessCpuNanoTime();
+    heap_->CalculateWeightedAllocatedBytes();
+    uint64_t process_cpu_end_time = ProcessCpuNanoTime();
     ScopedLogSeverity sls(LogSeverity::INFO);
     // This can't be called from the Heap destructor below because it
     // could call RosAlloc::InspectAll() which needs the thread_list
     // to be still alive.
     heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
 
-    uint64_t process_cpu_time = process_cpu_end_time_ - process_cpu_start_time_;
+    uint64_t process_cpu_time = process_cpu_end_time - heap_->GetProcessCpuStartTime();
     uint64_t gc_cpu_time = heap_->GetTotalGcCpuTime();
     float ratio = static_cast<float>(gc_cpu_time) / process_cpu_time;
     LOG_STREAM(INFO) << "GC CPU time " << PrettyDuration(gc_cpu_time)
         << " out of process CPU time " << PrettyDuration(process_cpu_time)
         << " (" << ratio << ")"
         << "\n";
+    float weighted_allocated_bytes =
+      static_cast<float>(heap_->GetWeightedAllocatedBytes()) / process_cpu_time;
+    LOG_STREAM(INFO) << "Weighted bytes allocated over CPU time: "
+        << " (" <<  PrettySize(weighted_allocated_bytes)  << ")"
+        << "\n";
   }
 
   if (jit_ != nullptr) {
@@ -388,6 +393,11 @@
     jit_->DeleteThreadPool();
   }
 
+  // Thread pools must be deleted before the runtime shuts down to avoid hanging.
+  if (thread_pool_ != nullptr) {
+    thread_pool_.reset();
+  }
+
   // Make sure our internal threads are dead before we start tearing down things they're using.
   GetRuntimeCallbacks()->StopDebugger();
   delete signal_catcher_;
@@ -590,9 +600,18 @@
 }
 
 void Runtime::PreZygoteFork() {
+  if (GetJit() != nullptr) {
+    GetJit()->PreZygoteFork();
+  }
   heap_->PreZygoteFork();
 }
 
+void Runtime::PostZygoteFork() {
+  if (GetJit() != nullptr) {
+    GetJit()->PostZygoteFork();
+  }
+}
+
 void Runtime::CallExitHook(jint status) {
   if (exit_ != nullptr) {
     ScopedThreadStateChange tsc(Thread::Current(), kNative);
@@ -906,8 +925,13 @@
     }
   }
 
-  if (jit_ != nullptr) {
-    jit_->CreateThreadPool();
+  if (thread_pool_ == nullptr) {
+    constexpr size_t kStackSize = 64 * KB;
+    constexpr size_t kMaxRuntimeWorkers = 4u;
+    const size_t num_workers =
+        std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
+    thread_pool_.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize));
+    thread_pool_->StartWorkers(Thread::Current());
   }
 
   // Create the thread pools.
@@ -1073,7 +1097,45 @@
   Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
                 runtime_options.GetOrDefault(Opt::StackDumpLockProfThreshold));
 
-  boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
+  image_location_ = runtime_options.GetOrDefault(Opt::Image);
+  SetInstructionSet(runtime_options.GetOrDefault(Opt::ImageInstructionSet));
+  boot_class_path_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
+  boot_class_path_locations_ = runtime_options.ReleaseOrDefault(Opt::BootClassPathLocations);
+  DCHECK(boot_class_path_locations_.empty() ||
+         boot_class_path_locations_.size() == boot_class_path_.size());
+  if (boot_class_path_.empty()) {
+    // Try to extract the boot class path from the system boot image.
+    if (image_location_.empty()) {
+      LOG(ERROR) << "Empty boot class path, cannot continue without image.";
+      return false;
+    }
+    std::string system_oat_filename = ImageHeader::GetOatLocationFromImageLocation(
+        GetSystemImageFilename(image_location_.c_str(), instruction_set_));
+    std::string system_oat_location = ImageHeader::GetOatLocationFromImageLocation(image_location_);
+    std::string error_msg;
+    std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                    system_oat_filename,
+                                                    system_oat_location,
+                                                    /*executable=*/ false,
+                                                    /*low_4gb=*/ false,
+                                                    /*abs_dex_location=*/ nullptr,
+                                                    /*reservation=*/ nullptr,
+                                                    &error_msg));
+    if (oat_file == nullptr) {
+      LOG(ERROR) << "Could not open boot oat file for extracting boot class path: " << error_msg;
+      return false;
+    }
+    const OatHeader& oat_header = oat_file->GetOatHeader();
+    const char* oat_boot_class_path = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
+    if (oat_boot_class_path != nullptr) {
+      Split(oat_boot_class_path, ':', &boot_class_path_);
+    }
+    if (boot_class_path_.empty()) {
+      LOG(ERROR) << "Boot class path missing from boot image oat file " << oat_file->GetLocation();
+      return false;
+    }
+  }
+
   class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
   properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
 
@@ -1099,7 +1161,6 @@
     }
   }
   image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
-  image_location_ = runtime_options.GetOrDefault(Opt::Image);
 
   max_spins_before_thin_lock_inflation_ =
       runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);
@@ -1168,8 +1229,10 @@
                        foreground_heap_growth_multiplier,
                        runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
                        runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
-                       runtime_options.GetOrDefault(Opt::Image),
-                       runtime_options.GetOrDefault(Opt::ImageInstructionSet),
+                       GetBootClassPath(),
+                       GetBootClassPathLocations(),
+                       image_location_,
+                       instruction_set_,
                        // Override the collector type to CC if the read barrier config.
                        kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
                        kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
@@ -1369,16 +1432,6 @@
         image_space->VerifyImageAllocations();
       }
     }
-    if (boot_class_path_string_.empty()) {
-      // The bootclasspath is not explicitly specified: construct it from the loaded dex files.
-      const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
-      std::vector<std::string> dex_locations;
-      dex_locations.reserve(boot_class_path.size());
-      for (const DexFile* dex_file : boot_class_path) {
-        dex_locations.push_back(dex_file->GetLocation());
-      }
-      boot_class_path_string_ = android::base::Join(dex_locations, ':');
-    }
     {
       ScopedTrace trace2("AddImageStringsToTable");
       for (gc::space::ImageSpace* image_space : heap_->GetBootImageSpaces()) {
@@ -1391,24 +1444,12 @@
       DeoptimizeBootImage();
     }
   } else {
-    std::vector<std::string> dex_filenames;
-    Split(boot_class_path_string_, ':', &dex_filenames);
-
-    std::vector<std::string> dex_locations;
-    if (!runtime_options.Exists(Opt::BootClassPathLocations)) {
-      dex_locations = dex_filenames;
-    } else {
-      dex_locations = runtime_options.GetOrDefault(Opt::BootClassPathLocations);
-      CHECK_EQ(dex_filenames.size(), dex_locations.size());
-    }
-
     std::vector<std::unique_ptr<const DexFile>> boot_class_path;
     if (runtime_options.Exists(Opt::BootClassPathDexList)) {
       boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
     } else {
-      OpenDexFiles(dex_filenames, dex_locations, &boot_class_path);
+      OpenDexFiles(GetBootClassPath(), GetBootClassPathLocations(), &boot_class_path);
     }
-    instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
     if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
       LOG(ERROR) << "Could not initialize without image: " << error_msg;
       return false;
@@ -2347,8 +2388,27 @@
 }
 
 void Runtime::SetFaultMessage(const std::string& message) {
-  MutexLock mu(Thread::Current(), fault_message_lock_);
-  fault_message_ = message;
+  std::string* new_msg = new std::string(message);
+  std::string* cur_msg = fault_message_.exchange(new_msg);
+  delete cur_msg;
+}
+
+std::string Runtime::GetFaultMessage() {
+  // Retrieve the message. Temporarily replace with null so that SetFaultMessage will not delete
+  // the string in parallel.
+  std::string* cur_msg = fault_message_.exchange(nullptr);
+
+  // Make a copy of the string.
+  std::string ret = cur_msg == nullptr ? "" : *cur_msg;
+
+  // Put the message back if it hasn't been updated.
+  std::string* null_str = nullptr;
+  if (!fault_message_.compare_exchange_strong(null_str, cur_msg)) {
+    // Already replaced.
+    delete cur_msg;
+  }
+
+  return ret;
 }
 
 void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
@@ -2410,6 +2470,8 @@
     LOG(WARNING) << "Failed to allocate JIT";
     // Release JIT code cache resources (several MB of memory).
     jit_code_cache_.reset();
+  } else {
+    jit->CreateThreadPool();
   }
 }
 
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 0ccc7b7..b76a658 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -28,9 +28,9 @@
 #include <vector>
 
 #include "arch/instruction_set.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
-#include "base/mutex.h"
 #include "deoptimization_kind.h"
 #include "dex/dex_file_types.h"
 #include "experimental_flags.h"
@@ -99,6 +99,7 @@
 class StackOverflowHandler;
 class SuspensionHandler;
 class ThreadList;
+class ThreadPool;
 class Trace;
 struct TraceConfig;
 class Transaction;
@@ -242,8 +243,14 @@
 
   ~Runtime();
 
-  const std::string& GetBootClassPathString() const {
-    return boot_class_path_string_;
+  const std::vector<std::string>& GetBootClassPath() const {
+    return boot_class_path_;
+  }
+
+  const std::vector<std::string>& GetBootClassPathLocations() const {
+    DCHECK(boot_class_path_locations_.empty() ||
+           boot_class_path_locations_.size() == boot_class_path_.size());
+    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
   }
 
   const std::string& GetClassPathString() const {
@@ -444,6 +451,7 @@
   bool UseJitCompilation() const;
 
   void PreZygoteFork();
+  void PostZygoteFork();
   void InitNonZygoteOrPostFork(
       JNIEnv* env,
       bool is_system_server,
@@ -510,12 +518,7 @@
   void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
-  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
-  // with the unexpected_signal_lock_.
-  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
-    return fault_message_;
-  }
+  void SetFaultMessage(const std::string& message);
 
   void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
 
@@ -789,6 +792,10 @@
     return verifier_logging_threshold_ms_;
   }
 
+  ThreadPool* GetThreadPool() {
+    return thread_pool_.get();
+  }
+
  private:
   static void InitPlatformSignalHandlers();
 
@@ -819,6 +826,12 @@
   void VisitConstantRoots(RootVisitor* visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Note: To be lock-free, GetFaultMessage temporarily replaces the fault message with null.
+  //       As such, there is a window where a call will return an empty string. In general,
+  //       only aborting code should retrieve this data (via the GetFaultMessageForAbortLogging
+  //       friend).
+  std::string GetFaultMessage();
+
   // A pointer to the active runtime or null.
   static Runtime* instance_;
 
@@ -859,7 +872,8 @@
   std::vector<std::string> image_compiler_options_;
   std::string image_location_;
 
-  std::string boot_class_path_string_;
+  std::vector<std::string> boot_class_path_;
+  std::vector<std::string> boot_class_path_locations_;
   std::string class_path_string_;
   std::vector<std::string> properties_;
 
@@ -882,6 +896,9 @@
   // Shared linear alloc for now.
   std::unique_ptr<LinearAlloc> linear_alloc_;
 
+  // Thread pool
+  std::unique_ptr<ThreadPool> thread_pool_;
+
   // The number of spins that are done before thread suspension is used to forcibly inflate.
   size_t max_spins_before_thin_lock_inflation_;
   MonitorList* monitor_list_;
@@ -901,9 +918,9 @@
   std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
   std::unique_ptr<jit::JitOptions> jit_options_;
 
-  // Fault message, printed when we get a SIGSEGV.
-  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::string fault_message_ GUARDED_BY(fault_message_lock_);
+  // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
+  // lock-free, so needs to be atomic.
+  std::atomic<std::string*> fault_message_;
 
   // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
   // the shutdown lock so that threads aren't born while we're shutting down.
@@ -1101,11 +1118,11 @@
 
   MemMap protected_fault_page_;
 
-  uint64_t process_cpu_start_time_;
-  uint64_t process_cpu_end_time_;
-
   uint32_t verifier_logging_threshold_ms_;
 
+  // Note: See comments on GetFaultMessage.
+  friend std::string GetFaultMessageForAbortLogging();
+
   DISALLOW_COPY_AND_ASSIGN(Runtime);
 };
 
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index 4cce15e..32ee3aa3 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -20,8 +20,8 @@
 #include <vector>
 
 #include "base/array_ref.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "dex/dex_file.h"
 #include "handle.h"
 
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index eae2505..5676577 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -371,6 +371,11 @@
 #pragma GCC diagnostic ignored "-Wframe-larger-than="
 #endif
 
+std::string GetFaultMessageForAbortLogging() {
+  Runtime* runtime = Runtime::Current();
+  return (runtime != nullptr) ? runtime->GetFaultMessage() : "";
+}
+
 static void HandleUnexpectedSignalCommonDump(int signal_number,
                                              siginfo_t* info,
                                              void* raw_context,
@@ -427,9 +432,9 @@
     }
 
     if (dump_on_stderr) {
-      std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
+      std::cerr << "Fault message: " << GetFaultMessageForAbortLogging() << std::endl;
     } else {
-      LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
+      LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << GetFaultMessageForAbortLogging();
     }
   }
 }
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 5cec309..2b2919e 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -37,7 +37,7 @@
 RUNTIME_OPTIONS_KEY (Unit,                Zygote)
 RUNTIME_OPTIONS_KEY (Unit,                Help)
 RUNTIME_OPTIONS_KEY (Unit,                ShowVersion)
-RUNTIME_OPTIONS_KEY (std::string,         BootClassPath)
+RUNTIME_OPTIONS_KEY (ParseStringList<':'>,BootClassPath)           // std::vector<std::string>
 RUNTIME_OPTIONS_KEY (ParseStringList<':'>,BootClassPathLocations)  // std::vector<std::string>
 RUNTIME_OPTIONS_KEY (std::string,         ClassPath)
 RUNTIME_OPTIONS_KEY (std::string,         Image)
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index 3089c24..2541ab5 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -22,6 +22,7 @@
 #include <android-base/logging.h>
 
 #include "base/casts.h"
+#include "base/mutex.h"
 #include "jni/jni_env_ext-inl.h"
 #include "obj_ptr-inl.h"
 #include "runtime.h"
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 0c42c5a..b2ad90a 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -19,8 +19,8 @@
 
 #include "jni.h"
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/value_object.h"
 #include "thread_state.h"
 
diff --git a/runtime/stack.h b/runtime/stack.h
index 9d30115..0edf4f5 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,8 +20,8 @@
 #include <stdint.h>
 #include <string>
 
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "quick/quick_method_frame_info.h"
 #include "stack_map.h"
 
diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h
index 106c7f1..493ea85 100644
--- a/runtime/subtype_check.h
+++ b/runtime/subtype_check.h
@@ -20,7 +20,7 @@
 #include "subtype_check_bits_and_status.h"
 #include "subtype_check_info.h"
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "mirror/class.h"
 #include "runtime.h"
 
diff --git a/runtime/thread.h b/runtime/thread.h
index ccde236..6db1943 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -30,8 +30,8 @@
 #include "base/atomic.h"
 #include "base/enums.h"
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/safe_map.h"
 #include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
@@ -563,11 +563,11 @@
   bool Interrupted();
   // Implements java.lang.Thread.isInterrupted.
   bool IsInterrupted();
-  void Interrupt(Thread* self) REQUIRES(!*wait_mutex_);
+  void Interrupt(Thread* self) REQUIRES(!wait_mutex_);
   void SetInterrupted(bool i) {
     tls32_.interrupted.store(i, std::memory_order_seq_cst);
   }
-  void Notify() REQUIRES(!*wait_mutex_);
+  void Notify() REQUIRES(!wait_mutex_);
 
   ALWAYS_INLINE void PoisonObjectPointers() {
     ++poison_object_cookie_;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 8723c99..de698c2 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -123,7 +123,10 @@
   tasks_.clear();
 }
 
-ThreadPool::ThreadPool(const char* name, size_t num_threads, bool create_peers)
+ThreadPool::ThreadPool(const char* name,
+                       size_t num_threads,
+                       bool create_peers,
+                       size_t worker_stack_size)
   : name_(name),
     task_queue_lock_("task queue lock"),
     task_queue_condition_("task queue condition", task_queue_lock_),
@@ -133,28 +136,33 @@
     waiting_count_(0),
     start_time_(0),
     total_wait_time_(0),
-    // Add one since the caller of constructor waits on the barrier too.
-    creation_barier_(num_threads + 1),
+    creation_barier_(0),
     max_active_workers_(num_threads),
-    create_peers_(create_peers) {
+    create_peers_(create_peers),
+    worker_stack_size_(worker_stack_size) {
+  CreateThreads();
+}
+
+void ThreadPool::CreateThreads() {
+  CHECK(threads_.empty());
   Thread* self = Thread::Current();
-  while (GetThreadCount() < num_threads) {
-    const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
-                                                 GetThreadCount());
-    threads_.push_back(
-        new ThreadPoolWorker(this, worker_name, ThreadPoolWorker::kDefaultStackSize));
+  {
+    MutexLock mu(self, task_queue_lock_);
+    shutting_down_ = false;
+    // Add one since the caller of constructor waits on the barrier too.
+    creation_barier_.Init(self, max_active_workers_ + 1);
+    while (GetThreadCount() < max_active_workers_) {
+      const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
+                                                   GetThreadCount());
+      threads_.push_back(
+          new ThreadPoolWorker(this, worker_name, worker_stack_size_));
+    }
   }
   // Wait for all of the threads to attach.
-  creation_barier_.Wait(self);
+  creation_barier_.Wait(Thread::Current());
 }
 
-void ThreadPool::SetMaxActiveWorkers(size_t threads) {
-  MutexLock mu(Thread::Current(), task_queue_lock_);
-  CHECK_LE(threads, GetThreadCount());
-  max_active_workers_ = threads;
-}
-
-ThreadPool::~ThreadPool() {
+void ThreadPool::DeleteThreads() {
   {
     Thread* self = Thread::Current();
     MutexLock mu(self, task_queue_lock_);
@@ -164,10 +172,22 @@
     task_queue_condition_.Broadcast(self);
     completion_condition_.Broadcast(self);
   }
-  // Wait for the threads to finish.
+  // Wait for the threads to finish. We expect the user of the pool
+  // not to run multi-threaded calls to `CreateThreads` and `DeleteThreads`,
+  // so we don't guard the `threads_` field here.
   STLDeleteElements(&threads_);
 }
 
+void ThreadPool::SetMaxActiveWorkers(size_t max_workers) {
+  MutexLock mu(Thread::Current(), task_queue_lock_);
+  CHECK_LE(max_workers, GetThreadCount());
+  max_active_workers_ = max_workers;
+}
+
+ThreadPool::~ThreadPool() {
+  DeleteThreads();
+}
+
 void ThreadPool::StartWorkers(Thread* self) {
   MutexLock mu(self, task_queue_lock_);
   started_ = true;
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 98a1193..f55d72e 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_THREAD_POOL_H_
 
 #include <deque>
+#include <functional>
 #include <vector>
 
 #include "barrier.h"
@@ -48,6 +49,18 @@
   }
 };
 
+class FunctionTask : public SelfDeletingTask {
+ public:
+  explicit FunctionTask(std::function<void(Thread*)>&& func) : func_(std::move(func)) {}
+
+  void Run(Thread* self) override {
+    func_(self);
+  }
+
+ private:
+  std::function<void(Thread*)> func_;
+};
+
 class ThreadPoolWorker {
  public:
   static const size_t kDefaultStackSize = 1 * MB;
@@ -110,9 +123,18 @@
   // If create_peers is true, all worker threads will have a Java peer object. Note that if the
   // pool is asked to do work on the current thread (see Wait), a peer may not be available. Wait
   // will conservatively abort if create_peers and do_work are true.
-  ThreadPool(const char* name, size_t num_threads, bool create_peers = false);
+  ThreadPool(const char* name,
+             size_t num_threads,
+             bool create_peers = false,
+             size_t worker_stack_size = ThreadPoolWorker::kDefaultStackSize);
   virtual ~ThreadPool();
 
+  // Create the threads of this pool.
+  void CreateThreads();
+
+  // Stops and deletes all threads in this pool.
+  void DeleteThreads();
+
   // Wait for all tasks currently on queue to get completed. If the pool has been stopped, only
   // wait till all already running tasks are done.
   // When the pool was created with peers for workers, do_work must not be true (see ThreadPool()).
@@ -158,7 +180,6 @@
   // How many worker threads are waiting on the condition.
   volatile size_t waiting_count_ GUARDED_BY(task_queue_lock_);
   std::deque<Task*> tasks_ GUARDED_BY(task_queue_lock_);
-  // TODO: make this immutable/const?
   std::vector<ThreadPoolWorker*> threads_;
   // Work balance detection.
   uint64_t start_time_ GUARDED_BY(task_queue_lock_);
@@ -166,6 +187,7 @@
   Barrier creation_barier_;
   size_t max_active_workers_ GUARDED_BY(task_queue_lock_);
   const bool create_peers_;
+  const size_t worker_stack_size_;
 
  private:
   friend class ThreadPoolWorker;
diff --git a/runtime/trace.h b/runtime/trace.h
index 926a34f..1089962 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -28,6 +28,7 @@
 
 #include "base/atomic.h"
 #include "base/globals.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/os.h"
 #include "base/safe_map.h"
@@ -42,6 +43,7 @@
 class ArtField;
 class ArtMethod;
 class DexFile;
+class LOCKABLE Mutex;
 class ShadowFrame;
 class Thread;
 
@@ -173,57 +175,57 @@
   uint32_t GetClockOverheadNanoSeconds();
 
   void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   // InstrumentationListener implementation.
   void MethodEntered(Thread* thread,
                      Handle<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void MethodExited(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     const JValue& return_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void MethodUnwind(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void DexPcMoved(Thread* thread,
                   Handle<mirror::Object> this_object,
                   ArtMethod* method,
                   uint32_t new_dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void FieldRead(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t dex_pc,
                  ArtField* field)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void FieldWritten(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     ArtField* field,
                     const JValue& field_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void ExceptionThrown(Thread* thread,
                        Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void Branch(Thread* thread,
               ArtMethod* method,
               uint32_t dex_pc,
               int32_t dex_pc_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
   void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
       REQUIRES_SHARED(Locks::mutator_lock_) override;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
@@ -258,20 +260,20 @@
       // how to annotate this.
       NO_THREAD_SAFETY_ANALYSIS;
   void FinishTracing()
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
 
   void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
                            instrumentation::Instrumentation::InstrumentationEvent event,
                            uint32_t thread_clock_diff, uint32_t wall_clock_diff)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
 
   // Methods to output traced methods and threads.
   void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
-      REQUIRES(!*unique_methods_lock_);
+      REQUIRES(!unique_methods_lock_);
   void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_);
   void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);
 
   // Methods to register seen entities in streaming mode. The methods return true if the entity
@@ -289,15 +291,15 @@
   void FlushBuf()
       REQUIRES(streaming_lock_);
 
-  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_);
+  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!unique_methods_lock_);
   uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
-      REQUIRES(!*unique_methods_lock_);
-  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_);
-  std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_)
+      REQUIRES(!unique_methods_lock_);
+  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!unique_methods_lock_);
+  std::string GetMethodLine(ArtMethod* method) REQUIRES(!unique_methods_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_);
 
   // Singleton instance of the Trace or null when no method tracing is active.
   static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 3099b23..3369784 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -24,8 +24,8 @@
 
 #include "base/arena_object.h"
 #include "base/bit_vector.h"
+#include "base/locks.h"
 #include "base/macros.h"
-#include "base/mutex.h"
 #include "base/stringpiece.h"
 #include "dex/primitive.h"
 #include "gc_root.h"
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 9bb60bb..de66bf5 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -23,7 +23,7 @@
 
 #include <android-base/logging.h>
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "base/safe_map.h"
 #include "base/scoped_arena_containers.h"
 
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index b666c15..d346a95 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -22,6 +22,7 @@
 #include "art_method-inl.h"
 #include "base/indenter.h"
 #include "base/leb128.h"
+#include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "compiler_callbacks.h"
 #include "dex/dex_file-inl.h"
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index 0146b17..dfd4a5c 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -22,7 +22,7 @@
 #include <vector>
 
 #include "base/array_ref.h"
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "dex/dex_file_types.h"
 #include "handle.h"
 #include "obj_ptr.h"
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 0b7ed09..66cbbec 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -17,7 +17,7 @@
 #ifndef ART_RUNTIME_WELL_KNOWN_CLASSES_H_
 #define ART_RUNTIME_WELL_KNOWN_CLASSES_H_
 
-#include "base/mutex.h"
+#include "base/locks.h"
 #include "jni.h"
 #include "obj_ptr.h"
 
diff --git a/test/118-noimage-dex2oat/run b/test/118-noimage-dex2oat/run
index d68b0a0..d1b9725 100644
--- a/test/118-noimage-dex2oat/run
+++ b/test/118-noimage-dex2oat/run
@@ -31,39 +31,26 @@
   exit 1
 fi
 
-if [[ $@ == *--host* ]]; then
-    framework="${ANDROID_HOST_OUT}/framework"
-    bpath_suffix="-hostdex"
-else
-    framework="/system/framework"
-    bpath_suffix=""
-fi
-bpath="${framework}/core-libart${bpath_suffix}.jar"
-bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
-bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
-bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
-bpath_arg="--runtime-option -Xbootclasspath:${bpath}"
-
 
 # Make sure we can run without an oat file.
 echo "Run -Xnoimage-dex2oat"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat
+${RUN} ${flags} --runtime-option -Xnoimage-dex2oat
 return_status1=$?
 
 # Make sure we cannot run without an oat file without fallback.
 echo "Run -Xnoimage-dex2oat -Xno-dex-file-fallback"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat \
+${RUN} ${flags} --runtime-option -Xnoimage-dex2oat \
   --runtime-option -Xno-dex-file-fallback
 return_status2=$?
 
 # Make sure we can run with the oat file.
 echo "Run -Ximage-dex2oat"
-${RUN} ${flags} ${bpath_arg} --runtime-option -Ximage-dex2oat
+${RUN} ${flags} --runtime-option -Ximage-dex2oat
 return_status3=$?
 
 # Make sure we can run with the default settings.
 echo "Run default"
-${RUN} ${flags} ${bpath_arg}
+${RUN} ${flags}
 return_status4=$?
 
 # Make sure we don't silently ignore an early failure.
diff --git a/test/924-threads/src/art/Test924.java b/test/924-threads/src/art/Test924.java
index e8e9781..e97c9c6 100644
--- a/test/924-threads/src/art/Test924.java
+++ b/test/924-threads/src/art/Test924.java
@@ -27,6 +27,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.time.Instant;
 
 public class Test924 {
   public static void run() throws Exception {
@@ -109,6 +110,7 @@
     final CountDownLatch cdl4 = new CountDownLatch(1);
     final CountDownLatch cdl5 = new CountDownLatch(1);
     final Holder h = new Holder();
+    final long ALMOST_INFINITE = 100000000;  // 1.1 days!
     final NativeWaiter w = new NativeWaiter();
     Runnable r = new Runnable() {
       @Override
@@ -121,7 +123,7 @@
 
           cdl2.countDown();
           synchronized(cdl2) {
-            cdl2.wait(1000);  // Wait a second.
+            cdl2.wait(ALMOST_INFINITE);
           }
 
           cdl3_1.await();
@@ -131,7 +133,9 @@
           }
 
           cdl4.countDown();
-          Thread.sleep(1000);
+          try {
+            Thread.sleep(ALMOST_INFINITE);
+          } catch (InterruptedException e) { }
 
           cdl5.countDown();
           while (!h.flag) {
@@ -152,18 +156,20 @@
 
     // Waiting.
     cdl1.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent, so just wait for the desired state for up to 5 minutes, then
+    // give up and continue.
+    final int WAITING_INDEF = 0x191;
+    waitForState(t, WAITING_INDEF);
     synchronized(cdl1) {
       cdl1.notifyAll();
     }
 
     // Timed waiting.
     cdl2.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent, so just wait for the desired state for up to 5 minutes, then
+    // give up and continue.
+    final int WAITING_TIMED = 0x1a1;
+    waitForState(t, WAITING_TIMED);
     synchronized(cdl2) {
       cdl2.notifyAll();
     }
@@ -185,14 +191,16 @@
 
     // Sleeping.
     cdl4.await();
-    Thread.yield();
-    Thread.sleep(100);
-    printThreadState(t);
+    // This is super inconsistent, so just wait for the desired state for up to 5 minutes, then
+    // give up and continue.
+    final int WAITING_SLEEP = 0xe1;
+    waitForState(t, WAITING_SLEEP);
+    t.interrupt();
 
     // Running.
     cdl5.await();
     Thread.yield();
-    Thread.sleep(100);
+    Thread.sleep(1000);
     printThreadState(t);
     h.flag = true;
 
@@ -204,11 +212,26 @@
     // Dying.
     t.join();
     Thread.yield();
-    Thread.sleep(100);
+    Thread.sleep(1000);
 
     printThreadState(t);
   }
 
+  private static void waitForState(Thread t, int desired) throws Exception {
+    Thread.yield();
+    Thread.sleep(1000);
+    // This is super inconsistent, so just wait for the desired state for up to 5 minutes, then
+    // give up and continue.
+    int state;
+    Instant deadline = Instant.now().plusSeconds(60 * 5);
+    while ((state = getThreadState(t)) != desired && deadline.isAfter(Instant.now())) {
+      Thread.yield();
+      Thread.sleep(100);
+      Thread.yield();
+    }
+    printThreadState(state);
+  }
+
   private static void doAllThreadsTests() {
     Thread[] threads = getAllThreads();
     List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 148aea4..5d07601 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -50,9 +50,9 @@
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
 
 # All tests require the host executables. The tests also depend on the core images, but on
 # specific version depending on the compiler.
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index ac6002b..4e5152b 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -541,23 +541,38 @@
   exit
 fi
 
+bpath_modules="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+if [ "${HOST}" = "y" ]; then
+    framework="${ANDROID_HOST_OUT}/framework"
+    if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then
+      framework_location="${ANDROID_HOST_OUT:${#ANDROID_BUILD_TOP}+1}/framework"
+    else
+      echo "error: ANDROID_BUILD_TOP/ is not a prefix of ANDROID_HOST_OUT"
+      echo "ANDROID_BUILD_TOP=${ANDROID_BUILD_TOP}"
+      echo "ANDROID_HOST_OUT=${ANDROID_HOST_OUT}"
+      exit
+    fi
+    bpath_suffix="-hostdex"
+else
+    framework="${ANDROID_ROOT}/framework"
+    framework_location="${ANDROID_ROOT}/framework"
+    bpath_suffix="-testdex"
+fi
+bpath=""
+bpath_locations=""
+bpath_separator=""
+for bpath_module in ${bpath_modules}; do
+  bpath+="${bpath_separator}${framework}/${bpath_module}${bpath_suffix}.jar"
+  bpath_locations+="${bpath_separator}${framework_location}/${bpath_module}${bpath_suffix}.jar"
+  bpath_separator=":"
+done
+# Pass down the bootclasspath
+FLAGS="${FLAGS} -Xbootclasspath:${bpath}"
+FLAGS="${FLAGS} -Xbootclasspath-locations:${bpath_locations}"
+COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xbootclasspath:${bpath}"
+COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xbootclasspath-locations:${bpath_locations}"
 
 if [ "$HAVE_IMAGE" = "n" ]; then
-    if [ "${HOST}" = "y" ]; then
-        framework="${ANDROID_HOST_OUT}/framework"
-        bpath_suffix="-hostdex"
-    else
-        framework="${ANDROID_ROOT}/framework"
-        bpath_suffix="-testdex"
-    fi
-    bpath="${framework}/core-libart${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/core-simple${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
-    bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
-    # Pass down the bootclasspath
-    FLAGS="${FLAGS} -Xbootclasspath:${bpath}"
     # Disable image dex2oat - this will forbid the runtime to patch or compile an image.
     FLAGS="${FLAGS} -Xnoimage-dex2oat"
 
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index dca209d..ad6ee6b 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -54,7 +54,7 @@
 
 if [[ $mode == target ]]; then
   if [[ $core_jars_only == y ]]; then
-    selected_env_var=TARGET_CORE_JARS
+    selected_env_var=TARGET_TEST_CORE_JARS
   else
     selected_env_var=PRODUCT_BOOT_JARS
   fi
@@ -64,11 +64,31 @@
     echo "Error: --host does not have non-core boot jars, --core required" >&2
     exit 1
   fi
-  selected_env_var=HOST_CORE_JARS
+  selected_env_var=HOST_TEST_CORE_JARS
   intermediates_env_var=HOST_OUT_COMMON_INTERMEDIATES
 fi
 
-boot_jars_list=$(get_build_var "$selected_env_var")
+if [[ $core_jars_only == y ]]; then
+  # FIXME: The soong invocation we're using for getting the variables does not give us anything
+  # defined in Android.common_path.mk; otherwise we would just use HOST-/TARGET_TEST_CORE_JARS.
+
+  # The core_jars_list must match the TEST_CORE_JARS variable in Android.common_path.mk.
+  core_jars_list="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+  core_jars_suffix=
+  if [[ $mode == target ]]; then
+    core_jars_suffix=-testdex
+  elif [[ $mode == host ]]; then
+    core_jars_suffix=-hostdex
+  fi
+  boot_jars_list=""
+  boot_separator=""
+  for boot_module in ${core_jars_list}; do
+    boot_jars_list+="${boot_separator}${boot_module}${core_jars_suffix}"
+    boot_separator=" "
+  done
+else
+  boot_jars_list=$(get_build_var "$selected_env_var")
+fi
 
 # Print only the list of boot jars.
 if [[ $print_file_path == n ]]; then
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
index 9d2f014..9f924b2 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeHandlerTest.java
@@ -67,7 +67,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
@@ -91,7 +91,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
@@ -113,7 +113,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
@@ -138,7 +138,7 @@
                 "  @Annotation(returnType=Integer.class)",
                 "  public String field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION,
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
index 1202564..25f2844 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/CovariantReturnTypeMultiHandlerTest.java
@@ -74,7 +74,7 @@
                 "  @Annotation(returnType=Long.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of("Lannotation/Annotation$Multi;",
@@ -104,7 +104,7 @@
                 "  @Annotation(returnType=Long.class)",
                 "  public String method() {return null;}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of("Lannotation/Annotation$Multi;",
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java
index cdf01af..dc767fe 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandlerTest.java
@@ -82,7 +82,7 @@
                 "  @Anno",
                 "  public void method() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -103,7 +103,7 @@
                 "  @Anno",
                 "  public Class() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -124,7 +124,7 @@
                 "  @Anno",
                 "  public int i;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -145,7 +145,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->method()V\")",
                 "  public void method() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -166,7 +166,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->nomethod()V\")",
                 "  public void method() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -186,7 +186,7 @@
                 "    public void method() {}",
                 "  }",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -205,7 +205,7 @@
                 "public class Class {",
                 "  public void method() {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -224,7 +224,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
                 "  public void method(T arg) {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP))
@@ -252,7 +252,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
                 "  public void method(T arg) {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
@@ -284,7 +284,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
                 "  public void method(T arg) {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
@@ -320,7 +320,7 @@
                 "package a.b;",
                 "public class Class extends Base implements Interface {",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
@@ -354,7 +354,7 @@
                 "  @Anno",
                 "  public void method(T arg) {}",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Set<String> publicApis = Sets.newHashSet(
                 "La/b/Base;->method(Ljava/lang/Object;)V",
@@ -385,7 +385,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->field:I\")",
                 "  public volatile int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(
@@ -407,7 +407,7 @@
                 "  @Anno(expectedSignature=\"La/b/Class;->wrong:I\")",
                 "  public volatile int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(x -> true, NULL_SDK_MAP));
@@ -424,7 +424,7 @@
                 "  @Anno(maxTargetSdk=1)",
                 "  public int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(
@@ -444,7 +444,7 @@
                 "  @Anno",
                 "  public int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(
@@ -464,7 +464,7 @@
                 "  @Anno(maxTargetSdk=2)",
                 "  public int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
 
         Map<String, AnnotationHandler> handlerMap =
                 ImmutableMap.of(ANNOTATION, createGreylistHandler(
@@ -493,7 +493,7 @@
                 "  @Anno2(maxTargetSdk=2, trackingBug=123456789)",
                 "  public int field;",
                 "}"));
-        assertThat(mJavac.compile()).isTrue();
+        mJavac.compile();
         new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
                 ImmutableMap.of("Lannotation/Anno2;", createGreylistHandler(x -> true,
                         ImmutableMap.of(2, "flag2")))
diff --git a/tools/class2greylist/test/src/com/android/javac/Javac.java b/tools/class2greylist/test/src/com/android/javac/Javac.java
index 202f412..94e4e49 100644
--- a/tools/class2greylist/test/src/com/android/javac/Javac.java
+++ b/tools/class2greylist/test/src/com/android/javac/Javac.java
@@ -18,6 +18,7 @@
 
 import com.google.common.io.Files;
 
+import java.util.stream.Collectors;
 import org.apache.bcel.classfile.ClassParser;
 import org.apache.bcel.classfile.JavaClass;
 
@@ -76,15 +77,24 @@
         return this;
     }
 
-    public boolean compile() {
+    public void compile() {
+        DiagnosticCollector<JavaFileObject> diagnosticCollector = new DiagnosticCollector<>();
         JavaCompiler.CompilationTask task = mJavac.getTask(
                 null,
                 mFileMan,
-                null,
+                diagnosticCollector,
                 null,
                 null,
                 mCompilationUnits);
-        return task.call();
+        boolean result = task.call();
+        if (!result) {
+            throw new IllegalStateException(
+                "Compilation failed: " +
+                    diagnosticCollector.getDiagnostics()
+                        .stream()
+                        .map(Object::toString)
+                        .collect(Collectors.joining("\n")));
+        }
     }
 
     public InputStream getClassFile(String classname) throws IOException {
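
The rewritten Javac.compile() above follows the standard javax.tools pattern: hand a DiagnosticCollector to JavaCompiler.getTask(), and if call() returns false, surface the collected diagnostics instead of a bare boolean. A minimal standalone sketch of that pattern, for reference only — the class names and the in-memory source object here are illustrative and not part of this change:

    import java.net.URI;
    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;
    import javax.tools.DiagnosticCollector;
    import javax.tools.JavaCompiler;
    import javax.tools.JavaFileObject;
    import javax.tools.SimpleJavaFileObject;
    import javax.tools.StandardJavaFileManager;
    import javax.tools.ToolProvider;

    public class CompileOrThrowExample {
        // In-memory source so the sketch does not touch the file system.
        static class StringSource extends SimpleJavaFileObject {
            private final String code;
            StringSource(String className, String code) {
                super(URI.create("string:///" + className.replace('.', '/') + ".java"),
                        Kind.SOURCE);
                this.code = code;
            }
            @Override
            public CharSequence getCharContent(boolean ignoreEncodingErrors) {
                return code;
            }
        }

        public static void main(String[] args) {
            JavaCompiler javac = ToolProvider.getSystemJavaCompiler();
            DiagnosticCollector<JavaFileObject> diagnostics = new DiagnosticCollector<>();
            StandardJavaFileManager fileMan =
                    javac.getStandardFileManager(diagnostics, null, null);
            // Deliberately broken source, so the failure path below is exercised.
            List<JavaFileObject> units = Arrays.asList(
                    new StringSource("a.b.Broken", "package a.b; public class Broken {"));
            // Same shape as Javac.compile(): diagnostics listener in, boolean result out.
            boolean ok = javac.getTask(null, fileMan, diagnostics, null, null, units).call();
            if (!ok) {
                throw new IllegalStateException("Compilation failed:\n"
                        + diagnostics.getDiagnostics().stream()
                                .map(Object::toString)
                                .collect(Collectors.joining("\n")));
            }
        }
    }

Throwing here is what lets the test hunks earlier in this change drop the repeated assertThat(mJavac.compile()).isTrue() checks: a compilation failure now aborts the test with the full javac diagnostics in the exception message rather than a bare assertion failure.
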
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index a5fa332..f97dd4f 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -157,7 +157,6 @@
 {
   description: "Missing resource in classpath",
   result: EXEC_FAILED,
-  modes: [device],
   names: ["libcore.java.util.prefs.OldAbstractPreferencesTest#testClear",
           "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode",
           "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree",
@@ -187,7 +186,8 @@
           "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree",
           "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush",
           "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync",
-          "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"]
+          "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"],
+  bug: 120526172
 },
 {
   description: "Only work with --mode=activity",
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 2d39b2a..63f1fce 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -46,6 +46,21 @@
   done
 }
 
+function boot_classpath_arg {
+  local dir="$1"
+  local suffix="$2"
+  shift 2
+  printf -- "--vm-arg -Xbootclasspath"
+  for var
+  do
+    printf -- ":${dir}/${var}${suffix}.jar";
+  done
+}
+
+# Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+BOOT_CLASSPATH_JARS="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+
 DEPS="core-tests jsr166-tests mockito-target"
 
 for lib in $DEPS
@@ -110,6 +125,7 @@
   if [[ "$1" == "--mode=device" ]]; then
     device_mode=true
     vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core.art"
+    vogar_args="$vogar_args $(boot_classpath_arg /system/framework -testdex $BOOT_CLASSPATH_JARS)"
     shift
   elif [[ "$1" == "--mode=host" ]]; then
     # We explicitly give a wrong path for the image, to ensure vogar