Snap for 5184694 from 3d4fe36260918aeddda0d812eddbc1f93257945b to qt-release
Change-Id: Ib2ab308a5937adb6a57c81697b9e33acb5048eac
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index cf35914..b6d6600 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -476,7 +476,6 @@
int rc = TEMP_FAILURE_RETRY(recvmsg(control_sock_, &msg, 0));
if (rc <= 0) {
- PLOG(WARNING) << "Receiving file descriptor from ADB failed (socket " << control_sock_ << ")";
return android::base::unique_fd(-1);
} else {
VLOG(jdwp) << "Fds have been received from ADB!";
@@ -624,7 +623,6 @@
android::base::unique_fd new_fd(ReadFdFromAdb());
if (new_fd == -1) {
// Something went wrong. We need to retry getting the control socket.
- PLOG(ERROR) << "Something went wrong getting fds from adb. Retry!";
control_sock_.reset();
break;
} else if (adb_connection_socket_ != -1) {
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 03e68ae..1880726 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -73,8 +73,21 @@
HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
-# Jar files for core.art.
-TEST_CORE_JARS := core-oj core-libart core-simple conscrypt okhttp bouncycastle
+# Modules to compile for core.art.
+CORE_IMG_JARS := core-oj core-libart core-simple okhttp bouncycastle
+HOST_CORE_IMG_JARS := $(addsuffix -hostdex,$(CORE_IMG_JARS))
+TARGET_CORE_IMG_JARS := $(addsuffix -testdex,$(CORE_IMG_JARS))
+HOST_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_IMG_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ifeq ($(ART_TEST_ANDROID_ROOT),)
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+else
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(ART_TEST_ANDROID_ROOT)/$(jar).jar)
+endif
+HOST_CORE_IMG_DEX_FILES := $(foreach jar,$(HOST_CORE_IMG_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
+TARGET_CORE_IMG_DEX_FILES := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+
+# Jar files for the boot class path for testing. Must start with CORE_IMG_JARS.
+TEST_CORE_JARS := $(CORE_IMG_JARS) conscrypt
HOST_TEST_CORE_JARS := $(addsuffix -hostdex,$(TEST_CORE_JARS))
TARGET_TEST_CORE_JARS := $(addsuffix -testdex,$(TEST_CORE_JARS))
HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_TEST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
@@ -83,7 +96,6 @@
else
TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
endif
-
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_TEST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 6885946..1bcca7c 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -452,12 +452,7 @@
$$(gtest_exe) \
$$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
$$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
+ $$(foreach jar,$$(TARGET_TEST_CORE_JARS),$$(TARGET_OUT_JAVA_LIBRARIES)/$$(jar).jar)
ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
@@ -515,7 +510,8 @@
$$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \
$$(gtest_exe) \
$$(ART_GTEST_$(1)_HOST_DEPS) \
- $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
+ $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) \
+ $(HOST_OUT_EXECUTABLES)/timeout_dumper
ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
@@ -528,7 +524,9 @@
$$(gtest_output): NAME := $$(gtest_rule)
ifeq (,$(SANITIZE_HOST))
$$(gtest_output): $$(gtest_exe) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && $$< --gtest_output=xml:$$@ && \
+ $(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && \
+ timeout -k 120s -s SIGRTMIN+2 2400s $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+ $$< --gtest_output=xml:$$@ && \
$$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME))
else
# Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some
@@ -540,7 +538,9 @@
# under ASAN.
$$(gtest_output): $$(gtest_exe) $$(gtest_deps)
$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \
- ASAN_OPTIONS=detect_leaks=1 $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
+ ASAN_OPTIONS=detect_leaks=1 timeout -k 120s -s SIGRTMIN+2 3600s \
+ $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+ $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
{ $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \
( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \
{ echo "ABI: 'x86_64'" | cat - $$<.tmp.out | development/scripts/stack | tail -n 3000 ; } ; \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index e2adac1..2ad1143 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -39,8 +39,6 @@
# Use dex2oat debug version for better error reporting
# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
# $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds.
-# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
-# run-test --no-image
define create-core-oat-host-rules
core_compile_options :=
core_image_name :=
@@ -80,13 +78,15 @@
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(HOST_CORE_IMG_DEX_LOCATIONS) $$(core_dex2oat_dependency)
@echo "host dex2oat: $$@"
@mkdir -p $$(dir $$@)
$$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
- --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
- $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+ --image-classes=$$(PRELOADED_CLASSES) \
+ $$(addprefix --dex-file=,$$(HOST_CORE_IMG_DEX_FILES)) \
+ $$(addprefix --dex-location=,$$(HOST_CORE_IMG_DEX_LOCATIONS)) \
+ --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
$$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
@@ -169,13 +169,15 @@
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(TARGET_CORE_IMG_DEX_FILES) $$(core_dex2oat_dependency)
@echo "target dex2oat: $$@"
@mkdir -p $$(dir $$@)
$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
- --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
- $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+ --image-classes=$$(PRELOADED_CLASSES) \
+ $$(addprefix --dex-file=,$$(TARGET_CORE_IMG_DEX_FILES)) \
+ $$(addprefix --dex-location=,$$(TARGET_CORE_IMG_DEX_LOCATIONS)) \
+ --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \
--instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index f2e12f6..0ec0a15 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -19,6 +19,11 @@
"libopenjdkjvmti",
"libadbconnection",
]
+bionic_native_shared_libs = [
+ "libc",
+ "libm",
+ "libdl",
+]
// - Fake library that avoids namespace issues and gives some warnings for nosy apps.
art_runtime_fake_native_shared_libs = [
// FIXME: Does not work as-is, because `libart_fake` is defined in libart_fake/Android.mk,
@@ -102,7 +107,8 @@
compile_multilib: "both",
manifest: "manifest.json",
native_shared_libs: art_runtime_base_native_shared_libs
- + art_runtime_fake_native_shared_libs,
+ + art_runtime_fake_native_shared_libs
+ + bionic_native_shared_libs,
multilib: {
both: {
// TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
@@ -130,7 +136,8 @@
manifest: "manifest.json",
native_shared_libs: art_runtime_base_native_shared_libs
+ art_runtime_fake_native_shared_libs
- + art_runtime_debug_native_shared_libs,
+ + art_runtime_debug_native_shared_libs
+ + bionic_native_shared_libs,
multilib: {
both: {
// TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh
index 924c44b..b5e8d8b 100755
--- a/build/apex/runtests.sh
+++ b/build/apex/runtests.sh
@@ -33,11 +33,7 @@
sudo apt-get install libguestfs-tools
"
-which tree > /dev/null || die "This script requires the 'tree' tool.
-On Debian-based systems, this can be installed with:
- sudo apt-get install tree
-"
[[ -n "$ANDROID_PRODUCT_OUT" ]] \
|| die "You need to source and lunch before you can use this script."
@@ -46,6 +42,7 @@
build_apex_p=true
list_image_files_p=false
+print_image_tree_p=false
function usage {
cat <<EOF
@@ -53,7 +50,8 @@
Build (optional) and run tests on Android Runtime APEX package (on host).
-s, --skip-build skip the build step
- -l, --list-files list the contents of the ext4 image
+ -l, --list-files list the contents of the ext4 image using `find`
+ -t, --print-tree list the contents of the ext4 image using `tree`
-h, --help display this help and exit
EOF
@@ -64,6 +62,7 @@
case "$1" in
(-s|--skip-build) build_apex_p=false;;
(-l|--list-files) list_image_files_p=true;;
+ (-t|--print-tree) print_image_tree_p=true;;
(-h|--help) usage;;
(*) die "Unknown option: '$1'
Try '$0 --help' for more information.";;
@@ -71,6 +70,14 @@
shift
done
+if $print_image_tree_p; then
+ which tree >/dev/null || die "This script requires the 'tree' tool.
+On Debian-based systems, this can be installed with:
+
+ sudo apt-get install tree
+"
+fi
+
# build_apex APEX_MODULE
# ----------------------
@@ -82,6 +89,24 @@
fi
}
+# maybe_list_apex_contents MOUNT_POINT
+# ------------------------------------
+# If any listing/printing options were used, honor them and display the contents
+# of the APEX payload at MOUNT_POINT.
+function maybe_list_apex_contents {
+ local mount_point=$1
+
+ # List the contents of the mounted image using `find` (optional).
+ if $list_image_files_p; then
+ say "Listing image files" && find "$mount_point"
+ fi
+
+ # List the contents of the mounted image using `tree` (optional).
+ if $print_image_tree_p; then
+ say "Printing image tree" && ls -ld "$mount_point" && tree -aph --du "$mount_point"
+ fi
+}
+
function check_binary {
[[ -x "$mount_point/bin/$1" ]] || die "Cannot find binary '$1' in mounted image"
}
@@ -218,10 +243,6 @@
# Mount the image from the Android Runtime APEX.
guestmount -a "$image_file" -m "$partition" "$mount_point"
-
- # List the contents of the mounted image (optional).
- $list_image_files_p \
- && say "Listing image files" && ls -ld "$mount_point" && tree -ap "$mount_point"
}
# Testing release APEX package (com.android.runtime.release).
@@ -229,6 +250,8 @@
apex_module="com.android.runtime.release"
+say "Processing APEX package $apex_module"
+
work_dir=$(mktemp -d)
mount_point="$work_dir/image"
@@ -240,6 +263,9 @@
# Set up APEX package.
setup_target_apex "$apex_module" "$mount_point"
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
+
# Run tests on APEX package.
say "Checking APEX package $apex_module"
check_release_contents
@@ -249,12 +275,15 @@
cleanup_target
say "$apex_module tests passed"
+echo
# Testing debug APEX package (com.android.runtime.debug).
# -------------------------------------------------------
apex_module="com.android.runtime.debug"
+say "Processing APEX package $apex_module"
+
work_dir=$(mktemp -d)
mount_point="$work_dir/image"
@@ -266,6 +295,9 @@
# Set up APEX package.
setup_target_apex "$apex_module" "$mount_point"
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
+
# Run tests on APEX package.
say "Checking APEX package $apex_module"
check_release_contents
@@ -279,6 +311,7 @@
cleanup_target
say "$apex_module tests passed"
+echo
# Testing host APEX package (com.android.runtime.host).
@@ -319,6 +352,8 @@
apex_module="com.android.runtime.host"
+say "Processing APEX package $apex_module"
+
work_dir=$(mktemp -d)
mount_point="$work_dir/zip"
@@ -330,6 +365,9 @@
# Set up APEX package.
setup_host_apex "$apex_module" "$mount_point"
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
+
# Run tests on APEX package.
say "Checking APEX package $apex_module"
check_release_contents
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 18f7105..0039be0 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1742,6 +1742,9 @@
if (&cls->GetDexFile() == &accessor.GetDexFile()) {
ObjectLock<mirror::Class> lock(self, cls);
mirror::Class::SetStatus(cls, status, self);
+ if (status >= ClassStatus::kVerified) {
+ cls->SetVerificationAttempted();
+ }
}
} else {
DCHECK(self->IsExceptionPending());
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 9b8bb3e..e57bbfa 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -126,11 +126,11 @@
}
extern "C" bool jit_compile_method(
- void* handle, ArtMethod* method, Thread* self, bool osr)
+ void* handle, ArtMethod* method, Thread* self, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
- return jit_compiler->CompileMethod(self, method, osr);
+ return jit_compiler->CompileMethod(self, method, baseline, osr);
}
extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
@@ -181,7 +181,7 @@
}
}
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr) {
SCOPED_TRACE << "JIT compiling " << method->PrettyMethod();
DCHECK(!method->IsProxyMethod());
@@ -198,7 +198,7 @@
TimingLogger::ScopedTiming t2("Compiling", &logger);
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
success = compiler_driver_->GetCompiler()->JitCompile(
- self, code_cache, method, /* baseline= */ false, osr, jit_logger_.get());
+ self, code_cache, method, baseline, osr, jit_logger_.get());
}
// Trim maps to reduce memory usage.
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index d201611..29d2761 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@
virtual ~JitCompiler();
// Compilation entrypoint. Returns whether the compilation succeeded.
- bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
+ bool CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
const CompilerOptions& GetCompilerOptions() const {
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 92b9543..bd4304c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -1300,15 +1300,15 @@
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
- EXPECT_EQ(0x12345678ABCDEF88ll, val1);
- EXPECT_EQ(0x7FEDCBA987654321ll, val2);
+ EXPECT_EQ(0x12345678ABCDEF88LL, val1);
+ EXPECT_EQ(0x7FEDCBA987654321LL, val2);
return 42;
}
void JniCompilerTest::GetTextImpl() {
SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
CURRENT_JNI_WRAPPER(my_gettext));
- jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
+ jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88LL, jobj_,
INT64_C(0x7FEDCBA987654321), jobj_);
EXPECT_EQ(result, 42);
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 5bd11226..50b13c8 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -243,7 +243,8 @@
// compilation.
#define UNREACHABLE_INTRINSIC(Arch, Name) \
void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
- if (!codegen_->GetCompilerOptions().IsBaseline()) { \
+ if (Runtime::Current()->IsAotCompiler() && \
+ !codegen_->GetCompilerOptions().IsBaseline()) { \
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
<< " should have been converted to HIR"; \
} \
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a5bba9b..872fab3 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -624,7 +624,6 @@
explicit Dex2Oat(TimingLogger* timings) :
compiler_kind_(Compiler::kOptimizing),
// Take the default set of instruction features from the build.
- boot_image_checksum_(0),
key_value_store_(nullptr),
verification_results_(nullptr),
runtime_(nullptr),
@@ -1437,17 +1436,22 @@
if (!IsBootImage()) {
// When compiling an app, create the runtime early to retrieve
- // the image location key needed for the oat header.
+ // the boot image checksums needed for the oat header.
if (!CreateRuntime(std::move(runtime_options))) {
return dex2oat::ReturnCode::kCreateRuntime;
}
if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) {
TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
- std::vector<ImageSpace*> image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
- boot_image_checksum_ = image_spaces[0]->GetImageHeader().GetImageChecksum();
- } else {
- boot_image_checksum_ = 0u;
+ Runtime* runtime = Runtime::Current();
+ key_value_store_->Put(OatHeader::kBootClassPathKey,
+ android::base::Join(runtime->GetBootClassPathLocations(), ':'));
+ std::vector<ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+ const std::vector<const DexFile*>& bcp_dex_files =
+ runtime->GetClassLinker()->GetBootClassPath();
+ key_value_store_->Put(
+ OatHeader::kBootClassPathChecksumsKey,
+ gc::space::ImageSpace::GetBootClassPathChecksums(image_spaces, bcp_dex_files));
}
// Open dex files for class path.
@@ -2015,7 +2019,7 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- if (!oat_writer->WriteHeader(elf_writer->GetStream(), boot_image_checksum_)) {
+ if (!oat_writer->WriteHeader(elf_writer->GetStream())) {
LOG(ERROR) << "Failed to write oat header to the ELF file " << oat_file->GetPath();
return false;
}
@@ -2646,7 +2650,6 @@
std::unique_ptr<CompilerOptions> compiler_options_;
Compiler::Kind compiler_kind_;
- uint32_t boot_image_checksum_;
std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
std::unique_ptr<VerificationResults> verification_results_;
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index bd8cf5a..fa0a3d4 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -326,8 +326,7 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(),
- /*boot_image_checksum=*/ 0u);
+ bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream());
ASSERT_TRUE(header_ok);
writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 61d105f..e4e4b13 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -32,6 +32,7 @@
#include "base/enums.h"
#include "base/globals.h"
#include "base/logging.h" // For VLOG.
+#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
#include "class_root.h"
@@ -153,6 +154,26 @@
: nullptr;
}
+bool ImageWriter::IsImageObject(ObjPtr<mirror::Object> obj) const {
+ // For boot image, we keep all objects remaining after the GC in PrepareImageAddressSpace().
+ if (compiler_options_.IsBootImage()) {
+ return true;
+ }
+ // Objects already in the boot image do not belong to the image being written.
+ if (IsInBootImage(obj.Ptr())) {
+ return false;
+ }
+ // DexCaches for the boot class path components that are not a part of the boot image
+ // cannot be garbage collected in PrepareImageAddressSpace() but we do not want to
+ // include them in the app image. So make sure we include only the app DexCaches.
+ if (obj->IsDexCache() &&
+ !ContainsElement(compiler_options_.GetDexFilesForOatFile(),
+ obj->AsDexCache()->GetDexFile())) {
+ return false;
+ }
+ return true;
+}
+
// Return true if an object is already in an image space.
bool ImageWriter::IsInBootImage(const void* obj) const {
gc::Heap* const heap = Runtime::Current()->GetHeap();
@@ -437,7 +458,7 @@
*/
heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!IsInBootImage(object.Ptr())) {
+ if (IsImageObject(object)) {
visitor.SetObject(object);
if (object->IsDexCache()) {
@@ -680,7 +701,7 @@
ObjPtr<mirror::ClassLoader> class_loader = GetAppClassLoader();
std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
- if (IsInBootImage(dex_cache.Ptr())) {
+ if (!IsImageObject(dex_cache)) {
continue; // Boot image DexCache is not written to the app image.
}
PreloadDexCache(dex_cache, class_loader);
@@ -989,7 +1010,7 @@
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
ObjPtr<mirror::DexCache> dex_cache =
ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
- if (dex_cache == nullptr || IsInBootImage(dex_cache.Ptr())) {
+ if (dex_cache == nullptr || !IsImageObject(dex_cache)) {
continue;
}
const DexFile* dex_file = dex_cache->GetDexFile();
@@ -1758,7 +1779,8 @@
for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
// Pass the class loader associated with the DexCache. This can either be
// the app's `class_loader` or `nullptr` if boot class loader.
- PruneDexCache(dex_cache, IsInBootImage(dex_cache.Ptr()) ? nullptr : GetAppClassLoader());
+ bool is_app_image_dex_cache = compiler_options_.IsAppImage() && IsImageObject(dex_cache);
+ PruneDexCache(dex_cache, is_app_image_dex_cache ? GetAppClassLoader() : nullptr);
}
// Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
@@ -1856,7 +1878,7 @@
continue;
}
const DexFile* dex_file = dex_cache->GetDexFile();
- if (!IsInBootImage(dex_cache.Ptr())) {
+ if (IsImageObject(dex_cache)) {
dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
}
}
@@ -1875,7 +1897,7 @@
continue;
}
const DexFile* dex_file = dex_cache->GetDexFile();
- if (!IsInBootImage(dex_cache.Ptr())) {
+ if (IsImageObject(dex_cache)) {
non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
}
}
@@ -1889,7 +1911,7 @@
continue;
}
const DexFile* dex_file = dex_cache->GetDexFile();
- if (!IsInBootImage(dex_cache.Ptr()) &&
+ if (IsImageObject(dex_cache) &&
image_dex_files.find(dex_file) != image_dex_files.end()) {
dex_caches->Set<false>(i, dex_cache.Ptr());
++i;
@@ -1942,7 +1964,7 @@
mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
mirror::Object* obj,
size_t oat_index) {
- if (obj == nullptr || IsInBootImage(obj)) {
+ if (obj == nullptr || !IsImageObject(obj)) {
// Object is null or already in the image, there is no work to do.
return obj;
}
@@ -2373,7 +2395,7 @@
{
auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+ if (IsImageObject(obj)) {
CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
}
};
@@ -2444,7 +2466,7 @@
{
auto unbin_objects_into_offset = [&](mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!IsInBootImage(obj)) {
+ if (IsImageObject(obj)) {
UnbinObjectsIntoOffset(obj);
}
};
@@ -2909,7 +2931,7 @@
}
void ImageWriter::CopyAndFixupObject(Object* obj) {
- if (IsInBootImage(obj)) {
+ if (!IsImageObject(obj)) {
return;
}
size_t offset = GetImageOffset(obj);
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 33bacf8..b680265 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -674,7 +674,12 @@
template <typename T>
T* NativeCopyLocation(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
- // Return true of obj is inside of the boot image space. This may only return true if we are
+ // Return true if `obj` belongs to the image we're writing.
+ // For a boot image, this is true for all objects.
+ // For an app image, boot image objects and boot class path dex caches are excluded.
+ bool IsImageObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Return true if `obj` is inside of the boot image space. This may only return true if we are
// compiling an app image.
bool IsInBootImage(const void* obj) const;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index d045698..e2a9ac2 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -2808,11 +2808,9 @@
return true;
}
-bool OatWriter::WriteHeader(OutputStream* out, uint32_t boot_image_checksum) {
+bool OatWriter::WriteHeader(OutputStream* out) {
CHECK(write_state_ == WriteState::kWriteHeader);
- oat_header_->SetBootImageChecksum(boot_image_checksum);
-
// Update checksum with header data.
DCHECK_EQ(oat_header_->GetChecksum(), 0u); // For checksum calculation.
const uint8_t* header_begin = reinterpret_cast<const uint8_t*>(oat_header_.get());
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 9cd2fd0..cc0e83a 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -198,7 +198,7 @@
// Check the size of the written oat file.
bool CheckOatSize(OutputStream* out, size_t file_offset, size_t relative_offset);
// Write the oat header. This finalizes the oat file.
- bool WriteHeader(OutputStream* out, uint32_t boot_image_checksum);
+ bool WriteHeader(OutputStream* out);
// Returns whether the oat file has an associated image.
bool HasImage() const {
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 5de1540..ecf9db8 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -240,7 +240,7 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- if (!oat_writer.WriteHeader(elf_writer->GetStream(), /*boot_image_checksum=*/ 42u)) {
+ if (!oat_writer.WriteHeader(elf_writer->GetStream())) {
return false;
}
@@ -396,6 +396,7 @@
ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
SafeMap<std::string, std::string> key_value_store;
+ key_value_store.Put(OatHeader::kBootClassPathChecksumsKey, "testkey");
bool success = WriteElf(tmp_vdex.GetFile(),
tmp_oat.GetFile(),
class_linker->GetBootClassPath(),
@@ -418,7 +419,8 @@
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
ASSERT_EQ(class_linker->GetBootClassPath().size(), oat_header.GetDexFileCount()); // core
- ASSERT_EQ(42u, oat_header.GetBootImageChecksum());
+ ASSERT_TRUE(oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey) != nullptr);
+ ASSERT_STREQ("testkey", oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey));
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
const DexFile& dex_file = *java_lang_dex_file_;
@@ -464,7 +466,7 @@
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
- EXPECT_EQ(68U, sizeof(OatHeader));
+ EXPECT_EQ(64U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(8U, sizeof(OatQuickMethodHeader));
EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 58d12a1..c3fb5fd 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -177,9 +177,6 @@
header_libs: [
"libnativehelper_header_only",
],
- include_dirs: [
- "external/icu/icu4c/source/common",
- ],
}
art_cc_test {
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 278203d..46724ee 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -26,7 +26,6 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
#include "android-base/unique_fd.h"
-#include <unicode/uvernum.h>
#include "art_field-inl.h"
#include "base/file_utils.h"
@@ -329,15 +328,18 @@
}
std::vector<std::string> CommonArtTestImpl::GetLibCoreDexFileNames() {
- // Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+ // Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
// because that's what we use for compiling the core.art image.
+ // It may contain additional modules from TEST_CORE_JARS.
static const char* const kLibcoreModules[] = {
+ // CORE_IMG_JARS modules.
"core-oj",
"core-libart",
"core-simple",
- "conscrypt",
"okhttp",
"bouncycastle",
+ // Additional modules.
+ "conscrypt",
};
std::vector<std::string> result;
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index 7f25f02..4d6aa5c 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -131,6 +131,71 @@
],
}
+cc_library_headers {
+ name: "libdexfile_external_headers",
+ host_supported: true,
+ header_libs: ["libbase_headers"],
+ export_header_lib_headers: ["libbase_headers"],
+ export_include_dirs: ["external/include"],
+
+ target: {
+ windows: {
+ enabled: true,
+ },
+ },
+}
+
+cc_library {
+ name: "libdexfile_external",
+ host_supported: true,
+ srcs: [
+ "external/dex_file_ext.cc",
+ ],
+ header_libs: ["libdexfile_external_headers"],
+ shared_libs: [
+ "libbase",
+ "libdexfile",
+ ],
+
+ // TODO(b/120670568): Enable this when linking bug is fixed.
+ // stubs: {
+ // symbol_file: "external/libdexfile_external.map.txt",
+ // versions: ["1"],
+ // },
+
+ // Hide symbols using version scripts for targets that support it, i.e. all
+ // but Darwin.
+ // TODO(b/120670568): Clean this up when stubs above is enabled.
+ target: {
+ android: {
+ version_script: "external/libdexfile_external.map.txt",
+ },
+ linux_bionic: {
+ version_script: "external/libdexfile_external.map.txt",
+ },
+ linux_glibc: {
+ version_script: "external/libdexfile_external.map.txt",
+ },
+ windows: {
+ version_script: "external/libdexfile_external.map.txt",
+ },
+ },
+}
+
+// Support library with a C++ API for accessing the libdexfile API for external
+// (non-ART) users. They should link to their own instance of this (either
+// statically or through linker namespaces).
+cc_library {
+ name: "libdexfile_support",
+ host_supported: true,
+ srcs: [
+ "external/dex_file_supp.cc",
+ ],
+ header_libs: ["libdexfile_external_headers"],
+ shared_libs: ["libdexfile_external"],
+ export_header_lib_headers: ["libdexfile_external_headers"],
+}
+
art_cc_test {
name: "art_libdexfile_tests",
defaults: [
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index 20a519b..ae1322d 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -156,14 +156,16 @@
return false;
}
-std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const {
+std::unique_ptr<const DexFile> ArtDexFileLoader::Open(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::unique_ptr<DexFileContainer> container) const {
ScopedTrace trace(std::string("Open dex file from RAM ") + location);
return OpenCommon(base,
size,
@@ -175,7 +177,7 @@
verify,
verify_checksum,
error_msg,
- /*container=*/ nullptr,
+ std::move(container),
/*verify_result=*/ nullptr);
}
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index 40d4673..d41eac5 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -54,14 +54,16 @@
bool* only_contains_uncompressed_dex = nullptr) const override;
// Opens .dex file, backed by existing memory
- std::unique_ptr<const DexFile> Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const override;
+ std::unique_ptr<const DexFile> Open(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::unique_ptr<DexFileContainer> container = nullptr) const override;
// Opens .dex file that has been memory-mapped by the caller.
std::unique_ptr<const DexFile> Open(const std::string& location,
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 3667c8c..1884bcf 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -212,14 +212,16 @@
return false;
}
-std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const {
+std::unique_ptr<const DexFile> DexFileLoader::Open(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::unique_ptr<DexFileContainer> container) const {
return OpenCommon(base,
size,
/*data_base=*/ nullptr,
@@ -230,7 +232,7 @@
verify,
verify_checksum,
error_msg,
- /*container=*/ nullptr,
+ std::move(container),
/*verify_result=*/ nullptr);
}
diff --git a/libdexfile/dex/dex_file_loader.h b/libdexfile/dex/dex_file_loader.h
index 8fc836e..49e177f 100644
--- a/libdexfile/dex/dex_file_loader.h
+++ b/libdexfile/dex/dex_file_loader.h
@@ -121,14 +121,16 @@
bool* zip_file_only_contains_uncompress_dex = nullptr) const;
// Opens .dex file, backed by existing memory
- virtual std::unique_ptr<const DexFile> Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const;
+ virtual std::unique_ptr<const DexFile> Open(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::unique_ptr<DexFileContainer> container = nullptr) const;
// Open a dex file with a separate data section.
virtual std::unique_ptr<const DexFile> OpenWithDataSection(
diff --git a/libdexfile/external/dex_file_ext.cc b/libdexfile/external/dex_file_ext.cc
new file mode 100644
index 0000000..3c193f4
--- /dev/null
+++ b/libdexfile/external/dex_file_ext.cc
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <cerrno>
+#include <cstring>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <android-base/mapped_file.h>
+#include <android-base/stringprintf.h>
+
+#include <dex/class_accessor-inl.h>
+#include <dex/code_item_accessors-inl.h>
+#include <dex/dex_file-inl.h>
+#include <dex/dex_file_loader.h>
+
+#include "art_api/ext_dex_file.h"
+
+extern "C" class ExtDexFileString {
+ public:
+ const std::string str_;
+};
+
+namespace art {
+namespace {
+
+const ExtDexFileString empty_string{""};
+
+struct MethodCacheEntry {
+ int32_t offset; // Offset relative to the start of the dex file header.
+ int32_t len;
+ int32_t index; // Method index.
+ std::string name; // Method name. Not filled in for all cache entries.
+};
+
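+// DexFileContainer that owns the mmap'ed file region, keeping it alive for as
+// long as the DexFile backed by it is in use.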
+class MappedFileContainer : public DexFileContainer {
+ public:
+ explicit MappedFileContainer(std::unique_ptr<android::base::MappedFile>&& map)
+ : map_(std::move(map)) {}
+ ~MappedFileContainer() override {}
+ int GetPermissions() override { return 0; }
+ bool IsReadOnly() override { return true; }
+ bool EnableWrite() override { return false; }
+ bool DisableWrite() override { return false; }
+
+ private:
+ std::unique_ptr<android::base::MappedFile> map_;
+ DISALLOW_COPY_AND_ASSIGN(MappedFileContainer);
+};
+
+} // namespace
+} // namespace art
+
+extern "C" {
+
+const ExtDexFileString* ExtDexFileMakeString(const char* str) {
+ if (str[0] == '\0') {
+ return &art::empty_string;
+ }
+ return new ExtDexFileString{str};
+}
+
+const char* ExtDexFileGetString(const ExtDexFileString* ext_string, /*out*/ size_t* size) {
+ DCHECK(ext_string != nullptr);
+ *size = ext_string->str_.size();
+ return ext_string->str_.data();
+}
+
+void ExtDexFileFreeString(const ExtDexFileString* ext_string) {
+ DCHECK(ext_string != nullptr);
+ if (ext_string != &art::empty_string) {
+ delete (ext_string);
+ }
+}
+
+// Wraps DexFile to add the caching needed by the external interface. This is
+// what gets passed over as ExtDexFile*.
+class ExtDexFile {
+ // Method cache for GetMethodInfoForOffset. This is populated as we iterate
+ // sequentially through the class defs. MethodCacheEntry.name is only set for
+ // methods returned by GetMethodInfoForOffset.
+ std::map<int32_t, art::MethodCacheEntry> method_cache_;
+
+ // Index of first class def for which method_cache_ isn't complete.
+ uint32_t class_def_index_ = 0;
+
+ public:
+ std::unique_ptr<const art::DexFile> dex_file_;
+ explicit ExtDexFile(std::unique_ptr<const art::DexFile>&& dex_file)
+ : dex_file_(std::move(dex_file)) {}
+
+ art::MethodCacheEntry* GetMethodCacheEntryForOffset(int64_t dex_offset) {
+ // First look in the method cache.
+ auto it = method_cache_.upper_bound(dex_offset);
+ if (it != method_cache_.end() && dex_offset >= it->second.offset) {
+ return &it->second;
+ }
+
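+    // Cache miss: continue scanning class defs from where we left off, caching
+    // each method's code range keyed by its end offset.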
+ for (; class_def_index_ < dex_file_->NumClassDefs(); class_def_index_++) {
+ art::ClassAccessor accessor(*dex_file_, class_def_index_);
+
+ for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
+ art::CodeItemInstructionAccessor code = method.GetInstructions();
+ if (!code.HasCodeItem()) {
+ continue;
+ }
+
+ int32_t offset = reinterpret_cast<const uint8_t*>(code.Insns()) - dex_file_->Begin();
+ int32_t len = code.InsnsSizeInBytes();
+ int32_t index = method.GetIndex();
+ auto res =
+ method_cache_.emplace(offset + len, art::MethodCacheEntry{offset, len, index, ""});
+ if (offset <= dex_offset && dex_offset < offset + len) {
+ return &res.first->second;
+ }
+ }
+ }
+
+ return nullptr;
+ }
+
+ const std::string& GetMethodName(art::MethodCacheEntry& entry) {
+ if (entry.name.empty()) {
+ entry.name = dex_file_->PrettyMethod(entry.index, false);
+ }
+ return entry.name;
+ }
+};
+
+int ExtDexFileOpenFromMemory(const void* addr,
+ /*inout*/ size_t* size,
+ const char* location,
+ /*out*/ const ExtDexFileString** ext_error_msg,
+ /*out*/ ExtDexFile** ext_dex_file) {
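+  // Too little data to read even the header: report the required size and let the caller retry.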
+ if (*size < sizeof(art::DexFile::Header)) {
+ *size = sizeof(art::DexFile::Header);
+ *ext_error_msg = nullptr;
+ return false;
+ }
+
+ const art::DexFile::Header* header = reinterpret_cast<const art::DexFile::Header*>(addr);
+ uint32_t file_size = header->file_size_;
+ if (art::CompactDexFile::IsMagicValid(header->magic_)) {
+ // Compact dex files store the data section separately so that it can be shared.
+ // Therefore we need to extend the read memory range to include it.
+ // TODO: This might be wasteful as we might read data in between as well.
+ // In practice, this should be fine, as such sharing only happens on disk.
+ uint32_t computed_file_size;
+ if (__builtin_add_overflow(header->data_off_, header->data_size_, &computed_file_size)) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("Corrupt CompactDexFile header in '%s'", location)};
+ return false;
+ }
+ if (computed_file_size > file_size) {
+ file_size = computed_file_size;
+ }
+ } else if (!art::StandardDexFile::IsMagicValid(header->magic_)) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("Unrecognized dex file header in '%s'", location)};
+ return false;
+ }
+
+ if (*size < file_size) {
+ *size = file_size;
+ *ext_error_msg = nullptr;
+ return false;
+ }
+
+ std::string loc_str(location);
+ art::DexFileLoader loader;
+ std::string error_msg;
+ std::unique_ptr<const art::DexFile> dex_file = loader.Open(static_cast<const uint8_t*>(addr),
+ *size,
+ loc_str,
+ header->checksum_,
+ /*oat_dex_file=*/nullptr,
+ /*verify=*/false,
+ /*verify_checksum=*/false,
+ &error_msg);
+ if (dex_file == nullptr) {
+ *ext_error_msg = new ExtDexFileString{std::move(error_msg)};
+ return false;
+ }
+
+ *ext_dex_file = new ExtDexFile(std::move(dex_file));
+ return true;
+}
+
+int ExtDexFileOpenFromFd(int fd,
+ off_t offset,
+ const char* location,
+ /*out*/ const ExtDexFileString** ext_error_msg,
+ /*out*/ ExtDexFile** ext_dex_file) {
+ size_t length;
+ {
+ struct stat sbuf;
+ std::memset(&sbuf, 0, sizeof(sbuf));
+ if (fstat(fd, &sbuf) == -1) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("fstat '%s' failed: %s", location, std::strerror(errno))};
+ return false;
+ }
+ if (S_ISDIR(sbuf.st_mode)) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("Attempt to mmap directory '%s'", location)};
+ return false;
+ }
+ length = sbuf.st_size;
+ }
+
+ if (length < offset + sizeof(art::DexFile::Header)) {
+ *ext_error_msg = new ExtDexFileString{android::base::StringPrintf(
+ "Offset %" PRId64 " too large for '%s' of size %zu", int64_t{offset}, location, length)};
+ return false;
+ }
+
+ // Cannot use MemMap in libartbase here, because it pulls in dlopen which we
+ // can't have when being compiled statically.
+ std::unique_ptr<android::base::MappedFile> map =
+ android::base::MappedFile::FromFd(fd, offset, length, PROT_READ);
+ if (map == nullptr) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("mmap '%s' failed: %s", location, std::strerror(errno))};
+ return false;
+ }
+
+ const art::DexFile::Header* header = reinterpret_cast<const art::DexFile::Header*>(map->data());
+ uint32_t file_size;
+ if (__builtin_add_overflow(offset, header->file_size_, &file_size)) {
+ *ext_error_msg =
+ new ExtDexFileString{android::base::StringPrintf("Corrupt header in '%s'", location)};
+ return false;
+ }
+ if (length < file_size) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("Dex file '%s' too short: expected %" PRIu32 ", got %" PRIu64,
+ location,
+ file_size,
+ uint64_t{length})};
+ return false;
+ }
+
+ void* addr = map->data();
+ size_t size = map->size();
+ auto container = std::make_unique<art::MappedFileContainer>(std::move(map));
+
+ std::string loc_str(location);
+ std::string error_msg;
+ art::DexFileLoader loader;
+ std::unique_ptr<const art::DexFile> dex_file = loader.Open(reinterpret_cast<const uint8_t*>(addr),
+ size,
+ loc_str,
+ header->checksum_,
+ /*oat_dex_file=*/nullptr,
+ /*verify=*/false,
+ /*verify_checksum=*/false,
+ &error_msg,
+ std::move(container));
+ if (dex_file == nullptr) {
+ *ext_error_msg = new ExtDexFileString{std::move(error_msg)};
+ return false;
+ }
+ *ext_dex_file = new ExtDexFile(std::move(dex_file));
+ return true;
+}
+
+int ExtDexFileGetMethodInfoForOffset(ExtDexFile* ext_dex_file,
+ int64_t dex_offset,
+ /*out*/ ExtDexFileMethodInfo* method_info) {
+ if (!ext_dex_file->dex_file_->IsInDataSection(ext_dex_file->dex_file_->Begin() + dex_offset)) {
+ return false; // The DEX offset is not within the bytecode of this dex file.
+ }
+
+ if (ext_dex_file->dex_file_->IsCompactDexFile()) {
+ // The data section of compact dex files might be shared.
+ // Check the subrange unique to this compact dex.
+ const art::CompactDexFile::Header& cdex_header =
+ ext_dex_file->dex_file_->AsCompactDexFile()->GetHeader();
+ uint32_t begin = cdex_header.data_off_ + cdex_header.OwnedDataBegin();
+ uint32_t end = cdex_header.data_off_ + cdex_header.OwnedDataEnd();
+ if (dex_offset < begin || dex_offset >= end) {
+ return false; // The DEX offset is not within the bytecode of this dex file.
+ }
+ }
+
+ art::MethodCacheEntry* entry = ext_dex_file->GetMethodCacheEntryForOffset(dex_offset);
+ if (entry != nullptr) {
+ method_info->offset = entry->offset;
+ method_info->len = entry->len;
+ method_info->name = new ExtDexFileString{ext_dex_file->GetMethodName(*entry)};
+ return true;
+ }
+
+ return false;
+}
+
+void ExtDexFileGetAllMethodInfos(ExtDexFile* ext_dex_file,
+ int with_signature,
+ ExtDexFileMethodInfoCallback* method_info_cb,
+ void* user_data) {
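+  // Visit every method that has a code item, passing its code offset, length and pretty name to the callback.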
+ for (art::ClassAccessor accessor : ext_dex_file->dex_file_->GetClasses()) {
+ for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
+ art::CodeItemInstructionAccessor code = method.GetInstructions();
+ if (!code.HasCodeItem()) {
+ continue;
+ }
+
+ ExtDexFileMethodInfo method_info;
+ method_info.offset = static_cast<int32_t>(reinterpret_cast<const uint8_t*>(code.Insns()) -
+ ext_dex_file->dex_file_->Begin());
+ method_info.len = code.InsnsSizeInBytes();
+ method_info.name = new ExtDexFileString{
+ ext_dex_file->dex_file_->PrettyMethod(method.GetIndex(), with_signature)};
+ method_info_cb(&method_info, user_data);
+ }
+ }
+}
+
+void ExtDexFileFree(ExtDexFile* ext_dex_file) { delete (ext_dex_file); }
+
+} // extern "C"
diff --git a/libdexfile/external/dex_file_supp.cc b/libdexfile/external/dex_file_supp.cc
new file mode 100644
index 0000000..6514c8a
--- /dev/null
+++ b/libdexfile/external/dex_file_supp.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_api/ext_dex_file.h"
+
+namespace art_api {
+namespace dex {
+
+DexFile::~DexFile() { ExtDexFileFree(ext_dex_file_); }
+
+MethodInfo DexFile::AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info) {
+ return {ext_method_info.offset, ext_method_info.len, DexString(ext_method_info.name)};
+}
+
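+// Callback passed to ExtDexFileGetAllMethodInfos; appends each converted MethodInfo
+// to the MethodInfoVector supplied via ctx.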
+void DexFile::AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* ctx) {
+ auto vect = static_cast<MethodInfoVector*>(ctx);
+ vect->emplace_back(AbsorbMethodInfo(*ext_method_info));
+}
+
+} // namespace dex
+} // namespace art_api
diff --git a/libdexfile/external/include/art_api/ext_dex_file.h b/libdexfile/external/include/art_api/ext_dex_file.h
new file mode 100644
index 0000000..5f64ab1
--- /dev/null
+++ b/libdexfile/external/include/art_api/ext_dex_file.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_
+#define ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_
+
+// Dex file external API
+
+#include <sys/types.h>
+
+#include <cstring>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include <android-base/macros.h>
+
+extern "C" {
+
+// This is the stable C ABI that backs art_api::dex below. Structs and functions
+// may only be added here.
+// TODO(b/120978655): Move this to a separate pure C header.
+//
+// Clients should use the C++ wrappers in art_api::dex instead.
+
+// Opaque wrapper for an std::string allocated in libdexfile which must be freed
+// using ExtDexFileFreeString.
+class ExtDexFileString;
+
+// Returns an ExtDexFileString initialized to the given string.
+const ExtDexFileString* ExtDexFileMakeString(const char* str);
+
+// Returns a pointer to the underlying null-terminated character array and its
+// size for the given ExtDexFileString.
+const char* ExtDexFileGetString(const ExtDexFileString* ext_string, /*out*/ size_t* size);
+
+// Frees an ExtDexFileString.
+void ExtDexFileFreeString(const ExtDexFileString* ext_string);
+
+struct ExtDexFileMethodInfo {
+ int32_t offset;
+ int32_t len;
+ const ExtDexFileString* name;
+};
+
+class ExtDexFile;
+
+// See art_api::dex::DexFile::OpenFromMemory. Returns true on success.
+int ExtDexFileOpenFromMemory(const void* addr,
+ /*inout*/ size_t* size,
+ const char* location,
+ /*out*/ const ExtDexFileString** error_msg,
+ /*out*/ ExtDexFile** ext_dex_file);
+
+// See art_api::dex::DexFile::OpenFromFd. Returns true on success.
+int ExtDexFileOpenFromFd(int fd,
+ off_t offset,
+ const char* location,
+ /*out*/ const ExtDexFileString** error_msg,
+ /*out*/ ExtDexFile** ext_dex_file);
+
+// See art_api::dex::DexFile::GetMethodInfoForOffset. Returns true on success.
+int ExtDexFileGetMethodInfoForOffset(ExtDexFile* ext_dex_file,
+ int64_t dex_offset,
+ /*out*/ ExtDexFileMethodInfo* method_info);
+
+typedef void ExtDexFileMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info,
+ void* user_data);
+
+// See art_api::dex::DexFile::GetAllMethodInfos.
+void ExtDexFileGetAllMethodInfos(ExtDexFile* ext_dex_file,
+ int with_signature,
+ ExtDexFileMethodInfoCallback* method_info_cb,
+ void* user_data);
+
+// Frees an ExtDexFile.
+void ExtDexFileFree(ExtDexFile* ext_dex_file);
+
+} // extern "C"
+
+namespace art_api {
+namespace dex {
+
+// Minimal std::string look-alike for a string returned from libdexfile.
+class DexString final {
+ public:
+ DexString(DexString&& dex_str) { ReplaceExtString(std::move(dex_str)); }
+ explicit DexString(const char* str = "") : ext_string_(ExtDexFileMakeString(str)) {}
+ ~DexString() { ExtDexFileFreeString(ext_string_); }
+
+ DexString& operator=(DexString&& dex_str) {
+ ReplaceExtString(std::move(dex_str));
+ return *this;
+ }
+
+ const char* data() const {
+ size_t ignored;
+ return ExtDexFileGetString(ext_string_, &ignored);
+ }
+ const char* c_str() const { return data(); }
+
+ size_t size() const {
+ size_t len;
+ (void)ExtDexFileGetString(ext_string_, &len);
+ return len;
+ }
+ size_t length() const { return size(); }
+
+ operator std::string_view() const {
+ size_t len;
+ const char* chars = ExtDexFileGetString(ext_string_, &len);
+ return std::string_view(chars, len);
+ }
+
+ private:
+ friend class DexFile;
+ friend bool operator==(const DexString&, const DexString&);
+ explicit DexString(const ExtDexFileString* ext_string) : ext_string_(ext_string) {}
+ const ExtDexFileString* ext_string_; // Owned instance. Never nullptr.
+
+ void ReplaceExtString(DexString&& dex_str) {
+ ext_string_ = dex_str.ext_string_;
+ dex_str.ext_string_ = ExtDexFileMakeString("");
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(DexString);
+};
+
+inline bool operator==(const DexString& s1, const DexString& s2) {
+ size_t l1, l2;
+ const char* str1 = ExtDexFileGetString(s1.ext_string_, &l1);
+ const char* str2 = ExtDexFileGetString(s2.ext_string_, &l2);
+ // Use memcmp to avoid assumption about absence of null characters in the strings.
+ return l1 == l2 && !std::memcmp(str1, str2, l1);
+}
+
+struct MethodInfo {
+ int32_t offset; // Code offset relative to the start of the dex file header
+ int32_t len; // Code length
+ DexString name;
+};
+
+inline bool operator==(const MethodInfo& s1, const MethodInfo& s2) {
+ return s1.offset == s2.offset && s1.len == s2.len && s1.name == s2.name;
+}
+
+// External stable API to access ordinary dex files and CompactDex. This wraps
+// the stable C ABI and handles instance ownership. Thread-compatible but not
+// thread-safe.
+class DexFile {
+ public:
+ DexFile(DexFile&& dex_file) {
+ ext_dex_file_ = dex_file.ext_dex_file_;
+ dex_file.ext_dex_file_ = nullptr;
+ }
+ virtual ~DexFile();
+
+ // Interprets a chunk of memory as a dex file. As long as *size is too small,
+ // returns nullptr, sets *size to a new size to try again with, and sets
+ // *error_msg to "". That might happen repeatedly. Also returns nullptr
+ // on error in which case *error_msg is set to a nonempty string.
+ //
+ // location is a string that describes the dex file, and is preferably its
+ // path. It is mostly used to make error messages better, and may be "".
+ //
+ // The caller must retain the memory.
+ static std::unique_ptr<DexFile> OpenFromMemory(const void* addr,
+ size_t* size,
+ const std::string& location,
+ /*out*/ std::string* error_msg) {
+ ExtDexFile* ext_dex_file;
+ const ExtDexFileString* ext_error_msg = nullptr;
+ if (ExtDexFileOpenFromMemory(addr, size, location.c_str(), &ext_error_msg, &ext_dex_file)) {
+ return std::unique_ptr<DexFile>(new DexFile(ext_dex_file));
+ }
+ *error_msg = (ext_error_msg == nullptr) ? "" : std::string(DexString(ext_error_msg));
+ return nullptr;
+ }
+
+ // mmaps the given file offset in the open fd and reads a dexfile from there.
+ // Returns nullptr on error in which case *error_msg is set.
+ //
+ // location is a string that describes the dex file, and is preferably its
+ // path. It is mostly used to make error messages better, and may be "".
+ static std::unique_ptr<DexFile> OpenFromFd(int fd,
+ off_t offset,
+ const std::string& location,
+ /*out*/ std::string* error_msg) {
+ ExtDexFile* ext_dex_file;
+ const ExtDexFileString* ext_error_msg = nullptr;
+ if (ExtDexFileOpenFromFd(fd, offset, location.c_str(), &ext_error_msg, &ext_dex_file)) {
+ return std::unique_ptr<DexFile>(new DexFile(ext_dex_file));
+ }
+ *error_msg = std::string(DexString(ext_error_msg));
+ return nullptr;
+ }
+
+ // Given an offset relative to the start of the dex file header, if there is a
+ // method whose instruction range includes that offset then returns info about
+ // it, otherwise returns a struct with offset == 0.
+ MethodInfo GetMethodInfoForOffset(int64_t dex_offset) {
+ ExtDexFileMethodInfo ext_method_info;
+ if (ExtDexFileGetMethodInfoForOffset(ext_dex_file_, dex_offset, &ext_method_info)) {
+ return AbsorbMethodInfo(ext_method_info);
+ }
+ return {/*offset=*/0, /*len=*/0, /*name=*/DexString()};
+ }
+
+ // Returns info structs about all methods in the dex file. MethodInfo.name
+ // receives the full function signature if with_signature is set, otherwise it
+ // gets the class and method name only.
+ std::vector<MethodInfo> GetAllMethodInfos(bool with_signature = true) {
+ MethodInfoVector res;
+ ExtDexFileGetAllMethodInfos(
+ ext_dex_file_, with_signature, AddMethodInfoCallback, static_cast<void*>(&res));
+ return res;
+ }
+
+ private:
+ explicit DexFile(ExtDexFile* ext_dex_file) : ext_dex_file_(ext_dex_file) {}
+ ExtDexFile* ext_dex_file_; // Owned instance. nullptr only in moved-from zombies.
+
+ typedef std::vector<MethodInfo> MethodInfoVector;
+
+ static MethodInfo AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info);
+ static void AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* user_data);
+
+ DISALLOW_COPY_AND_ASSIGN(DexFile);
+};
+
+} // namespace dex
+} // namespace art_api
+
+#endif // ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_
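Note (illustrative, not part of the change): a minimal usage sketch of the C++ wrapper declared above, assuming the header is included as "art_api/ext_dex_file.h" and that the whole dex file already sits in a caller-owned buffer. OpenWholeDex and dex_bytes are hypothetical names, not ART APIs.

#include <memory>
#include <string>
#include <vector>

#include "art_api/ext_dex_file.h"  // Assumed include path for the header above.

// Opens a dex file that is fully resident in `dex_bytes`. Per the header comment,
// OpenFromMemory may return nullptr with an empty *error_msg and an enlarged *size
// to request more bytes; with the whole file already in memory that can only mean
// the input is truncated.
std::unique_ptr<art_api::dex::DexFile> OpenWholeDex(const std::vector<char>& dex_bytes,
                                                    std::string* error_msg) {
  size_t size = dex_bytes.size();
  std::unique_ptr<art_api::dex::DexFile> dex = art_api::dex::DexFile::OpenFromMemory(
      dex_bytes.data(), &size, /*location=*/"<memory>", error_msg);
  if (dex == nullptr && error_msg->empty()) {
    *error_msg = "truncated dex file (needs at least " + std::to_string(size) + " bytes)";
  }
  // The caller must keep dex_bytes alive for as long as `dex` is used; it can then be
  // queried, e.g. dex->GetMethodInfoForOffset(code_offset) or dex->GetAllMethodInfos().
  return dex;
}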
diff --git a/libdexfile/external/libdexfile_external.map.txt b/libdexfile/external/libdexfile_external.map.txt
new file mode 100644
index 0000000..450b633
--- /dev/null
+++ b/libdexfile/external/libdexfile_external.map.txt
@@ -0,0 +1,13 @@
+LIBDEXFILE_EXTERNAL_1 {
+ global:
+ ExtDexFileFree;
+ ExtDexFileFreeString;
+ ExtDexFileGetAllMethodInfos;
+ ExtDexFileGetMethodInfoForOffset;
+ ExtDexFileGetString;
+ ExtDexFileMakeString;
+ ExtDexFileOpenFromFd;
+ ExtDexFileOpenFromMemory;
+ local:
+ *;
+};
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 1c74a92..d2a5bb8 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -473,9 +473,6 @@
GetQuickToInterpreterBridgeOffset);
#undef DUMP_OAT_HEADER_OFFSET
- os << "BOOT IMAGE CHECKSUM:\n";
- os << StringPrintf("0x%08x\n\n", oat_header.GetBootImageChecksum());
-
// Print the key-value store.
{
os << "KEY VALUE STORE:\n";
diff --git a/runtime/Android.bp b/runtime/Android.bp
index b03ef60..4780f16 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -420,6 +420,8 @@
"libbacktrace",
"libbase",
"libcutils",
+ "libdexfile_external", // libunwindstack dependency
+ "libdexfile_support", // libunwindstack dependency
"liblog",
"libnativebridge",
"libnativeloader",
@@ -553,9 +555,6 @@
header_libs: [
"libnativehelper_header_only",
],
- include_dirs: [
- "external/icu/icu4c/source/common",
- ],
}
art_cc_test {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d33541c..3b92e2c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -212,6 +212,22 @@
self->AssertPendingException();
}
+// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
+// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
+// before.
+template <bool kNeedsVerified = false>
+static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kNeedsVerified) {
+ // To not fail access-flags access checks, push a minimal state.
+ mirror::Class::SetStatus(klass, ClassStatus::kVerified, Thread::Current());
+ }
+ if (!klass->WasVerificationAttempted()) {
+ klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
+ klass->SetVerificationAttempted();
+ }
+}
+
void ClassLinker::ThrowEarlierClassFailure(ObjPtr<mirror::Class> c, bool wrap_in_no_class_def) {
// The class failed to initialize on a previous attempt, so we want to throw
// a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we
@@ -1037,20 +1053,15 @@
runtime->SetSentinel(heap->AllocNonMovableObject<true>(
self, java_lang_Object, java_lang_Object->GetObjectSize(), VoidFunctor()));
- const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
- if (boot_class_path.size() != spaces.size()) {
- *error_msg = StringPrintf("Boot class path has %zu components but there are %zu image spaces.",
- boot_class_path.size(),
- spaces.size());
- return false;
- }
+ const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
+ CHECK_LE(spaces.size(), boot_class_path_locations.size());
for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
// Boot class loader, use a null handle.
std::vector<std::unique_ptr<const DexFile>> dex_files;
if (!AddImageSpace(spaces[i],
ScopedNullHandle<mirror::ClassLoader>(),
/*dex_elements=*/ nullptr,
- /*dex_location=*/ boot_class_path[i].c_str(),
+ /*dex_location=*/ boot_class_path_locations[i].c_str(),
/*out*/&dex_files,
error_msg)) {
return false;
@@ -1069,6 +1080,15 @@
return true;
}
+void ClassLinker::AddExtraBootDexFiles(
+ Thread* self,
+ std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files) {
+ for (std::unique_ptr<const DexFile>& dex_file : additional_dex_files) {
+ AppendToBootClassPath(self, *dex_file);
+ boot_dex_files_.push_back(std::move(dex_file));
+ }
+}
+
bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
ObjPtr<mirror::ClassLoader> class_loader) {
return class_loader == nullptr ||
@@ -3946,6 +3966,7 @@
h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
h_class->SetPrimitiveType(type);
h_class->SetIfTable(GetClassRoot<mirror::Object>(this)->GetIfTable());
+ EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(h_class, image_pointer_size_);
mirror::Class::SetStatus(h_class, ClassStatus::kInitialized, self);
const char* descriptor = Primitive::Descriptor(type);
ObjPtr<mirror::Class> existing = InsertClass(descriptor,
@@ -4093,6 +4114,7 @@
new_class->PopulateEmbeddedVTable(image_pointer_size_);
ImTable* object_imt = java_lang_Object->GetImt(image_pointer_size_);
new_class->SetImt(object_imt, image_pointer_size_);
+ EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(new_class, image_pointer_size_);
mirror::Class::SetStatus(new_class, ClassStatus::kInitialized, self);
// don't need to set new_class->SetObjectSize(..)
// because Object::SizeOf delegates to Array::SizeOf
@@ -4123,6 +4145,8 @@
// and remove "interface".
access_flags |= kAccAbstract | kAccFinal;
access_flags &= ~kAccInterface;
+ // Arrays are access-checks-clean and preverified.
+ access_flags |= kAccVerificationAttempted;
new_class->SetAccessFlags(access_flags);
@@ -4357,17 +4381,6 @@
return false;
}
-// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
-// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
-// before.
-static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!klass->WasVerificationAttempted()) {
- klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
- klass->SetVerificationAttempted();
- }
-}
-
verifier::FailureKind ClassLinker::VerifyClass(
Thread* self, Handle<mirror::Class> klass, verifier::HardFailLogMode log_level) {
{
@@ -4844,6 +4857,7 @@
{
// Lock on klass is released. Lock new class object.
ObjectLock<mirror::Class> initialization_lock(self, klass);
+ EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
mirror::Class::SetStatus(klass, ClassStatus::kInitialized, self);
}
@@ -5594,8 +5608,7 @@
DCHECK(c != nullptr);
if (c->IsInitialized()) {
- EnsureSkipAccessChecksMethods(c, image_pointer_size_);
- self->AssertNoPendingException();
+ DCHECK(c->WasVerificationAttempted()) << c->PrettyClassAndClassLoader();
return true;
}
// SubtypeCheckInfo::Initialized must happen-before any new-instance for that type.
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d3eab7c..d0a7c9b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -127,6 +127,12 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_);
+ // Add boot class path dex files that were not included in the boot image.
+ // ClassLinker takes ownership of these dex files.
+ void AddExtraBootDexFiles(Thread* self,
+ std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Add an image space to the class linker, may fix up classloader fields and dex cache fields.
// The dex files that were newly opened for the space are placed in the out argument
// out_dex_files. Returns true if the operation succeeded.
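Side note on the ownership contract stated above ("ClassLinker takes ownership of these dex files"): AddExtraBootDexFiles takes the vector by rvalue reference and moves each element into its own storage (see the class_linker.cc hunk earlier). A standalone sketch of that pattern, with illustrative names (Resource, Registry) that are not ART types:

#include <memory>
#include <utility>
#include <vector>

struct Resource { int id; };

class Registry {
 public:
  // Takes the vector by rvalue reference and moves every element; after the call the
  // Registry owns each Resource and the caller's vector holds only null unique_ptrs.
  void AddExtra(std::vector<std::unique_ptr<const Resource>>&& extra) {
    for (std::unique_ptr<const Resource>& r : extra) {
      owned_.push_back(std::move(r));
    }
  }

 private:
  std::vector<std::unique_ptr<const Resource>> owned_;
};

// Usage: registry.AddExtra(std::move(resources));
// `resources` keeps its size afterwards, but every element is nullptr.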
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index fe45b9e..061c788 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -114,7 +114,8 @@
EXPECT_EQ(0, primitive->GetIfTableCount());
EXPECT_TRUE(primitive->GetIfTable() != nullptr);
EXPECT_EQ(primitive->GetIfTable()->Count(), 0u);
- EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
+ EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract | kAccVerificationAttempted,
+ primitive->GetAccessFlags());
}
void AssertObjectClass(ObjPtr<mirror::Class> JavaLangObject)
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index a101976..a20baa0 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -24,7 +24,6 @@
#include "nativehelper/scoped_local_ref.h"
#include "android-base/stringprintf.h"
-#include <unicode/uvernum.h>
#include "art_field-inl.h"
#include "base/file_utils.h"
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index b46c933..7f697d1 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -116,19 +116,19 @@
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_EQ(filter, odex_file->GetCompilerFilter());
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(image_location.c_str(),
- kRuntimeISA,
- &error_msg));
- ASSERT_TRUE(image_header != nullptr) << error_msg;
+ std::string boot_image_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+ Runtime::Current()->GetBootClassPath(), image_location, kRuntimeISA, &error_msg);
+ ASSERT_FALSE(boot_image_checksums.empty()) << error_msg;
+
const OatHeader& oat_header = odex_file->GetOatHeader();
- uint32_t boot_image_checksum = image_header->GetImageChecksum();
if (CompilerFilter::DependsOnImageChecksum(filter)) {
+ const char* checksums = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+ ASSERT_TRUE(checksums != nullptr);
if (with_alternate_image) {
- EXPECT_NE(boot_image_checksum, oat_header.GetBootImageChecksum());
+ EXPECT_NE(boot_image_checksums, checksums);
} else {
- EXPECT_EQ(boot_image_checksum, oat_header.GetBootImageChecksum());
+ EXPECT_EQ(boot_image_checksums, checksums);
}
}
}
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3160422..1014c0e 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -37,14 +37,15 @@
mirror::Object* ref,
accounting::ContinuousSpaceBitmap* bitmap) {
if (kEnableGenerationalConcurrentCopyingCollection
- && young_gen_
&& !done_scanning_.load(std::memory_order_acquire)) {
- // Everything in the unevac space should be marked for generational CC except for large objects.
- DCHECK(region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) << ref << " "
+ // Everything in the unevac space should be marked for young generation CC,
+ // except for large objects.
+ DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref))
+ << ref << " "
<< ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass();
- // Since the mark bitmap is still filled in from last GC, we can not use that or else the
- // mutator may see references to the from space. Instead, use the baker pointer itself as
- // the mark bit.
+ // Since the mark bitmap is still filled in from the last GC (or from the marking phase of
+ // 2-phase CC), we cannot use it, or else the mutator may see references to the from space.
+ // Instead, use the baker pointer itself as the mark bit.
if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
// TODO: We don't actually need to scan this object later, we just need to clear the gray
// bit.
@@ -244,7 +245,7 @@
DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
return true;
- } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+ } else if (!kEnableGenerationalConcurrentCopyingCollection
|| done_scanning_.load(std::memory_order_acquire)) {
// If the card table scanning is not finished yet, then only read-barrier
// state should be checked. Checking the mark bitmap is unreliable as there
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 7736568..861f0d3 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -188,6 +188,11 @@
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
InitializePhase();
+ // In case of forced evacuation, all regions are evacuated and hence no
+ // need to compute live_bytes.
+ if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_ && !force_evacuate_all_) {
+ MarkingPhase();
+ }
}
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
// Switch to read barrier mark entrypoints before we gray the objects. This is required in case
@@ -201,7 +206,7 @@
FlipThreadRoots();
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
- MarkingPhase();
+ CopyingPhase();
}
// Verify no from space refs. This causes a pause.
if (kEnableNoFromSpaceRefsVerification) {
@@ -299,12 +304,22 @@
DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect);
space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
- // Age all of the cards for the region space so that we know which evac regions to scan.
- Runtime::Current()->GetHeap()->GetCardTable()->ModifyCardsAtomic(
- space->Begin(),
- space->End(),
- AgeCardVisitor(),
- VoidFunctor());
+ if (young_gen_) {
+ // Age all of the cards for the region space so that we know which evac regions to scan.
+ heap_->GetCardTable()->ModifyCardsAtomic(space->Begin(),
+ space->End(),
+ AgeCardVisitor(),
+ VoidFunctor());
+ } else {
+ // In a full-heap GC cycle, the card-table corresponding to region-space and
+ // non-moving space can be cleared, because this cycle only needs to
+ // capture writes during the marking phase of this cycle to catch
+ // objects that skipped marking due to heap mutation. Furthermore,
+ // if the next GC is a young-gen cycle, then it only needs writes to
+ // be captured after the thread-flip of this GC cycle, as that is when
+ // the young-gen for the next GC cycle starts getting populated.
+ heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
+ }
} else {
if (space == region_space_) {
// It is OK to clear the bitmap with mutators running since the only place it is read is
@@ -381,6 +396,7 @@
if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
region_space_bitmap_->Clear();
}
+ mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
// Mark all of the zygote large objects without graying them.
MarkZygoteLargeObjects();
}
@@ -471,7 +487,7 @@
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
- if (kVerifyNoMissingCardMarks) {
+ if (kVerifyNoMissingCardMarks && cc->young_gen_) {
cc->VerifyNoMissingCardMarks();
}
CHECK_EQ(thread, self);
@@ -485,9 +501,11 @@
}
{
TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
- // Only change live bytes for full CC.
+ // Only change live bytes for 1-phase full heap CC.
cc->region_space_->SetFromSpace(
- cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_);
+ cc->rb_table_,
+ evac_mode,
+ /*clear_live_bytes=*/ !kEnableGenerationalConcurrentCopyingCollection);
}
cc->SwapStacks();
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -496,9 +514,7 @@
cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
}
cc->is_marking_ = true;
- cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal,
- std::memory_order_relaxed);
- if (kIsDebugBuild && !cc->young_gen_) {
+ if (kIsDebugBuild && !kEnableGenerationalConcurrentCopyingCollection) {
cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
}
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
@@ -860,13 +876,483 @@
ConcurrentCopying* const collector_;
};
-// Concurrently mark roots that are guarded by read barriers and process the mark stack.
+template <bool kAtomicTestAndSet>
+class ConcurrentCopying::CaptureRootsForMarkingVisitor : public RootVisitor {
+ public:
+ explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self)
+ : collector_(cc), self_(self) {}
+
+ void VisitRoots(mirror::Object*** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::Object** root = roots[i];
+ mirror::Object* ref = *root;
+ if (ref != nullptr && !collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+ collector_->PushOntoMarkStack(self_, ref);
+ }
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::CompressedReference<mirror::Object>* const root = roots[i];
+ if (!root->IsNull()) {
+ mirror::Object* ref = root->AsMirrorPtr();
+ if (!collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+ collector_->PushOntoMarkStack(self_, ref);
+ }
+ }
+ }
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+ Thread* const self_;
+};
+
+class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
+ public:
+ RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
+ bool disable_weak_ref_access)
+ : concurrent_copying_(concurrent_copying),
+ disable_weak_ref_access_(disable_weak_ref_access) {
+ }
+
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+ // Note: self is not necessarily equal to thread since thread may be suspended.
+ Thread* const self = Thread::Current();
+ CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
+ << thread->GetState() << " thread " << thread << " self " << self;
+ // Revoke thread local mark stacks.
+ accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
+ if (tl_mark_stack != nullptr) {
+ MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
+ concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
+ thread->SetThreadLocalMarkStack(nullptr);
+ }
+ // Disable weak ref access.
+ if (disable_weak_ref_access_) {
+ thread->SetWeakRefAccessEnabled(false);
+ }
+ // If thread is a running mutator, then act on behalf of the garbage collector.
+ // See the code in ThreadList::RunCheckpoint.
+ concurrent_copying_->GetBarrier().Pass(self);
+ }
+
+ protected:
+ ConcurrentCopying* const concurrent_copying_;
+
+ private:
+ const bool disable_weak_ref_access_;
+};
+
+class ConcurrentCopying::CaptureThreadRootsForMarkingAndCheckpoint :
+ public RevokeThreadLocalMarkStackCheckpoint {
+ public:
+ explicit CaptureThreadRootsForMarkingAndCheckpoint(ConcurrentCopying* cc) :
+ RevokeThreadLocalMarkStackCheckpoint(cc, /* disable_weak_ref_access */ false) {}
+
+ void Run(Thread* thread) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
+ // only.
+ CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self);
+ thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+ // Barrier handling is done in the base class' Run() below.
+ RevokeThreadLocalMarkStackCheckpoint::Run(thread);
+ }
+};
+
+void ConcurrentCopying::CaptureThreadRootsForMarking() {
+ TimingLogger::ScopedTiming split("CaptureThreadRootsForMarking", GetTimings());
+ if (kVerboseMode) {
+ LOG(INFO) << "time=" << region_space_->Time();
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+ }
+ Thread* const self = Thread::Current();
+ CaptureThreadRootsForMarkingAndCheckpoint check_point(this);
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ gc_barrier_->Init(self, 0);
+ size_t barrier_count = thread_list->RunCheckpoint(&check_point, /* callback */ nullptr);
+ // If there are no threads to wait for, which implies that all the checkpoint functions have
+ // finished, then there is no need to release the mutator lock.
+ if (barrier_count == 0) {
+ return;
+ }
+ Locks::mutator_lock_->SharedUnlock(self);
+ {
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ gc_barrier_->Increment(self, barrier_count);
+ }
+ Locks::mutator_lock_->SharedLock(self);
+ if (kVerboseMode) {
+ LOG(INFO) << "time=" << region_space_->Time();
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+ LOG(INFO) << "GC end of CaptureThreadRootsForMarking";
+ }
+}
+
+// Used to scan ref fields of an object.
+template <bool kHandleInterRegionRefs>
+class ConcurrentCopying::ComputeLiveBytesAndMarkRefFieldsVisitor {
+ public:
+ explicit ComputeLiveBytesAndMarkRefFieldsVisitor(ConcurrentCopying* collector,
+ size_t obj_region_idx)
+ : collector_(collector),
+ obj_region_idx_(obj_region_idx),
+ contains_inter_region_idx_(false) {}
+
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+ DCHECK_EQ(collector_->RegionSpace()->RegionIdxForRef(obj), obj_region_idx_);
+ DCHECK(kHandleInterRegionRefs || collector_->immune_spaces_.ContainsObject(obj));
+ CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset));
+ }
+
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
+ DCHECK(klass->IsTypeOfReferenceClass());
+ // If the referent is not null, then we must re-visit the object during the
+ // copying phase to enqueue it for delayed processing and set its
+ // read-barrier state to gray, so that a call to GetReferent() triggers the
+ // read-barrier. We reuse the data structure that remembers objects with
+ // inter-region refs for this purpose.
+ if (kHandleInterRegionRefs
+ && !contains_inter_region_idx_
+ && ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr) {
+ contains_inter_region_idx_ = true;
+ }
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CheckReference(root->AsMirrorPtr());
+ }
+
+ bool ContainsInterRegionRefs() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return contains_inter_region_idx_;
+ }
+
+ private:
+ void CheckReference(mirror::Object* ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (ref == nullptr) {
+ // Nothing to do.
+ return;
+ }
+ if (!collector_->TestAndSetMarkBitForRef(ref)) {
+ collector_->PushOntoLocalMarkStack(ref);
+ }
+ if (kHandleInterRegionRefs && !contains_inter_region_idx_) {
+ size_t ref_region_idx = collector_->RegionSpace()->RegionIdxForRef(ref);
+ // If a region-space object refers to an outside object, we will have a
+ // mismatch of region idx, but the object need not be re-visited in the
+ // copying phase.
+ if (ref_region_idx != static_cast<size_t>(-1) && obj_region_idx_ != ref_region_idx) {
+ contains_inter_region_idx_ = true;
+ }
+ }
+ }
+
+ ConcurrentCopying* const collector_;
+ const size_t obj_region_idx_;
+ mutable bool contains_inter_region_idx_;
+};
+
+void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) {
+ DCHECK(ref != nullptr);
+ DCHECK(!immune_spaces_.ContainsObject(ref));
+ DCHECK(TestMarkBitmapForRef(ref));
+ size_t obj_region_idx = static_cast<size_t>(-1);
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ obj_region_idx = region_space_->RegionIdxForRefUnchecked(ref);
+ // Add live bytes to the corresponding region
+ if (!region_space_->IsRegionNewlyAllocated(obj_region_idx)) {
+ // Newly allocated regions are always chosen for evacuation, so there is
+ // no need to update live_bytes_.
+ size_t obj_size = ref->SizeOf<kDefaultVerifyFlags>();
+ size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+ region_space_->AddLiveBytes(ref, alloc_size);
+ }
+ }
+ ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ true>
+ visitor(this, obj_region_idx);
+ ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor, visitor);
+ // Mark the corresponding card dirty if the object contains any
+ // inter-region reference.
+ if (visitor.ContainsInterRegionRefs()) {
+ heap_->GetCardTable()->MarkCard(ref);
+ }
+}
+
+template <bool kAtomic>
+bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) {
+ accounting::ContinuousSpaceBitmap* bitmap = nullptr;
+ accounting::LargeObjectBitmap* los_bitmap = nullptr;
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ bitmap = region_space_bitmap_;
+ } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+ bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+ } else if (immune_spaces_.ContainsObject(ref)) {
+ // References to immune space objects are always live.
+ DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+ return true;
+ } else {
+ // Should be a large object. Must be page aligned and the LOS must exist.
+ if (kIsDebugBuild
+ && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+ // It must be heap corruption. Remove memory protection and dump data.
+ region_space_->Unprotect();
+ heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+ MemberOffset(0),
+ ref,
+ /* fatal */ true);
+ }
+ los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+ }
+ if (kAtomic) {
+ return (bitmap != nullptr) ? bitmap->AtomicTestAndSet(ref) : los_bitmap->AtomicTestAndSet(ref);
+ } else {
+ return (bitmap != nullptr) ? bitmap->Set(ref) : los_bitmap->Set(ref);
+ }
+}
+
+bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) {
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ return region_space_bitmap_->Test(ref);
+ } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+ return heap_->GetNonMovingSpace()->GetMarkBitmap()->Test(ref);
+ } else if (immune_spaces_.ContainsObject(ref)) {
+ // References to immune space objects are always live.
+ DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+ return true;
+ } else {
+ // Should be a large object. Must be page aligned and the LOS must exist.
+ if (kIsDebugBuild
+ && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+ // It must be heap corruption. Remove memory protection and dump data.
+ region_space_->Unprotect();
+ heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+ MemberOffset(0),
+ ref,
+ /* fatal */ true);
+ }
+ return heap_->GetLargeObjectsSpace()->GetMarkBitmap()->Test(ref);
+ }
+}
+
+void ConcurrentCopying::PushOntoLocalMarkStack(mirror::Object* ref) {
+ if (kIsDebugBuild) {
+ Thread *self = Thread::Current();
+ DCHECK_EQ(thread_running_gc_, self);
+ DCHECK(self->GetThreadLocalMarkStack() == nullptr);
+ }
+ DCHECK_EQ(mark_stack_mode_.load(std::memory_order_relaxed), kMarkStackModeThreadLocal);
+ gc_mark_stack_->PushBack(ref);
+}
+
+void ConcurrentCopying::ProcessMarkStackForMarkingAndComputeLiveBytes() {
+ // Process thread-local mark stack containing thread roots
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
+ /* checkpoint_callback */ nullptr,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ AddLiveBytesAndScanRef(ref);
+ });
+
+ while (!gc_mark_stack_->IsEmpty()) {
+ mirror::Object* ref = gc_mark_stack_->PopBack();
+ AddLiveBytesAndScanRef(ref);
+ }
+}
+
+class ConcurrentCopying::ImmuneSpaceCaptureRefsVisitor {
+ public:
+ explicit ImmuneSpaceCaptureRefsVisitor(ConcurrentCopying* cc) : collector_(cc) {}
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ false>
+ visitor(collector_, /*obj_region_idx*/ static_cast<size_t>(-1));
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor, visitor);
+ }
+
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ reinterpret_cast<ImmuneSpaceCaptureRefsVisitor*>(arg)->operator()(obj);
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+};
+
+/* Invariants for two-phase CC
+ * ===========================
+ * A) Definitions
+ * ---------------
+ * 1) Black: marked in bitmap, rb_state is non-gray, and not in mark stack
+ * 2) Black-clean: marked in bitmap, and corresponding card is clean/aged
+ * 3) Black-dirty: marked in bitmap, and corresponding card is dirty
+ * 4) Gray: marked in bitmap, and exists in mark stack
+ * 5) Gray-dirty: marked in bitmap, rb_state is gray, corresponding card is
+ * dirty, and exists in mark stack
+ * 6) White: unmarked in bitmap, rb_state is non-gray, and not in mark stack
+ *
+ * B) Before marking phase
+ * -----------------------
+ * 1) All objects are white
+ * 2) Cards are either clean or aged (cannot be asserted without a STW pause)
+ * 3) Mark bitmap is cleared
+ * 4) Mark stack is empty
+ *
+ * C) During marking phase
+ * ------------------------
+ * 1) If a black object holds an inter-region or white reference, then its
+ * corresponding card is dirty. In other words, it changes from being
+ * black-clean to black-dirty
+ * 2) No black-clean object points to a white object
+ *
+ * D) After marking phase
+ * -----------------------
+ * 1) There are no gray objects
+ * 2) All newly allocated objects are in from space
+ * 3) No white object can be reachable, directly or otherwise, from a
+ * black-clean object
+ *
+ * E) During copying phase
+ * ------------------------
+ * 1) Mutators cannot observe white and black-dirty objects
+ * 2) New allocations are in to-space (newly allocated regions are part of to-space)
+ * 3) An object in mark stack must have its rb_state = Gray
+ *
+ * F) During card table scan
+ * --------------------------
+ * 1) Referents corresponding to root references are gray or in to-space
+ * 2) Every path from an object that is read or written by a mutator during
+ * this period to a dirty black object goes through some gray object.
+ * Mutators preserve this by graying black objects as needed during this
+ * period. This ensures that a mutator never encounters a black-dirty object.
+ *
+ * G) After card table scan
+ * ------------------------
+ * 1) There are no black-dirty objects
+ * 2) Referents corresponding to root references are gray, black-clean or in
+ * to-space
+ *
+ * H) After copying phase
+ * -----------------------
+ * 1) Mark stack is empty
+ * 2) No references into evacuated from-space
+ * 3) No reference to an object which is unmarked and is also not in newly
+ * allocated region. In other words, no reference to white objects.
+*/
+
void ConcurrentCopying::MarkingPhase() {
TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
if (kVerboseMode) {
LOG(INFO) << "GC MarkingPhase";
}
+ accounting::CardTable* const card_table = heap_->GetCardTable();
+ Thread* const self = Thread::Current();
+ // Clear live_bytes_ of every non-free region, except the ones that are newly
+ // allocated.
+ region_space_->SetAllRegionLiveBytesZero();
+ if (kIsDebugBuild) {
+ region_space_->AssertAllRegionLiveBytesZeroOrCleared();
+ }
+ // Scan immune spaces
+ {
+ TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
+ for (auto& space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ ImmuneSpaceCaptureRefsVisitor visitor(this);
+ if (table != nullptr) {
+ table->VisitObjects(ImmuneSpaceCaptureRefsVisitor::Callback, &visitor);
+ } else {
+ WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ card_table->Scan<false>(
+ live_bitmap,
+ space->Begin(),
+ space->Limit(),
+ visitor,
+ accounting::CardTable::kCardDirty - 1);
+ }
+ }
+ }
+ // Scan runtime roots
+ {
+ TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
+ CaptureRootsForMarkingVisitor visitor(this, self);
+ Runtime::Current()->VisitConcurrentRoots(&visitor, kVisitRootFlagAllRoots);
+ }
+ {
+ // TODO: don't visit the transaction roots if it's not active.
+ TimingLogger::ScopedTiming split2("VisitNonThreadRoots", GetTimings());
+ CaptureRootsForMarkingVisitor visitor(this, self);
+ Runtime::Current()->VisitNonThreadRoots(&visitor);
+ }
+ // Capture thread roots
+ CaptureThreadRootsForMarking();
+ // Process mark stack
+ ProcessMarkStackForMarkingAndComputeLiveBytes();
+
+ // Age the cards.
+ for (space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace() || space->IsZygoteSpace()) {
+ // Image and zygote spaces are already handled since we gray the objects in the pause.
+ continue;
+ }
+ card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
+ }
+
+ if (kVerboseMode) {
+ LOG(INFO) << "GC end of MarkingPhase";
+ }
+}
+
+template <bool kNoUnEvac>
+void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) {
+ Scan<kNoUnEvac>(obj);
+ // Set the read-barrier state of a reference-type object to gray if its
+ // referent is not marked yet. This is to ensure that if GetReferent() is
+ // called, it triggers the read-barrier to process the referent before use.
+ if (UNLIKELY((obj->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass()))) {
+ mirror::Object* referent =
+ obj->AsReference<kVerifyNone, kWithoutReadBarrier>()->GetReferent<kWithoutReadBarrier>();
+ if (referent != nullptr && !IsInToSpace(referent)) {
+ obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
+ }
+ }
+}
+
+// Concurrently mark roots that are guarded by read barriers and process the mark stack.
+void ConcurrentCopying::CopyingPhase() {
+ TimingLogger::ScopedTiming split("CopyingPhase", GetTimings());
+ if (kVerboseMode) {
+ LOG(INFO) << "GC CopyingPhase";
+ }
Thread* self = Thread::Current();
+ accounting::CardTable* const card_table = heap_->GetCardTable();
if (kIsDebugBuild) {
MutexLock mu(self, *Locks::thread_list_lock_);
CHECK(weak_ref_access_enabled_);
@@ -879,7 +1365,7 @@
if (kUseBakerReadBarrier) {
gc_grays_immune_objects_ = false;
}
- if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ if (kEnableGenerationalConcurrentCopyingCollection) {
if (kVerboseMode) {
LOG(INFO) << "GC ScanCardsForSpace";
}
@@ -897,34 +1383,45 @@
continue;
}
// Scan all of the objects on dirty cards in unevac from space, and non moving space. These
- // are from previous GCs and may reference things in the from space.
+ // are from previous GCs (or from marking phase of 2-phase full GC) and may reference things
+ // in the from space.
//
// Note that we do not need to process the large-object space (the only discontinuous space)
// as it contains only large string objects and large primitive array objects, that have no
// reference to other objects, except their class. There is no need to scan these large
// objects, as the String class and the primitive array classes are expected to never move
- // during a minor (young-generation) collection:
+ // during a collection:
// - In the case where we run with a boot image, these classes are part of the image space,
// which is an immune space.
// - In the case where we run without a boot image, these classes are allocated in the
// non-moving space (see art::ClassLinker::InitWithoutImage).
- Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>(
+ card_table->Scan<false>(
space->GetMarkBitmap(),
space->Begin(),
space->End(),
[this, space](mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Don't push or gray unevac refs.
- if (kIsDebugBuild && space == region_space_) {
- // We may get unevac large objects.
- if (!region_space_->IsInUnevacFromSpace(obj)) {
- CHECK(region_space_bitmap_->Test(obj));
- region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
- LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+ // TODO: This code may be refactored to avoid scanning the object while
+ // done_scanning_ is false, by setting rb_state to gray and pushing the
+ // object on the mark stack. However, that would also require clearing the
+ // corresponding mark-bit and, for region space objects, decrementing the
+ // object's size from the corresponding region's live_bytes.
+ if (young_gen_) {
+ // Don't push or gray unevac refs.
+ if (kIsDebugBuild && space == region_space_) {
+ // We may get unevac large objects.
+ if (!region_space_->IsInUnevacFromSpace(obj)) {
+ CHECK(region_space_bitmap_->Test(obj));
+ region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
+ LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+ }
}
+ ScanDirtyObject</*kNoUnEvac*/ true>(obj);
+ } else if (space != region_space_ || region_space_->IsInUnevacFromSpace(obj)) {
+ ScanDirtyObject</*kNoUnEvac*/ false>(obj);
}
- Scan<true>(obj);
},
accounting::CardTable::kCardDirty - 1);
}
@@ -947,10 +1444,13 @@
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
} else {
- // TODO: Scan only the aged cards.
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->Limit()),
- visitor);
+ WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ card_table->Scan<false>(
+ live_bitmap,
+ space->Begin(),
+ space->Limit(),
+ visitor,
+ accounting::CardTable::kCardDirty - 1);
}
}
}
@@ -1059,7 +1559,7 @@
CHECK(weak_ref_access_enabled_);
}
if (kVerboseMode) {
- LOG(INFO) << "GC end of MarkingPhase";
+ LOG(INFO) << "GC end of CopyingPhase";
}
}
@@ -1419,40 +1919,6 @@
ConcurrentCopying* const collector_;
};
-class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
- public:
- RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
- bool disable_weak_ref_access)
- : concurrent_copying_(concurrent_copying),
- disable_weak_ref_access_(disable_weak_ref_access) {
- }
-
- void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
- // Note: self is not necessarily equal to thread since thread may be suspended.
- Thread* self = Thread::Current();
- CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
- << thread->GetState() << " thread " << thread << " self " << self;
- // Revoke thread local mark stacks.
- accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
- if (tl_mark_stack != nullptr) {
- MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
- concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
- thread->SetThreadLocalMarkStack(nullptr);
- }
- // Disable weak ref access.
- if (disable_weak_ref_access_) {
- thread->SetWeakRefAccessEnabled(false);
- }
- // If thread is a running mutator, then act on behalf of the garbage collector.
- // See the code in ThreadList::RunCheckpoint.
- concurrent_copying_->GetBarrier().Pass(self);
- }
-
- private:
- ConcurrentCopying* const concurrent_copying_;
- const bool disable_weak_ref_access_;
-};
-
void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
Closure* checkpoint_callback) {
Thread* self = Thread::Current();
@@ -1510,7 +1976,11 @@
if (mark_stack_mode == kMarkStackModeThreadLocal) {
// Process the thread-local mark stacks and the GC mark stack.
count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
- /* checkpoint_callback= */ nullptr);
+ /* checkpoint_callback= */ nullptr,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ProcessMarkStackRef(ref);
+ });
while (!gc_mark_stack_->IsEmpty()) {
mirror::Object* to_ref = gc_mark_stack_->PopBack();
ProcessMarkStackRef(to_ref);
@@ -1566,8 +2036,10 @@
return count == 0;
}
+template <typename Processor>
size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
- Closure* checkpoint_callback) {
+ Closure* checkpoint_callback,
+ const Processor& processor) {
// Run a checkpoint to collect all thread local mark stacks and iterate over them all.
RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
size_t count = 0;
@@ -1581,7 +2053,7 @@
for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
mirror::Object* to_ref = p->AsMirrorPtr();
- ProcessMarkStackRef(to_ref);
+ processor(to_ref);
++count;
}
{
@@ -1632,6 +2104,12 @@
perform_scan = true;
// Only add to the live bytes if the object was not already marked and we are not the young
// GC.
+ // Why add live bytes even in the 2-phase GC?
+ // We need to ensure that if an unevac region has any live objects, its
+ // live_bytes is non-zero; otherwise ClearFromSpace() will clear the
+ // region. Since we may skip live objects during the marking phase of the
+ // 2-phase GC, we have to account for such objects here.
add_to_live_bytes = true;
}
break;
@@ -1773,7 +2251,12 @@
DisableWeakRefAccessCallback dwrac(this);
// Process the thread local mark stacks one last time after switching to the shared mark stack
// mode and disable weak ref accesses.
- ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac);
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true,
+ &dwrac,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ProcessMarkStackRef(ref);
+ });
if (kVerboseMode) {
LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
}
@@ -2039,7 +2522,7 @@
uint64_t cleared_objects;
{
TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+ region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_);
// `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
// RegionSpace::ClearFromSpace may clear empty unevac regions.
CHECK_GE(cleared_bytes, from_bytes);
@@ -2348,7 +2831,7 @@
DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
return true;
- } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+ } else if (!kEnableGenerationalConcurrentCopyingCollection
|| done_scanning_.load(std::memory_order_acquire)) {
// Read the comment in IsMarkedInUnevacFromSpace()
accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
@@ -2939,7 +3422,7 @@
los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
DCHECK(los_bitmap->HasAddress(ref));
}
- if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ if (kEnableGenerationalConcurrentCopyingCollection) {
// The sticky-bit CC collector is only compatible with Baker-style read barriers.
DCHECK(kUseBakerReadBarrier);
// Not done scanning, use AtomicSetReadBarrierPointer.
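Aside (illustrative, not ART code): the structural change above is that ProcessThreadLocalMarkStacks is now parameterized by a Processor callable, so the same revoke-and-drain loop serves the marking phase (AddLiveBytesAndScanRef) and the copying phase (ProcessMarkStackRef). A minimal sketch of that shape, with hypothetical names:

#include <cstddef>
#include <vector>

struct Object {};

// Drains every revoked per-thread stack, applying `processor` to each reference.
// Mirrors the template <typename Processor> signature added above.
template <typename Processor>
size_t DrainRevokedStacks(std::vector<std::vector<Object*>>& revoked_stacks,
                          const Processor& processor) {
  size_t count = 0;
  for (std::vector<Object*>& stack : revoked_stacks) {
    for (Object* ref : stack) {
      processor(ref);  // Marking phase: account live bytes; copying phase: forward refs.
      ++count;
    }
    stack.clear();
  }
  return count;
}

// Usage sketch: DrainRevokedStacks(stacks, [&](Object* ref) { /* per-phase work */ });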
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 237e070..4442ad5 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -79,6 +79,8 @@
void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void FinishPhase() REQUIRES(!mark_stack_lock_,
@@ -161,6 +163,13 @@
template <bool kNoUnEvac>
void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
+ // Scan the reference fields of object 'obj' in the dirty cards during
+ // card-table scan. In addition to visiting the references, it also sets the
+ // read-barrier state to gray for Reference-type objects to ensure that
+ // GetReferent() called on these objects calls the read-barrier on the referent.
+ template <bool kNoUnEvac>
+ void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
// Process a field.
template <bool kNoUnEvac>
void Process(mirror::Object* obj, MemberOffset offset)
@@ -198,7 +207,10 @@
void VerifyNoMissingCardMarks()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
+ template <typename Processor>
+ size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
+ Closure* checkpoint_callback,
+ const Processor& processor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -295,6 +307,15 @@
// Set the read barrier mark entrypoints to non-null.
void ActivateReadBarrierEntrypoints();
+ void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
+ void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ template <bool kAtomic = false>
+ bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
@@ -375,7 +396,7 @@
// Generational "sticky", only trace through dirty objects in region space.
const bool young_gen_;
// If true, the GC thread is done scanning marked objects on dirty and aged
- // card (see ConcurrentCopying::MarkingPhase).
+ // card (see ConcurrentCopying::CopyingPhase).
Atomic<bool> done_scanning_;
 // The skipped blocks are memory blocks/chunks that were copies of
@@ -441,6 +462,10 @@
class VerifyNoFromSpaceRefsFieldVisitor;
class VerifyNoFromSpaceRefsVisitor;
class VerifyNoMissingCardMarkVisitor;
+ class ImmuneSpaceCaptureRefsVisitor;
+ template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
+ class CaptureThreadRootsForMarkingAndCheckpoint;
+ template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9e1ba35..1c09b5c 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -214,7 +214,7 @@
if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
// New_num_bytes_allocated is zero if we didn't update num_bytes_allocated_.
// That's fine.
- CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
+ CheckConcurrentGCForJava(self, new_num_bytes_allocated, &obj);
}
VerifyObject(obj);
self->VerifyStack();
@@ -254,8 +254,8 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
- if (allocator_type != kAllocatorTypeTLAB &&
- allocator_type != kAllocatorTypeRegionTLAB &&
+ if (allocator_type != kAllocatorTypeRegionTLAB &&
+ allocator_type != kAllocatorTypeTLAB &&
allocator_type != kAllocatorTypeRosAlloc &&
UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
return nullptr;
@@ -396,30 +396,46 @@
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
size_t alloc_size,
bool grow) {
- size_t new_footprint = num_bytes_allocated_.load(std::memory_order_relaxed) + alloc_size;
- if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
- if (UNLIKELY(new_footprint > growth_limit_)) {
+ size_t old_target = target_footprint_.load(std::memory_order_relaxed);
+ while (true) {
+ size_t old_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
+ size_t new_footprint = old_allocated + alloc_size;
+ // Tests against heap limits are inherently approximate, since multiple allocations may
+ // race, and this is not atomic with the allocation.
+ if (UNLIKELY(new_footprint <= old_target)) {
+ return false;
+ } else if (UNLIKELY(new_footprint > growth_limit_)) {
return true;
}
- if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
- if (!grow) {
+ // We are between target_footprint_ and growth_limit_.
+ if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) {
+ return false;
+ } else {
+ if (grow) {
+ if (target_footprint_.compare_exchange_weak(/*inout ref*/old_target, new_footprint,
+ std::memory_order_relaxed)) {
+ VlogHeapGrowth(old_target, new_footprint, alloc_size);
+ return false;
+ } // else try again.
+ } else {
return true;
}
- // TODO: Grow for allocation is racy, fix it.
- VlogHeapGrowth(max_allowed_footprint_, new_footprint, alloc_size);
- max_allowed_footprint_ = new_footprint;
}
}
- return false;
}
-// Request a GC if new_num_bytes_allocated is sufficiently large.
-// A call with new_num_bytes_allocated == 0 is a fast no-op.
-inline void Heap::CheckConcurrentGC(Thread* self,
+inline bool Heap::ShouldConcurrentGCForJava(size_t new_num_bytes_allocated) {
+ // For a Java allocation, we only check whether the number of Java allocated bytes exceeds a
+ // threshold. By not considering native allocation here, we (a) ensure that Java heap bounds are
+ // maintained, and (b) reduce the cost of the check here.
+ return new_num_bytes_allocated >= concurrent_start_bytes_;
+}
+
+inline void Heap::CheckConcurrentGCForJava(Thread* self,
size_t new_num_bytes_allocated,
ObjPtr<mirror::Object>* obj) {
- if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
- RequestConcurrentGCAndSaveObject(self, false, obj);
+ if (UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
+ RequestConcurrentGCAndSaveObject(self, false /* force_full */, obj);
}
}
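For clarity (illustrative only): the new IsOutOfMemoryOnAllocation body above replaces the racy footprint bump with a compare_exchange_weak loop on target_footprint_. Below is a self-contained sketch of that lock-free grow-if-allowed pattern; the names and the simplified policy are assumptions, and the real Heap additionally reloads num_bytes_allocated_ each iteration and consults the allocator/GC mode.

#include <atomic>
#include <cstddef>

// Returns true if the allocation must be treated as out-of-memory, false if it fits
// (possibly after raising the soft target via CAS).
inline bool WouldBeOutOfMemory(std::atomic<size_t>& target_footprint,
                               size_t bytes_allocated,
                               size_t alloc_size,
                               size_t hard_limit,
                               bool allow_growth) {
  size_t old_target = target_footprint.load(std::memory_order_relaxed);
  const size_t new_footprint = bytes_allocated + alloc_size;
  while (true) {
    if (new_footprint <= old_target) {
      return false;  // Fits under the current soft target.
    }
    if (new_footprint > hard_limit || !allow_growth) {
      return true;   // Never exceed the hard limit; otherwise growth is not permitted.
    }
    // Try to raise the soft target. On failure (another thread changed it, or the weak
    // CAS failed spuriously), old_target is refreshed and we simply retry.
    if (target_footprint.compare_exchange_weak(old_target, new_footprint,
                                               std::memory_order_relaxed)) {
      return false;
    }
  }
}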
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index dc79731..d47aca9 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -17,6 +17,9 @@
#include "heap.h"
#include <limits>
+#if defined(__BIONIC__) || defined(__GLIBC__)
+#include <malloc.h> // For mallinfo()
+#endif
#include <memory>
#include <vector>
@@ -187,7 +190,7 @@
bool low_memory_mode,
size_t long_pause_log_threshold,
size_t long_gc_log_threshold,
- bool ignore_max_footprint,
+ bool ignore_target_footprint,
bool use_tlab,
bool verify_pre_gc_heap,
bool verify_pre_sweeping_heap,
@@ -218,7 +221,7 @@
post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
pre_gc_weighted_allocated_bytes_(0.0),
post_gc_weighted_allocated_bytes_(0.0),
- ignore_max_footprint_(ignore_max_footprint),
+ ignore_target_footprint_(ignore_target_footprint),
zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
zygote_space_(nullptr),
large_object_threshold_(large_object_threshold),
@@ -231,13 +234,14 @@
next_gc_type_(collector::kGcTypePartial),
capacity_(capacity),
growth_limit_(growth_limit),
- max_allowed_footprint_(initial_size),
+ target_footprint_(initial_size),
concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
- new_native_bytes_allocated_(0),
+ native_bytes_registered_(0),
old_native_bytes_allocated_(0),
+ native_objects_notified_(0),
num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
@@ -616,11 +620,11 @@
task_processor_.reset(new TaskProcessor());
reference_processor_.reset(new ReferenceProcessor());
pending_task_lock_ = new Mutex("Pending task lock");
- if (ignore_max_footprint_) {
+ if (ignore_target_footprint_) {
SetIdealFootprint(std::numeric_limits<size_t>::max());
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
- CHECK_NE(max_allowed_footprint_, 0U);
+ CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
// Create our garbage collectors.
for (size_t i = 0; i < 2; ++i) {
const bool concurrent = i != 0;
@@ -1158,10 +1162,11 @@
rosalloc_space_->DumpStats(os);
}
- os << "Registered native bytes allocated: "
- << (old_native_bytes_allocated_.load(std::memory_order_relaxed) +
- new_native_bytes_allocated_.load(std::memory_order_relaxed))
- << "\n";
+ os << "Native bytes total: " << GetNativeBytes()
+ << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
+
+ os << "Total native bytes at last GC: "
+ << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
BaseMutex::DumpAll(os);
}
@@ -1337,7 +1342,8 @@
size_t total_bytes_free = GetFreeMemory();
oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
<< " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
- << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
+ << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
+ << ", growth limit "
<< growth_limit_;
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
if (total_bytes_free >= byte_count) {
@@ -1872,7 +1878,7 @@
}
void Heap::SetTargetHeapUtilization(float target) {
- DCHECK_GT(target, 0.0f); // asserted in Java code
+ DCHECK_GT(target, 0.1f); // asserted in Java code
DCHECK_LT(target, 1.0f);
target_utilization_ = target;
}
@@ -2286,8 +2292,8 @@
}
if (IsGcConcurrent()) {
concurrent_start_bytes_ =
- std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
- kMinConcurrentRemainingBytes;
+ UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
+ kMinConcurrentRemainingBytes);
} else {
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
@@ -2616,6 +2622,39 @@
ATRACE_INT("Heap size (KB)", heap_size / KB);
}
+size_t Heap::GetNativeBytes() {
+ size_t malloc_bytes;
+#if defined(__BIONIC__) || defined(__GLIBC__)
+ size_t mmapped_bytes;
+ struct mallinfo mi = mallinfo();
+ // In spite of the documentation, the jemalloc version of this call seems to do what we want,
+ // and it is thread-safe.
+ if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
+ // Shouldn't happen, but glibc declares uordblks as int.
+ // Avoiding sign extension gets us correct behavior for another 2 GB.
+ malloc_bytes = (unsigned int)mi.uordblks;
+ mmapped_bytes = (unsigned int)mi.hblkhd;
+ } else {
+ malloc_bytes = mi.uordblks;
+ mmapped_bytes = mi.hblkhd;
+ }
+ // From the spec, we clearly have mmapped_bytes <= malloc_bytes. Reality is sometimes
+ // dramatically different. (b/119580449) If so, fudge it.
+ if (mmapped_bytes > malloc_bytes) {
+ malloc_bytes = mmapped_bytes;
+ }
+#else
+ // We should hit this case only in contexts in which GC triggering is not critical. Effectively
+ // disable GC triggering based on malloc().
+ malloc_bytes = 1000;
+#endif
+ return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
+ // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
+ // more expensive, and it would allow us to count memory allocated by means other than malloc.
+ // However it would change as pages are unmapped and remapped due to memory pressure, among
+ // other things. It seems risky to trigger GCs as a result of such changes.
+}
+
collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
GcCause gc_cause,
bool clear_soft_references) {
@@ -2666,16 +2705,7 @@
++runtime->GetStats()->gc_for_alloc_count;
++self->GetStats()->gc_for_alloc_count;
}
- const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
-
- if (gc_type == NonStickyGcType()) {
- // Move all bytes from new_native_bytes_allocated_ to
- // old_native_bytes_allocated_ now that GC has been triggered, resetting
- // new_native_bytes_allocated_ to zero in the process.
- old_native_bytes_allocated_.fetch_add(
- new_native_bytes_allocated_.exchange(0, std::memory_order_relaxed),
- std::memory_order_relaxed);
- }
+ const size_t bytes_allocated_before_gc = GetBytesAllocated();
DCHECK_LT(gc_type, collector::kGcTypeMax);
DCHECK_NE(gc_type, collector::kGcTypeNone);
@@ -2747,6 +2777,9 @@
FinishGC(self, gc_type);
// Inform DDMS that a GC completed.
Dbg::GcDidFinish();
+
+ old_native_bytes_allocated_.store(GetNativeBytes());
+
// Unload native libraries for class unloading. We do this after calling FinishGC to prevent
// deadlocks in case the JNI_OnUnload function does allocations.
{
@@ -3521,16 +3554,17 @@
}
size_t Heap::GetPercentFree() {
- return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
+ return static_cast<size_t>(100.0f * static_cast<float>(
+ GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
}
-void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
- if (max_allowed_footprint > GetMaxMemory()) {
- VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
+void Heap::SetIdealFootprint(size_t target_footprint) {
+ if (target_footprint > GetMaxMemory()) {
+ VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
<< PrettySize(GetMaxMemory());
- max_allowed_footprint = GetMaxMemory();
+ target_footprint = GetMaxMemory();
}
- max_allowed_footprint_ = max_allowed_footprint;
+ target_footprint_.store(target_footprint, std::memory_order_relaxed);
}
bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
@@ -3563,10 +3597,10 @@
}
void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
- uint64_t bytes_allocated_before_gc) {
+ size_t bytes_allocated_before_gc) {
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
- const uint64_t bytes_allocated = GetBytesAllocated();
+ const size_t bytes_allocated = GetBytesAllocated();
// Trace the new heap size after the GC is finished.
TraceHeapSize(bytes_allocated);
uint64_t target_size;
@@ -3574,16 +3608,18 @@
// Use the multiplier to grow more for foreground.
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
- const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
+ const size_t adjusted_min_free = static_cast<size_t>(min_free_ * multiplier);
+ const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
- ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
- CHECK_GE(delta, 0) << "bytes_allocated=" << bytes_allocated
- << " target_utilization_=" << target_utilization_;
+ uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
+ DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
+ << " target_utilization_=" << target_utilization_;
target_size = bytes_allocated + delta * multiplier;
- target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
- target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
+ target_size = std::min(target_size,
+ static_cast<uint64_t>(bytes_allocated + adjusted_max_free));
+ target_size = std::max(target_size,
+ static_cast<uint64_t>(bytes_allocated + adjusted_min_free));
next_gc_type_ = collector::kGcTypeSticky;
} else {
collector::GcType non_sticky_gc_type = NonStickyGcType();
@@ -3600,22 +3636,24 @@
// We also check that the bytes allocated aren't over the footprint limit in order to prevent a
// pathological case where dead objects which aren't reclaimed by sticky could get accumulated
// if the sticky GC throughput always remained >= the full/partial throughput.
+ size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
non_sticky_collector->GetEstimatedMeanThroughput() &&
non_sticky_collector->NumberOfIterations() > 0 &&
- bytes_allocated <= max_allowed_footprint_) {
+ bytes_allocated <= target_footprint) {
next_gc_type_ = collector::kGcTypeSticky;
} else {
next_gc_type_ = non_sticky_gc_type;
}
// If we have freed enough memory, shrink the heap back down.
- if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
+ if (bytes_allocated + adjusted_max_free < target_footprint) {
target_size = bytes_allocated + adjusted_max_free;
} else {
- target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
+ target_size = std::max(bytes_allocated, target_footprint);
}
}
- if (!ignore_max_footprint_) {
+ CHECK_LE(target_size, std::numeric_limits<size_t>::max());
+ if (!ignore_target_footprint_) {
SetIdealFootprint(target_size);
if (IsGcConcurrent()) {
const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
@@ -3624,26 +3662,25 @@
// Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
// how many bytes were allocated during the GC we need to add freed_bytes back on.
CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
- const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
+ const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
bytes_allocated_before_gc;
// Calculate when to perform the next ConcurrentGC.
// Estimate how many remaining bytes we will have when we need to start the next GC.
size_t remaining_bytes = bytes_allocated_during_gc;
remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
- if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
+ size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+ if (UNLIKELY(remaining_bytes > target_footprint)) {
// A situation that should never happen: the estimated allocation rate alone would exceed the
// application's entire footprint. Schedule another GC nearly straight away.
- remaining_bytes = kMinConcurrentRemainingBytes;
+ remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
}
- DCHECK_LE(remaining_bytes, max_allowed_footprint_);
- DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
+ DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
// Start a concurrent GC when we get close to the estimated remaining bytes. When the
// allocation rate is very high, remaining_bytes could tell us that we should start a GC
// right away.
- concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
- static_cast<size_t>(bytes_allocated));
+ concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
}
}
}
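
A worked example of the non-sticky growth math above, using purely hypothetical inputs (64 MB live after GC, 0.75 target utilization, 2x foreground multiplier, 1 MB / 16 MB adjusted min/max free): the utilization term alone would ask for ~107 MB, and the max-free clamp brings the target footprint back to 80 MB. Illustration only, not part of this change:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t bytes_allocated = 64ull * 1024 * 1024;   // Live bytes after a full GC.
    const double target_utilization = 0.75;                  // Hypothetical, not ART's default.
    const double multiplier = 2.0;                           // Foreground growth multiplier.
    const uint64_t adjusted_min_free = static_cast<uint64_t>(512 * 1024 * multiplier);
    const uint64_t adjusted_max_free = static_cast<uint64_t>(8 * 1024 * 1024 * multiplier);

    // Head room implied by the target utilization: 64 MB * (1/0.75 - 1) ~= 21.3 MB.
    const uint64_t delta = bytes_allocated * (1.0 / target_utilization - 1.0);
    uint64_t target_size = bytes_allocated + delta * multiplier;               // ~106.7 MB.
    target_size = std::min(target_size, bytes_allocated + adjusted_max_free);  // Clamp to 80 MB.
    target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
    std::printf("target footprint: %llu MB\n",
                static_cast<unsigned long long>(target_size / (1024 * 1024)));
    return 0;
  }
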
@@ -3671,11 +3708,11 @@
}
void Heap::ClearGrowthLimit() {
- if (max_allowed_footprint_ == growth_limit_ && growth_limit_ < capacity_) {
- max_allowed_footprint_ = capacity_;
+ if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
+ && growth_limit_ < capacity_) {
+ target_footprint_.store(capacity_, std::memory_order_relaxed);
concurrent_start_bytes_ =
- std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
- kMinConcurrentRemainingBytes;
+ UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
}
growth_limit_ = capacity_;
ScopedObjectAccess soa(Thread::Current());
@@ -3915,40 +3952,101 @@
static_cast<jlong>(timeout));
}
-void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
- size_t old_value = new_native_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
+// For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
+// different fractions of Java allocations.
+// For now, we essentially do not count old native allocations at all, so that we can preserve the
+// existing behavior of not limiting native heap size. If we seriously considered it, we would
+// have to adjust collection thresholds when we encounter large amounts of old native memory,
+// and handle native out-of-memory situations.
- if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
- !IsGCRequestPending()) {
- // Trigger another GC because there have been enough native bytes
- // allocated since the last GC.
+static constexpr size_t kOldNativeDiscountFactor = 65536; // Approximately infinite for now.
+static constexpr size_t kNewNativeDiscountFactor = 2;
+
+// If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
+// newly allocated memory exceeds kHugeNativeAllocs, we wait for GC to complete to avoid
+// running out of memory.
+static constexpr float kStopForNativeFactor = 2.0;
+static constexpr size_t kHugeNativeAllocs = 200*1024*1024;
+
+// Return the ratio of the weighted native + java allocated bytes to its target value.
+// A return value > 1.0 means we should collect. Significantly larger values mean we're falling
+// behind.
+inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes) {
+ // Collection check for native allocation. Does not enforce Java heap bounds.
+ // With adj_start_bytes defined below, effectively checks
+  // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
+  // where c1 and c2 are currently 1 divided by the discount factors defined above.
+ size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
+ if (old_native_bytes > current_native_bytes) {
+ // Net decrease; skip the check, but update old value.
+ // It's OK to lose an update if two stores race.
+ old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
+ return 0.0;
+ } else {
+ size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
+ size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
+ + old_native_bytes / kOldNativeDiscountFactor;
+ size_t adj_start_bytes = concurrent_start_bytes_
+ + NativeAllocationGcWatermark() / kNewNativeDiscountFactor;
+ return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
+ / static_cast<float>(adj_start_bytes);
+ }
+}
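
A worked instance of the weighted check above, with made-up numbers (40 MB of Java allocation, a 48 MB concurrent start threshold, a 32 MB native watermark, and 30 MB of native memory allocated since the last GC): new native bytes count at 1/2, old native bytes are essentially ignored, and the resulting urgency of ~0.86 stays below the collection trigger. Illustration only:

  #include <cstdio>

  int main() {
    const double java_bytes       = 40.0 * 1024 * 1024;
    const double old_native_bytes =  5.0 * 1024 * 1024;  // Discounted by 65536: effectively ignored.
    const double new_native_bytes = 30.0 * 1024 * 1024;  // Discounted by 2.
    const double start_bytes      = 48.0 * 1024 * 1024;  // Plays the role of concurrent_start_bytes_.
    const double native_watermark = 32.0 * 1024 * 1024;  // NativeAllocationGcWatermark(), also halved.

    const double weighted_use    = java_bytes + new_native_bytes / 2 + old_native_bytes / 65536;
    const double adjusted_target = start_bytes + native_watermark / 2;
    std::printf("gc urgency = %.2f\n", weighted_use / adjusted_target);  // ~0.86: no GC requested yet.
    return 0;
  }
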
+
+inline void Heap::CheckConcurrentGCForNative(Thread* self) {
+ size_t current_native_bytes = GetNativeBytes();
+ float gc_urgency = NativeMemoryOverTarget(current_native_bytes);
+ if (UNLIKELY(gc_urgency >= 1.0)) {
if (IsGcConcurrent()) {
- RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true);
+ RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
+ if (gc_urgency > kStopForNativeFactor
+ && current_native_bytes > kHugeNativeAllocs) {
+ // We're in danger of running out of memory due to rampant native allocation.
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
+ }
+ WaitForGcToComplete(kGcCauseForAlloc, self);
+ }
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
}
}
}
-void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
- // Take the bytes freed out of new_native_bytes_allocated_ first. If
- // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
- // out of old_native_bytes_allocated_ to ensure all freed bytes are
- // accounted for.
- size_t allocated;
- size_t new_freed_bytes;
- do {
- allocated = new_native_bytes_allocated_.load(std::memory_order_relaxed);
- new_freed_bytes = std::min(allocated, bytes);
- } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated,
- allocated - new_freed_bytes));
- if (new_freed_bytes < bytes) {
- old_native_bytes_allocated_.fetch_sub(bytes - new_freed_bytes, std::memory_order_relaxed);
+// About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
+void Heap::NotifyNativeAllocations(JNIEnv* env) {
+ native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
+ CheckConcurrentGCForNative(ThreadForEnv(env));
+}
+
+// Register a native allocation with an explicit size.
+// This should only be done for large allocations of non-malloc memory, which we wouldn't
+// otherwise see.
+void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
+ native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
+ uint32_t objects_notified =
+ native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
+ if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
+ || bytes > kCheckImmediatelyThreshold) {
+ CheckConcurrentGCForNative(ThreadForEnv(env));
}
}
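
A sketch of how a native allocation site might honor the contract above. HeapNotifyNativeAllocations, HeapRegisterNativeAllocation and OnNativeAllocation are hypothetical stand-ins (real clients reach these entry points through VMRuntime/JNI); only the batching pattern is the point:

  #include <atomic>
  #include <cstddef>

  // Hypothetical stand-ins for the two Heap entry points above.
  static void HeapNotifyNativeAllocations() { /* would call Heap::NotifyNativeAllocations */ }
  static void HeapRegisterNativeAllocation(size_t bytes) { (void)bytes; /* Heap::RegisterNativeAllocation */ }

  namespace {
  constexpr unsigned kNotifyNativeInterval = 32;  // Mirrors Heap::kNotifyNativeInterval.
  std::atomic<unsigned> g_allocations_since_notify{0};
  }  // namespace

  void OnNativeAllocation(size_t bytes, bool malloc_backed) {
    if (!malloc_backed) {
      // Memory the heap cannot see through mallinfo() (e.g. directly mmapped buffers)
      // must be registered with its explicit size.
      HeapRegisterNativeAllocation(bytes);
      return;
    }
    // malloc-backed bytes are picked up by GetNativeBytes(); only a periodic hint is needed.
    unsigned count = g_allocations_since_notify.fetch_add(1, std::memory_order_relaxed) + 1;
    if (count % kNotifyNativeInterval == 0) {
      HeapNotifyNativeAllocations();
    }
  }
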
+void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
+ size_t allocated;
+ size_t new_freed_bytes;
+ do {
+ allocated = native_bytes_registered_.load(std::memory_order_relaxed);
+ new_freed_bytes = std::min(allocated, bytes);
+    // We should not be registering more freed bytes than we have registered as allocated.
+ // But correctly keep going in non-debug builds.
+ DCHECK_EQ(new_freed_bytes, bytes);
+ } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
+ allocated - new_freed_bytes));
+}
+
size_t Heap::GetTotalMemory() const {
- return std::max(max_allowed_footprint_, GetBytesAllocated());
+ return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
}
void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
@@ -4250,8 +4348,8 @@
return verification_.get();
}
-void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size) {
- VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint) << " to "
+void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
+ VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
<< PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
}
@@ -4262,20 +4360,21 @@
gc::Heap* heap = Runtime::Current()->GetHeap();
// Trigger a GC, if not already done. The first GC after fork, whenever it
// takes place, will adjust the thresholds to normal levels.
- if (heap->max_allowed_footprint_ == heap->growth_limit_) {
+ if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
heap->RequestConcurrentGC(self, kGcCauseBackground, false);
}
}
};
void Heap::PostForkChildAction(Thread* self) {
- // Temporarily increase max_allowed_footprint_ and concurrent_start_bytes_ to
+ // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
// max values to avoid GC during app launch.
if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
- // Set max_allowed_footprint_ to the largest allowed value.
+ // Set target_footprint_ to the largest allowed value.
SetIdealFootprint(growth_limit_);
// Set concurrent_start_bytes_ to half of the heap size.
- concurrent_start_bytes_ = std::max(max_allowed_footprint_ / 2, GetBytesAllocated());
+ size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+ concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
GetTaskProcessor()->AddTask(
self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 504eff2..de65f023 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -126,7 +126,6 @@
class Heap {
public:
- // If true, measure the total allocation time.
static constexpr size_t kDefaultStartingSize = kPageSize;
static constexpr size_t kDefaultInitialSize = 2 * MB;
static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -155,6 +154,16 @@
// Used so that we don't overflow the allocation time atomic integer.
static constexpr size_t kTimeAdjust = 1024;
+  // Client should call NotifyNativeAllocations every kNotifyNativeInterval allocations.
+ // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
+ // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec.
+ static constexpr uint32_t kNotifyNativeInterval = 32;
+
+ // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
+ // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
+ // make it safe to allocate that many bytes between checks.
+ static constexpr size_t kCheckImmediatelyThreshold = 300000;
+
// How often we allow heap trimming to happen (nanoseconds).
static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
// How long we wait after a transition request to perform a collector transition (nanoseconds).
@@ -187,7 +196,7 @@
bool low_memory_mode,
size_t long_pause_threshold,
size_t long_gc_threshold,
- bool ignore_max_footprint,
+ bool ignore_target_footprint,
bool use_tlab,
bool verify_pre_gc_heap,
bool verify_pre_sweeping_heap,
@@ -269,10 +278,22 @@
void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
REQUIRES_SHARED(Locks::mutator_lock_);
+  // Inform the garbage collector of non-malloc allocated native memory that might become
+ // reclaimable in the future as a result of Java garbage collection.
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void RegisterNativeFree(JNIEnv* env, size_t bytes);
+ // Notify the garbage collector of malloc allocations that might be reclaimable
+ // as a result of Java garbage collection. Each such call represents approximately
+ // kNotifyNativeInterval such allocations.
+ void NotifyNativeAllocations(JNIEnv* env)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+
+ uint32_t GetNotifyNativeInterval() {
+ return kNotifyNativeInterval;
+ }
+
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
@@ -536,21 +557,20 @@
// Returns approximately how much free memory we have until the next GC happens.
size_t GetFreeMemoryUntilGC() const {
- return max_allowed_footprint_ - GetBytesAllocated();
+ return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
+ GetBytesAllocated());
}
// Returns approximately how much free memory we have until the next OOME happens.
size_t GetFreeMemoryUntilOOME() const {
- return growth_limit_ - GetBytesAllocated();
+ return UnsignedDifference(growth_limit_, GetBytesAllocated());
}
// Returns how much free memory we have until we need to grow the heap to perform an allocation.
// Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
size_t GetFreeMemory() const {
- size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
- size_t total_memory = GetTotalMemory();
- // Make sure we don't get a negative number.
- return total_memory - std::min(total_memory, byte_allocated);
+ return UnsignedDifference(GetTotalMemory(),
+ num_bytes_allocated_.load(std::memory_order_relaxed));
}
// Get the space that corresponds to an object's address. Current implementation searches all
@@ -877,12 +897,16 @@
return main_space_backup_ != nullptr;
}
+ static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
+ return x > y ? x - y : 0;
+ }
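
UnsignedDifference exists because size_t subtraction wraps around when the subtrahend is larger, which can legitimately happen here (e.g. bytes allocated briefly overshooting target_footprint_). A tiny stand-alone illustration of the failure mode, not part of this change:

  #include <cassert>
  #include <cstddef>

  static size_t UnsignedDifference(size_t x, size_t y) {
    return x > y ? x - y : 0;
  }

  int main() {
    const size_t target_footprint = 64;
    const size_t bytes_allocated = 80;  // Allocation briefly overshoots the target.
    // Raw subtraction wraps to an enormous value; the saturating form reports zero free memory.
    assert(target_footprint - bytes_allocated > 1000000u);
    assert(UnsignedDifference(target_footprint, bytes_allocated) == 0u);
    return 0;
  }
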
+
static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
return
+ allocator_type != kAllocatorTypeRegionTLAB &&
allocator_type != kAllocatorTypeBumpPointer &&
allocator_type != kAllocatorTypeTLAB &&
- allocator_type != kAllocatorTypeRegion &&
- allocator_type != kAllocatorTypeRegionTLAB;
+ allocator_type != kAllocatorTypeRegion;
}
static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
if (kUseReadBarrier) {
@@ -890,24 +914,30 @@
return true;
}
return
- allocator_type != kAllocatorTypeBumpPointer &&
- allocator_type != kAllocatorTypeTLAB;
+ allocator_type != kAllocatorTypeTLAB &&
+ allocator_type != kAllocatorTypeBumpPointer;
}
static bool IsMovingGc(CollectorType collector_type) {
return
+ collector_type == kCollectorTypeCC ||
collector_type == kCollectorTypeSS ||
collector_type == kCollectorTypeGSS ||
- collector_type == kCollectorTypeCC ||
collector_type == kCollectorTypeCCBackground ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
- size_t new_num_bytes_allocated,
- ObjPtr<mirror::Object>* obj)
+
+ // Checks whether we should garbage collect:
+ ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
+ float NativeMemoryOverTarget(size_t current_native_bytes);
+ ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
+ size_t new_num_bytes_allocated,
+ ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
+ void CheckConcurrentGCForNative(Thread* self)
+ REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
accounting::ObjectStack* GetMarkStack() {
return mark_stack_.get();
@@ -968,6 +998,11 @@
void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Are we out of memory, and thus should force a GC or fail?
+ // For concurrent collectors, out of memory is defined by growth_limit_.
+ // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
+ // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
+  // to accommodate the allocation.
ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
size_t alloc_size,
bool grow);
@@ -1031,7 +1066,7 @@
// collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
// the GC was run.
void GrowForUtilization(collector::GarbageCollector* collector_ran,
- uint64_t bytes_allocated_before_gc = 0);
+ size_t bytes_allocated_before_gc = 0);
size_t GetPercentFree();
@@ -1065,8 +1100,8 @@
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
bool IsGcConcurrent() const ALWAYS_INLINE {
- return collector_type_ == kCollectorTypeCMS ||
- collector_type_ == kCollectorTypeCC ||
+ return collector_type_ == kCollectorTypeCC ||
+ collector_type_ == kCollectorTypeCMS ||
collector_type_ == kCollectorTypeCCBackground;
}
@@ -1095,15 +1130,19 @@
return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
}
- // How large new_native_bytes_allocated_ can grow before we trigger a new
- // GC.
+ // Return the amount of space we allow for native memory when deciding whether to
+ // collect. We collect when a weighted sum of Java memory plus native memory exceeds
+ // the similarly weighted sum of the Java heap size target and this value.
ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
- // Reuse max_free_ for the native allocation gc watermark, so that the
- // native heap is treated in the same way as the Java heap in the case
- // where the gc watermark update would exceed max_free_. Using max_free_
- // instead of the target utilization means the watermark doesn't depend on
- // the current number of registered native allocations.
- return max_free_;
+    // It probably makes most sense to use a constant multiple of target_footprint_.
+ // This is a good indication of the live data size, together with the
+ // intended space-time trade-off, as expressed by SetTargetHeapUtilization.
+ // For a fixed target utilization, the amount of GC effort per native
+ // allocated byte remains roughly constant as the Java heap size changes.
+ // But we previously triggered on max_free_ native allocation which is often much
+ // smaller. To avoid unexpected growth, we partially keep that limit in place for now.
+ // TODO: Consider HeapGrowthMultiplier(). Maybe.
+ return std::min(target_footprint_.load(std::memory_order_relaxed), 2 * max_free_);
}
ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
@@ -1113,6 +1152,11 @@
// Remove a vlog code from heap-inl.h which is transitively included in half the world.
static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
+ // Return our best approximation of the number of bytes of native memory that
+ // are currently in use, and could possibly be reclaimed as an indirect result
+ // of a garbage collection.
+ size_t GetNativeBytes();
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
@@ -1192,9 +1236,9 @@
double pre_gc_weighted_allocated_bytes_;
double post_gc_weighted_allocated_bytes_;
- // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
- // useful for benchmarking since it reduces time spent in GC to a low %.
- const bool ignore_max_footprint_;
+  // If we ignore the target footprint, the heap is allowed to grow until it hits the heap
+  // capacity. This is useful for benchmarking since it reduces time spent in GC to a low %.
+ const bool ignore_target_footprint_;
// Lock which guards zygote space creation.
Mutex zygote_creation_lock_;
@@ -1243,14 +1287,18 @@
// The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
// programs it is "cleared" making it the same as capacity.
+ // Only weakly enforced for simultaneous allocations.
size_t growth_limit_;
- // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
- // a GC should be triggered.
- size_t max_allowed_footprint_;
+ // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
+ // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
+ // concurrent GC case.
+ Atomic<size_t> target_footprint_;
// When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
// it completes ahead of an allocation failing.
+ // A multiple of this is also used to determine when to trigger a GC in response to native
+ // allocation.
size_t concurrent_start_bytes_;
// Since the heap was created, how many bytes have been freed.
@@ -1263,19 +1311,18 @@
// TLABS in their entirety, even if they have not yet been parceled out.
Atomic<size_t> num_bytes_allocated_;
- // Number of registered native bytes allocated since the last time GC was
- // triggered. Adjusted after each RegisterNativeAllocation and
- // RegisterNativeFree. Used to determine when to trigger GC for native
- // allocations.
- // See the REDESIGN section of go/understanding-register-native-allocation.
- Atomic<size_t> new_native_bytes_allocated_;
+ // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
+ // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
+ // not include bytes allocated through the system malloc, since those are implicitly included.
+ Atomic<size_t> native_bytes_registered_;
- // Number of registered native bytes allocated prior to the last time GC was
- // triggered, for debugging purposes. The current number of registered
- // native bytes is determined by taking the sum of
- // old_native_bytes_allocated_ and new_native_bytes_allocated_.
+ // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
Atomic<size_t> old_native_bytes_allocated_;
+ // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
+ // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
+ Atomic<uint32_t> native_objects_notified_;
+
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
// rosalloc thread-local buffers. It is temporarily accumulated
@@ -1360,10 +1407,10 @@
// Minimum free guarantees that you always have at least min_free_ free bytes after growing for
// utilization, regardless of target utilization ratio.
- size_t min_free_;
+ const size_t min_free_;
// The ideal maximum free size, when we grow the heap for utilization.
- size_t max_free_;
+ const size_t max_free_;
// Target ideal heap utilization ratio.
double target_utilization_;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 4c2074d..66db063 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -28,6 +28,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/array_ref.h"
#include "base/bit_memory_region.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
@@ -44,6 +45,7 @@
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/task_processor.h"
#include "image-inl.h"
#include "image_space_fs.h"
#include "intern_table-inl.h"
@@ -59,6 +61,7 @@
namespace gc {
namespace space {
+using android::base::StringAppendF;
using android::base::StringPrintf;
Atomic<uint32_t> ImageSpace::bitmap_index_(0);
@@ -654,6 +657,22 @@
const CodeVisitor code_visitor_;
};
+template <typename ReferenceVisitor>
+class ImageSpace::ClassTableVisitor final {
+ public:
+ explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
+ : reference_visitor_(reference_visitor) {}
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(root->AsMirrorPtr() != nullptr);
+ root->Assign(reference_visitor_(root->AsMirrorPtr()));
+ }
+
+ private:
+ ReferenceVisitor reference_visitor_;
+};
+
// Helper class encapsulating loading, so we can access private ImageSpace members (this is a
// nested class), but not declare functions in the header.
class ImageSpace::Loader {
@@ -666,30 +685,39 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
- const bool create_thread_pool = true;
std::unique_ptr<ThreadPool> thread_pool;
- if (create_thread_pool) {
- TimingLogger::ScopedTiming timing("CreateThreadPool", &logger);
- ScopedThreadStateChange stsc(Thread::Current(), kNative);
- constexpr size_t kStackSize = 64 * KB;
- constexpr size_t kMaxRuntimeWorkers = 4u;
- const size_t num_workers =
- std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
- thread_pool.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize));
- thread_pool->StartWorkers(Thread::Current());
- }
-
std::unique_ptr<ImageSpace> space = Init(image_filename,
image_location,
oat_file,
&logger,
- thread_pool.get(),
+ &thread_pool,
image_reservation,
error_msg);
if (thread_pool != nullptr) {
- TimingLogger::ScopedTiming timing("CreateThreadPool", &logger);
- ScopedThreadStateChange stsc(Thread::Current(), kNative);
- thread_pool.reset();
+    // Delay the thread pool deletion so that it does not slow down startup by causing
+    // preemption. TODO: Just do this in heap trim.
+ static constexpr uint64_t kThreadPoolDeleteDelay = MsToNs(5000);
+
+ class DeleteThreadPoolTask : public HeapTask {
+ public:
+ explicit DeleteThreadPoolTask(std::unique_ptr<ThreadPool>&& thread_pool)
+ : HeapTask(NanoTime() + kThreadPoolDeleteDelay), thread_pool_(std::move(thread_pool)) {}
+
+ void Run(Thread* self) override {
+ ScopedTrace trace("DestroyThreadPool");
+ ScopedThreadStateChange stsc(self, kNative);
+ thread_pool_.reset();
+ }
+
+ private:
+ std::unique_ptr<ThreadPool> thread_pool_;
+ };
+ gc::TaskProcessor* const processor = Runtime::Current()->GetHeap()->GetTaskProcessor();
+  // The thread pool is no longer needed once Init has finished running. Deleting it is done
+  // asynchronously since it takes a non-trivial amount of time.
+ if (processor != nullptr) {
+ processor->AddTask(Thread::Current(), new DeleteThreadPoolTask(std::move(thread_pool)));
+ }
}
if (space != nullptr) {
uint32_t expected_reservation_size =
@@ -701,11 +729,22 @@
TimingLogger::ScopedTiming timing("RelocateImage", &logger);
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
- if (!RelocateInPlace(*image_header,
- space->GetMemMap()->Begin(),
- space->GetLiveBitmap(),
- oat_file,
- error_msg)) {
+ const PointerSize pointer_size = image_header->GetPointerSize();
+ bool result;
+ if (pointer_size == PointerSize::k64) {
+ result = RelocateInPlace<PointerSize::k64>(*image_header,
+ space->GetMemMap()->Begin(),
+ space->GetLiveBitmap(),
+ oat_file,
+ error_msg);
+ } else {
+ result = RelocateInPlace<PointerSize::k32>(*image_header,
+ space->GetMemMap()->Begin(),
+ space->GetLiveBitmap(),
+ oat_file,
+ error_msg);
+ }
+ if (!result) {
return nullptr;
}
Runtime* runtime = Runtime::Current();
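
The call site above turns the pointer size read from the image header into a template argument so that RelocateInPlace specializes at compile time. A minimal sketch of the same runtime-to-compile-time dispatch pattern with hypothetical names:

  #include <cstddef>
  #include <cstdio>

  enum class PointerSize : size_t { k32 = 4, k64 = 8 };

  // Inside the template body the pointer size is a compile-time constant.
  template <PointerSize kPointerSize>
  bool PatchImage(const char* name) {
    constexpr size_t pointer_bytes = static_cast<size_t>(kPointerSize);
    std::printf("patching %s with %zu-byte pointers\n", name, pointer_bytes);
    return true;
  }

  // Runtime-to-compile-time dispatch, mirroring the if/else at the call site.
  bool PatchImageForHeader(PointerSize pointer_size, const char* name) {
    return pointer_size == PointerSize::k64 ? PatchImage<PointerSize::k64>(name)
                                            : PatchImage<PointerSize::k32>(name);
  }

  int main() {
    PatchImageForHeader(PointerSize::k64, "app.art");
    return 0;
  }
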
@@ -740,7 +779,7 @@
const char* image_location,
const OatFile* oat_file,
TimingLogger* logger,
- ThreadPool* thread_pool,
+ std::unique_ptr<ThreadPool>* thread_pool,
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -817,6 +856,18 @@
return nullptr;
}
+ const size_t kMinBlocks = 2;
+ if (thread_pool != nullptr && image_header->GetBlockCount() >= kMinBlocks) {
+ TimingLogger::ScopedTiming timing("CreateThreadPool", logger);
+ ScopedThreadStateChange stsc(Thread::Current(), kNative);
+ constexpr size_t kStackSize = 64 * KB;
+ constexpr size_t kMaxRuntimeWorkers = 4u;
+ const size_t num_workers =
+ std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
+ thread_pool->reset(new ThreadPool("Image", num_workers, /*create_peers=*/false, kStackSize));
+ thread_pool->get()->StartWorkers(Thread::Current());
+ }
+
// GetImageBegin is the preferred address to map the image. If we manage to map the
// image at the image begin, the amount of fixup work required is minimized.
// If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
@@ -829,7 +880,7 @@
*image_header,
file->Fd(),
logger,
- thread_pool,
+ thread_pool != nullptr ? thread_pool->get() : nullptr,
image_reservation,
error_msg);
if (!map.IsValid()) {
@@ -966,8 +1017,7 @@
const uint64_t start = NanoTime();
Thread* const self = Thread::Current();
- const size_t kMinBlocks = 2;
- const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
+ const bool use_parallel = pool != nullptr;
for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
auto function = [&](Thread*) {
const uint64_t start2 = NanoTime();
@@ -1089,11 +1139,8 @@
class FixupObjectVisitor : public FixupVisitor {
public:
template<typename... Args>
- explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
- const PointerSize pointer_size,
- Args... args)
+ explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited, Args... args)
: FixupVisitor(args...),
- pointer_size_(pointer_size),
visited_(visited) {}
// Fix up separately since we also need to fix up method entrypoints.
@@ -1105,39 +1152,14 @@
ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
MemberOffset offset,
-
bool is_static ATTRIBUTE_UNUSED) const
NO_THREAD_SAFETY_ANALYSIS {
- // There could be overlap between ranges, we must avoid visiting the same reference twice.
- // Avoid the class field since we already fixed it up in FixupClassVisitor.
- if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
- // Space is not yet added to the heap, don't do a read barrier.
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
- offset);
- // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
- // image.
- obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
- }
- }
-
- // Visit a pointer array and forward corresponding native data. Ignores pointer arrays in the
- // boot image. Uses the bitmap to ensure the same array is not visited multiple times.
- template <typename Visitor>
- void UpdatePointerArrayContents(mirror::PointerArray* array, const Visitor& visitor) const
- NO_THREAD_SAFETY_ANALYSIS {
- DCHECK(array != nullptr);
- DCHECK(visitor.IsInAppImage(array));
- // The bit for the array contents is different than the bit for the array. Since we may have
- // already visited the array as a long / int array from walking the bitmap without knowing it
- // was a pointer array.
- static_assert(kObjectAlignment == 8u, "array bit may be in another object");
- mirror::Object* const contents_bit = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uintptr_t>(array) + kObjectAlignment);
- // If the bit is not set then the contents have not yet been updated.
- if (!visited_->Test(contents_bit)) {
- array->Fixup<kVerifyNone>(array, pointer_size_, visitor);
- visited_->Set(contents_bit);
- }
+ // Space is not yet added to the heap, don't do a read barrier.
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
+ offset);
+ // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
+ // image.
+ obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
}
// java.lang.ref.Reference visitor.
@@ -1152,81 +1174,16 @@
void operator()(mirror::Object* obj) const
NO_THREAD_SAFETY_ANALYSIS {
- if (visited_->Test(obj)) {
- // Already visited.
- return;
- }
- visited_->Set(obj);
-
- // Handle class specially first since we need it to be updated to properly visit the rest of
- // the instance fields.
- {
- mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
- DCHECK(klass != nullptr) << "Null class in image";
- // No AsClass since our fields aren't quite fixed up yet.
- mirror::Class* new_klass = down_cast<mirror::Class*>(ForwardObject(klass));
- if (klass != new_klass) {
- obj->SetClass<kVerifyNone>(new_klass);
- }
- if (new_klass != klass && IsInAppImage(new_klass)) {
- // Make sure the klass contents are fixed up since we depend on it to walk the fields.
- operator()(new_klass);
- }
- }
-
- if (obj->IsClass()) {
- mirror::Class* klass = obj->AsClass<kVerifyNone>();
- // Fixup super class before visiting instance fields which require
- // information from their super class to calculate offsets.
- mirror::Class* super_class =
- klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>().Ptr();
- if (super_class != nullptr) {
- mirror::Class* new_super_class = down_cast<mirror::Class*>(ForwardObject(super_class));
- if (new_super_class != super_class && IsInAppImage(new_super_class)) {
- // Recursively fix all dependencies.
- operator()(new_super_class);
- }
- }
- }
-
- obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
- *this,
- *this);
- // Note that this code relies on no circular dependencies.
- // We want to use our own class loader and not the one in the image.
- if (obj->IsClass<kVerifyNone>()) {
- mirror::Class* as_klass = obj->AsClass<kVerifyNone>();
- FixupObjectAdapter visitor(boot_image_, app_image_, app_oat_);
- as_klass->FixupNativePointers<kVerifyNone>(as_klass, pointer_size_, visitor);
- // Deal with the pointer arrays. Use the helper function since multiple classes can reference
- // the same arrays.
- mirror::PointerArray* const vtable = as_klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
- if (vtable != nullptr && IsInAppImage(vtable)) {
- operator()(vtable);
- UpdatePointerArrayContents(vtable, visitor);
- }
- mirror::IfTable* iftable = as_klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
- // Ensure iftable arrays are fixed up since we need GetMethodArray to return the valid
- // contents.
- if (IsInAppImage(iftable)) {
- operator()(iftable);
- for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
- if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) {
- mirror::PointerArray* methods =
- iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i);
- if (visitor.IsInAppImage(methods)) {
- operator()(methods);
- DCHECK(methods != nullptr);
- UpdatePointerArrayContents(methods, visitor);
- }
- }
- }
- }
+ if (!visited_->Set(obj)) {
+ // Not already visited.
+ obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
+ *this,
+ *this);
+ CHECK(!obj->IsClass());
}
}
private:
- const PointerSize pointer_size_;
gc::accounting::ContinuousSpaceBitmap* const visited_;
};
@@ -1306,6 +1263,7 @@
// Relocate an image space mapped at target_base which possibly used to be at a different base
// address. In place means modifying a single ImageSpace in place rather than relocating from
// one ImageSpace to another.
+ template <PointerSize kPointerSize>
static bool RelocateInPlace(ImageHeader& image_header,
uint8_t* target_base,
accounting::ContinuousSpaceBitmap* bitmap,
@@ -1317,7 +1275,6 @@
uint32_t boot_image_end = 0;
uint32_t boot_oat_begin = 0;
uint32_t boot_oat_end = 0;
- const PointerSize pointer_size = image_header.GetPointerSize();
gc::Heap* const heap = Runtime::Current()->GetHeap();
heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
if (boot_image_begin == boot_image_end) {
@@ -1359,11 +1316,8 @@
return true;
}
ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
- // Need to update the image to be at the target base.
- const ImageSection& objects_section = image_header.GetObjectsSection();
- uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
- uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
FixupObjectAdapter fixup_adapter(boot_image, app_image, app_oat);
+ PatchObjectVisitor<kPointerSize, FixupObjectAdapter> patch_object_visitor(fixup_adapter);
if (fixup_image) {
// Two pass approach, fix up all classes first, then fix up non class-objects.
// The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1371,16 +1325,64 @@
gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
target_base,
image_header.GetImageSize()));
- FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
- pointer_size,
- boot_image,
- app_image,
- app_oat);
- TimingLogger::ScopedTiming timing("Fixup classes", &logger);
- // Fixup objects may read fields in the boot image, use the mutator lock here for sanity. Though
- // its probably not required.
+ FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(), boot_image, app_image, app_oat);
+ {
+ TimingLogger::ScopedTiming timing("Fixup classes", &logger);
+ const auto& class_table_section = image_header.GetClassTableSection();
+ if (class_table_section.Size() > 0u) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassTableVisitor class_table_visitor(fixup_adapter);
+ size_t read_count = 0u;
+ const uint8_t* data = target_base + class_table_section.Offset();
+ // We avoid making a copy of the data since we want modifications to be propagated to the
+ // memory map.
+ ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+ for (ClassTable::TableSlot& slot : temp_set) {
+ slot.VisitRoot(class_table_visitor);
+ mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
+ if (!fixup_adapter.IsInAppImage(klass)) {
+ continue;
+ }
+ const bool already_marked = visited_bitmap->Set(klass);
+ CHECK(!already_marked) << "App image class already visited";
+ patch_object_visitor.VisitClass(klass);
+ // Then patch the non-embedded vtable and iftable.
+ mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+ if (vtable != nullptr &&
+ fixup_object_visitor.IsInAppImage(vtable) &&
+ !visited_bitmap->Set(vtable)) {
+ patch_object_visitor.VisitPointerArray(vtable);
+ }
+ auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+ if (iftable != nullptr && fixup_object_visitor.IsInAppImage(iftable)) {
+            // Avoid processing the fields of iftable here since we will process them below
+            // anyway.
+ int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
+ for (int32_t i = 0; i != ifcount; ++i) {
+ mirror::PointerArray* unpatched_ifarray =
+ iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
+ if (unpatched_ifarray != nullptr) {
+ // The iftable has not been patched, so we need to explicitly adjust the pointer.
+ mirror::PointerArray* ifarray = fixup_adapter(unpatched_ifarray);
+ if (fixup_object_visitor.IsInAppImage(ifarray) &&
+ !visited_bitmap->Set(ifarray)) {
+ patch_object_visitor.VisitPointerArray(ifarray);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+      // Fixup objects may read fields in the boot image; use the mutator lock here for sanity,
+      // though it's probably not required.
+      TimingLogger::ScopedTiming timing("Fixup objects", &logger);
ScopedObjectAccess soa(Thread::Current());
- timing.NewTiming("Fixup objects");
+ // Need to update the image to be at the target base.
+ const ImageSection& objects_section = image_header.GetObjectsSection();
+ uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
+ uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
// Fixup image roots.
CHECK(app_image.InSource(reinterpret_cast<uintptr_t>(
@@ -1392,96 +1394,19 @@
AsObjectArray<mirror::DexCache, kVerifyNone>();
for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
- // Fix up dex cache pointers.
- mirror::StringDexCacheType* strings = dex_cache->GetStrings();
- if (strings != nullptr) {
- mirror::StringDexCacheType* new_strings = fixup_adapter.ForwardObject(strings);
- if (strings != new_strings) {
- dex_cache->SetStrings(new_strings);
- }
- dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter);
- }
- mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes();
- if (types != nullptr) {
- mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types);
- if (types != new_types) {
- dex_cache->SetResolvedTypes(new_types);
- }
- dex_cache->FixupResolvedTypes<kWithoutReadBarrier>(new_types, fixup_adapter);
- }
- mirror::MethodDexCacheType* methods = dex_cache->GetResolvedMethods();
- if (methods != nullptr) {
- mirror::MethodDexCacheType* new_methods = fixup_adapter.ForwardObject(methods);
- if (methods != new_methods) {
- dex_cache->SetResolvedMethods(new_methods);
- }
- for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) {
- auto pair = mirror::DexCache::GetNativePairPtrSize(new_methods, j, pointer_size);
- ArtMethod* orig = pair.object;
- ArtMethod* copy = fixup_adapter.ForwardObject(orig);
- if (orig != copy) {
- pair.object = copy;
- mirror::DexCache::SetNativePairPtrSize(new_methods, j, pair, pointer_size);
- }
- }
- }
- mirror::FieldDexCacheType* fields = dex_cache->GetResolvedFields();
- if (fields != nullptr) {
- mirror::FieldDexCacheType* new_fields = fixup_adapter.ForwardObject(fields);
- if (fields != new_fields) {
- dex_cache->SetResolvedFields(new_fields);
- }
- for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
- mirror::FieldDexCachePair orig =
- mirror::DexCache::GetNativePairPtrSize(new_fields, j, pointer_size);
- mirror::FieldDexCachePair copy(fixup_adapter.ForwardObject(orig.object), orig.index);
- if (orig.object != copy.object) {
- mirror::DexCache::SetNativePairPtrSize(new_fields, j, copy, pointer_size);
- }
- }
- }
-
- mirror::MethodTypeDexCacheType* method_types = dex_cache->GetResolvedMethodTypes();
- if (method_types != nullptr) {
- mirror::MethodTypeDexCacheType* new_method_types =
- fixup_adapter.ForwardObject(method_types);
- if (method_types != new_method_types) {
- dex_cache->SetResolvedMethodTypes(new_method_types);
- }
- dex_cache->FixupResolvedMethodTypes<kWithoutReadBarrier>(new_method_types, fixup_adapter);
- }
- GcRoot<mirror::CallSite>* call_sites = dex_cache->GetResolvedCallSites();
- if (call_sites != nullptr) {
- GcRoot<mirror::CallSite>* new_call_sites = fixup_adapter.ForwardObject(call_sites);
- if (call_sites != new_call_sites) {
- dex_cache->SetResolvedCallSites(new_call_sites);
- }
- dex_cache->FixupResolvedCallSites<kWithoutReadBarrier>(new_call_sites, fixup_adapter);
- }
-
- GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
- if (preresolved_strings != nullptr) {
- GcRoot<mirror::String>* new_array = fixup_adapter.ForwardObject(preresolved_strings);
- if (preresolved_strings != new_array) {
- dex_cache->SetPreResolvedStrings(new_array);
- }
- const size_t num_preresolved_strings = dex_cache->NumPreResolvedStrings();
- for (size_t j = 0; j < num_preresolved_strings; ++j) {
- new_array[j] = GcRoot<mirror::String>(
- fixup_adapter(new_array[j].Read<kWithoutReadBarrier>()));
- }
- }
+ CHECK(dex_cache != nullptr);
+ patch_object_visitor.VisitDexCacheArrays(dex_cache);
}
}
{
// Only touches objects in the app image, no need for mutator lock.
TimingLogger::ScopedTiming timing("Fixup methods", &logger);
FixupArtMethodVisitor method_visitor(fixup_image,
- pointer_size,
+ kPointerSize,
boot_image,
app_image,
app_oat);
- image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size);
+ image_header.VisitPackedArtMethods(&method_visitor, target_base, kPointerSize);
}
if (fixup_image) {
{
@@ -1492,26 +1417,14 @@
}
{
TimingLogger::ScopedTiming timing("Fixup imt", &logger);
- image_header.VisitPackedImTables(fixup_adapter, target_base, pointer_size);
+ image_header.VisitPackedImTables(fixup_adapter, target_base, kPointerSize);
}
{
TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
- image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
+ image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, kPointerSize);
}
// In the app image case, the image methods are actually in the boot image.
image_header.RelocateImageMethods(boot_image.Delta());
- const auto& class_table_section = image_header.GetClassTableSection();
- if (class_table_section.Size() > 0u) {
- // Note that we require that ReadFromMemory does not make an internal copy of the elements.
- // This also relies on visit roots not doing any verification which could fail after we update
- // the roots to be the image addresses.
- ScopedObjectAccess soa(Thread::Current());
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- ClassTable temp_table;
- temp_table.ReadFromMemory(target_base + class_table_section.Offset());
- FixupRootVisitor root_visitor(boot_image, app_image, app_oat);
- temp_table.VisitRoots(root_visitor);
- }
// Fix up the intern table.
const auto& intern_table_section = image_header.GetInternedStringsSection();
if (intern_table_section.Size() > 0u) {
@@ -1654,8 +1567,10 @@
*error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
return false;
}
- if (system_hdr.GetComponentCount() != boot_class_path_.size()) {
- *error_msg = StringPrintf("Unexpected component count in %s, received %u, expected %zu",
+ if (system_hdr.GetComponentCount() == 0u ||
+ system_hdr.GetComponentCount() > boot_class_path_.size()) {
+ *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+ "expected non-zero and <= %zu",
filename.c_str(),
system_hdr.GetComponentCount(),
boot_class_path_.size());
@@ -1672,10 +1587,12 @@
return false;
}
+ ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(),
+ system_hdr.GetComponentCount());
std::vector<std::string> locations =
- ExpandMultiImageLocations(boot_class_path_locations_, image_location_);
+ ExpandMultiImageLocations(provided_locations, image_location_);
std::vector<std::string> filenames =
- ExpandMultiImageLocations(boot_class_path_locations_, filename);
+ ExpandMultiImageLocations(provided_locations, filename);
DCHECK_EQ(locations.size(), filenames.size());
std::vector<std::unique_ptr<ImageSpace>> spaces;
spaces.reserve(locations.size());
@@ -1694,7 +1611,7 @@
}
for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
std::string expected_boot_class_path =
- (i == 0u) ? android::base::Join(boot_class_path_locations_, ':') : std::string();
+ (i == 0u) ? android::base::Join(provided_locations, ':') : std::string();
if (!OpenOatFile(spaces[i].get(),
boot_class_path_[i],
expected_boot_class_path,
@@ -1766,22 +1683,6 @@
BitMemoryRegion visited_objects_;
};
- template <typename ReferenceVisitor>
- class ClassTableVisitor final {
- public:
- explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
- : reference_visitor_(reference_visitor) {}
-
- void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(root->AsMirrorPtr() != nullptr);
- root->Assign(reference_visitor_(root->AsMirrorPtr()));
- }
-
- private:
- ReferenceVisitor reference_visitor_;
- };
-
template <PointerSize kPointerSize>
static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -2444,9 +2345,113 @@
return true;
}
+std::string ImageSpace::GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+ const std::string& image_location,
+ InstructionSet image_isa,
+ /*out*/std::string* error_msg) {
+ std::string system_filename;
+ bool has_system = false;
+ std::string cache_filename;
+ bool has_cache = false;
+ bool dalvik_cache_exists = false;
+ bool is_global_cache = false;
+ if (!FindImageFilename(image_location.c_str(),
+ image_isa,
+ &system_filename,
+ &has_system,
+ &cache_filename,
+ &dalvik_cache_exists,
+ &has_cache,
+ &is_global_cache)) {
+ *error_msg = StringPrintf("Unable to find image file for %s and %s",
+ image_location.c_str(),
+ GetInstructionSetString(image_isa));
+ return std::string();
+ }
+
+ DCHECK(has_system || has_cache);
+ const std::string& filename = has_system ? system_filename : cache_filename;
+ std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg);
+ if (header == nullptr) {
+ return std::string();
+ }
+ if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) {
+ *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+ "expected non-zero and <= %zu",
+ filename.c_str(),
+ header->GetComponentCount(),
+ boot_class_path.size());
+ return std::string();
+ }
+
+ std::string boot_image_checksum =
+ StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum());
+ ArrayRef<const std::string> boot_class_path_tail =
+ ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount());
+ for (const std::string& bcp_filename : boot_class_path_tail) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ const ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(bcp_filename.c_str(),
+ bcp_filename, // The location does not matter here.
+ /*verify=*/ false,
+ /*verify_checksum=*/ false,
+ error_msg,
+ &dex_files)) {
+ return std::string();
+ }
+ DCHECK(!dex_files.empty());
+ StringAppendF(&boot_image_checksum, ":d");
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+ }
+ }
+ return boot_image_checksum;
+}
+
+std::string ImageSpace::GetBootClassPathChecksums(
+ const std::vector<ImageSpace*>& image_spaces,
+ const std::vector<const DexFile*>& boot_class_path) {
+ DCHECK(!image_spaces.empty());
+ const ImageHeader& primary_header = image_spaces.front()->GetImageHeader();
+ uint32_t component_count = primary_header.GetComponentCount();
+ DCHECK_EQ(component_count, image_spaces.size());
+ std::string boot_image_checksum =
+ StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum());
+ size_t pos = 0u;
+ for (const ImageSpace* space : image_spaces) {
+ size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size();
+ if (kIsDebugBuild) {
+ CHECK_NE(num_dex_files, 0u);
+ CHECK_LE(space->oat_file_non_owned_->GetOatDexFiles().size(), boot_class_path.size() - pos);
+ for (size_t i = 0; i != num_dex_files; ++i) {
+ CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(),
+ boot_class_path[pos + i]->GetLocation());
+ }
+ }
+ pos += num_dex_files;
+ }
+ ArrayRef<const DexFile* const> boot_class_path_tail =
+ ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos);
+ DCHECK(boot_class_path_tail.empty() ||
+ !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
+ for (const DexFile* dex_file : boot_class_path_tail) {
+ if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
+ StringAppendF(&boot_image_checksum, ":d");
+ }
+ StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+ }
+ return boot_image_checksum;
+}
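
Both overloads above emit the same textual layout: an "i;<component count>/<image checksum>" prefix for the boot image, then one ":d" group per remaining boot class path element, each listing "/<location checksum>" for every dex file in that element (so a multidex entry contributes several checksums to a single group). An illustration with made-up hex values:

  #include <cstdio>
  #include <string>

  int main() {
    std::string checksum = "i;2/1a2b3c4d";    // Two image components, image checksum.
    checksum += ":d/0011aabb";                // Single-dex boot class path entry.
    checksum += ":d/22ccdd33/44ee55ff";       // Multidex entry: one ":d", two checksums.
    std::printf("%s\n", checksum.c_str());
    return 0;
  }
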
+
std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
const std::vector<std::string>& dex_locations,
const std::string& image_location) {
+ return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location);
+}
+
+std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
+ ArrayRef<const std::string> dex_locations,
+ const std::string& image_location) {
DCHECK(!dex_locations.empty());
// Find the path.
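
Aside on the checksum string built by GetBootClassPathChecksums() above: it is an "i;<component count>/<image checksum>" prefix for the boot image, followed by one ":d" group per extra boot class path jar and a "/<checksum>" entry per dex file in that jar. A minimal sketch of the resulting layout, assuming a 2-component boot image and one extra jar with two dex files; every numeric value is invented for illustration and is not taken from this patch:

    // Sketch only: reproduces the string layout, not the lookup logic.
    #include <string>
    #include "android-base/stringprintf.h"

    std::string ExampleBootClassPathChecksums() {
      std::string s = android::base::StringPrintf("i;%d/%08x", 2, 0x8577c2f4u);  // boot image part
      android::base::StringAppendF(&s, ":d");                   // start of one extra jar
      android::base::StringAppendF(&s, "/%08x", 0x12345678u);   // classes.dex checksum
      android::base::StringAppendF(&s, "/%08x", 0x9abcdef0u);   // classes2.dex checksum
      return s;  // "i;2/8577c2f4:d/12345678/9abcdef0"
    }
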
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index dbc12d1..14e364a 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -24,6 +24,8 @@
namespace art {
+template <typename T> class ArrayRef;
+class DexFile;
class OatFile;
namespace gc {
@@ -124,6 +126,19 @@
bool* has_data,
bool *is_global_cache);
+ // Returns the checksums for the boot image and extra boot class path dex files,
+ // based on the boot class path, image location and ISA (may differ from the ISA of an
+ // initialized Runtime). The boot image and dex files do not need to be loaded in memory.
+ static std::string GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+ const std::string& image_location,
+ InstructionSet image_isa,
+ /*out*/std::string* error_msg);
+
+ // Returns the checksums for the boot image and extra boot class path dex files,
+ // based on the boot image and boot class path dex files loaded in memory.
+ static std::string GetBootClassPathChecksums(const std::vector<ImageSpace*>& image_spaces,
+ const std::vector<const DexFile*>& boot_class_path);
+
// Expand a single image location to multi-image locations based on the dex locations.
static std::vector<std::string> ExpandMultiImageLocations(
const std::vector<std::string>& dex_locations,
@@ -188,7 +203,14 @@
friend class Space;
private:
+ // Internal overload that takes ArrayRef<> instead of vector<>.
+ static std::vector<std::string> ExpandMultiImageLocations(
+ ArrayRef<const std::string> dex_locations,
+ const std::string& image_location);
+
class BootImageLoader;
+ template <typename ReferenceVisitor>
+ class ClassTableVisitor;
class Loader;
template <typename PatchObjectVisitor>
class PatchArtFieldVisitor;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 5ff1270..dbec4ea 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -205,9 +205,10 @@
continue;
}
if (r->IsLarge()) {
- // Avoid visiting dead large objects since they may contain dangling pointers to the
- // from-space.
- DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+ // We may visit a large object with live_bytes = 0 here. However, this is
+ // safe: such an object cannot contain dangling pointers, because its regions
+ // (and the regions of dead referents) cannot be reused for new allocations
+ // without first clearing the regions' live_bytes and state.
mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
DCHECK(obj->GetClass() != nullptr);
visitor(obj);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 21cae93..98b140e 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -319,6 +319,7 @@
state == RegionState::kRegionStateLarge) &&
type == RegionType::kRegionTypeToSpace);
bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
+ bool is_newly_allocated = r->IsNewlyAllocated();
if (should_evacuate) {
r->SetAsFromSpace();
DCHECK(r->IsInFromSpace());
@@ -329,6 +330,17 @@
if (UNLIKELY(state == RegionState::kRegionStateLarge &&
type == RegionType::kRegionTypeToSpace)) {
prev_large_evacuated = should_evacuate;
+ // In 2-phase full heap GC, this function is called after marking is
+ // done, so it is possible that some newly allocated large object is
+ // already marked but its live_bytes is still -1. We need to clear the
+ // mark-bit, otherwise live_bytes will not be updated in
+ // ConcurrentCopying::ProcessMarkStackRef() and the evacuation logic
+ // will break.
+ if (kEnableGenerationalConcurrentCopyingCollection
+ && !should_evacuate
+ && is_newly_allocated) {
+ GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
+ }
num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
DCHECK_GT(num_expected_large_tails, 0U);
}
@@ -367,7 +379,8 @@
}
void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
- /* out */ uint64_t* cleared_objects) {
+ /* out */ uint64_t* cleared_objects,
+ const bool clear_bitmap) {
DCHECK(cleared_bytes != nullptr);
DCHECK(cleared_objects != nullptr);
*cleared_bytes = 0;
@@ -395,13 +408,18 @@
// (see b/62194020).
uint8_t* clear_block_begin = nullptr;
uint8_t* clear_block_end = nullptr;
- auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
+ auto clear_region = [this, &clear_block_begin, &clear_block_end, clear_bitmap](Region* r) {
r->Clear(/*zero_and_release_pages=*/false);
if (clear_block_end != r->Begin()) {
// Region `r` is not adjacent to the current clear block; zero and release
// pages within the current block and restart a new clear block at the
// beginning of region `r`.
ZeroAndProtectRegion(clear_block_begin, clear_block_end);
+ if (clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(clear_block_begin),
+ reinterpret_cast<mirror::Object*>(clear_block_end));
+ }
clear_block_begin = r->Begin();
}
// Add region `r` to the clear block.
@@ -426,20 +444,23 @@
// It is also better to clear these regions now instead of at the end of the next GC to
// save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
// live percent evacuation logic.
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ clear_region(r);
size_t free_regions = 1;
// Also release RAM for large tails.
while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
- DCHECK(r->IsLarge());
clear_region(&regions_[i + free_regions]);
++free_regions;
}
- *cleared_bytes += r->BytesAllocated();
- *cleared_objects += r->ObjectsAllocated();
num_non_free_regions_ -= free_regions;
- clear_region(r);
- GetLiveBitmap()->ClearRange(
- reinterpret_cast<mirror::Object*>(r->Begin()),
- reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ // When clear_bitmap is true, clearing the bitmap is taken care of in
+ // clear_region().
+ if (!clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ }
continue;
}
r->SetUnevacFromSpaceAsToSpace();
@@ -519,6 +540,11 @@
}
// Clear pages for the last block since clearing happens when a new block opens.
ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
+ if (clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(clear_block_begin),
+ reinterpret_cast<mirror::Object*>(clear_block_end));
+ }
// Update non_free_region_index_limit_.
SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8810f8c..0d5ebcc 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -228,6 +228,11 @@
return false;
}
+ bool IsRegionNewlyAllocated(size_t idx) const NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK_LT(idx, num_regions_);
+ return regions_[idx].IsNewlyAllocated();
+ }
+
bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
if (HasAddress(ref)) {
Region* r = RefToRegionUnlocked(ref);
@@ -291,7 +296,9 @@
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
+ void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
+ /* out */ uint64_t* cleared_objects,
+ const bool clear_bitmap)
REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
@@ -310,6 +317,40 @@
}
}
+ void SetAllRegionLiveBytesZero() REQUIRES(!region_lock_) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
+ Region* r = &regions_[i];
+ // Newly allocated regions don't need up-to-date live_bytes_ for deciding
+ // whether to be evacuated or not. See Region::ShouldBeEvacuated().
+ if (!r->IsFree() && !r->IsNewlyAllocated()) {
+ r->ZeroLiveBytes();
+ }
+ }
+ }
+
+ size_t RegionIdxForRefUnchecked(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(HasAddress(ref));
+ uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
+ size_t reg_idx = offset / kRegionSize;
+ DCHECK_LT(reg_idx, num_regions_);
+ Region* reg = &regions_[reg_idx];
+ DCHECK_EQ(reg->Idx(), reg_idx);
+ DCHECK(reg->Contains(ref));
+ return reg_idx;
+ }
+ // Return -1 as region index for references outside this region space.
+ size_t RegionIdxForRef(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+ if (HasAddress(ref)) {
+ return RegionIdxForRefUnchecked(ref);
+ } else {
+ return static_cast<size_t>(-1);
+ }
+ }
+
void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);
@@ -515,11 +556,10 @@
ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
void AddLiveBytes(size_t live_bytes) {
- DCHECK(IsInUnevacFromSpace());
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || IsInUnevacFromSpace());
DCHECK(!IsLargeTail());
DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
- // For large allocations, we always consider all bytes in the
- // regions live.
+ // For large allocations, we always consider all bytes in the regions live.
live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
DCHECK_LE(live_bytes_, BytesAllocated());
}
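
For context on the RegionIdxForRef helpers added above: the index is plain pointer arithmetic over fixed-size regions, with -1 returned for references outside the region space. A small sketch of the same arithmetic; the region size used here is an assumption for illustration and is not shown in this patch:

    // Sketch only: mirrors the offset / kRegionSize computation, not ART's types.
    #include <cstddef>
    #include <cstdint>

    size_t ExampleRegionIdx(uintptr_t ref, uintptr_t space_begin, uintptr_t space_end) {
      constexpr size_t kAssumedRegionSize = 256 * 1024;  // assumed value, for illustration only
      if (ref < space_begin || ref >= space_end) {
        return static_cast<size_t>(-1);  // not in this region space
      }
      return (ref - space_begin) / kAssumedRegionSize;  // e.g. offset 0x50000 -> region 1
    }
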
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index d004d64..2e41a9d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -366,6 +366,13 @@
}
} else {
// Enter the "with access check" interpreter.
+
+ // The boot classpath should really not have to run access checks.
+ DCHECK(method->GetDeclaringClass()->GetClassLoader() != nullptr
+ || Runtime::Current()->IsVerificationSoftFail()
+ || Runtime::Current()->IsAotCompiler())
+ << method->PrettyMethod();
+
if (kInterpreterImplKind == kMterpImplKind) {
// No access check variants for Mterp. Just use the switch version.
if (transaction_active) {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index e43d771..03c97f4 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -58,7 +58,7 @@
void* Jit::jit_compiler_handle_ = nullptr;
void* (*Jit::jit_load_)(void) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
-bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
+bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
void (*Jit::jit_update_options_)(void*) = nullptr;
@@ -242,7 +242,7 @@
return true;
}
-bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
+bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr) {
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
@@ -272,7 +272,7 @@
VLOG(jit) << "Compiling method "
<< ArtMethod::PrettyMethod(method_to_compile)
<< " osr=" << std::boolalpha << osr;
- bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
+ bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr);
code_cache_->DoneCompiling(method_to_compile, self, osr);
if (!success) {
VLOG(jit) << "Failed to compile method "
@@ -291,6 +291,12 @@
return success;
}
+void Jit::WaitForWorkersToBeCreated() {
+ if (thread_pool_ != nullptr) {
+ thread_pool_->WaitForWorkersToBeCreated();
+ }
+}
+
void Jit::DeleteThreadPool() {
Thread* self = Thread::Current();
DCHECK(Runtime::Current()->IsShuttingDown(self));
@@ -549,6 +555,7 @@
enum class TaskKind {
kAllocateProfile,
kCompile,
+ kCompileBaseline,
kCompileOsr,
};
@@ -568,10 +575,12 @@
ScopedObjectAccess soa(self);
switch (kind_) {
case TaskKind::kCompile:
+ case TaskKind::kCompileBaseline:
case TaskKind::kCompileOsr: {
Runtime::Current()->GetJit()->CompileMethod(
method_,
self,
+ /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
/* osr= */ (kind_ == TaskKind::kCompileOsr));
break;
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 7ce5f07..e5c9766 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -161,7 +161,7 @@
// Create JIT itself.
static Jit* Create(JitCodeCache* code_cache, JitOptions* options);
- bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
+ bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
const JitCodeCache* GetCodeCache() const {
@@ -174,6 +174,7 @@
void CreateThreadPool();
void DeleteThreadPool();
+ void WaitForWorkersToBeCreated();
// Dump interesting info: #methods compiled, code vs data size, compile / verify cumulative
// loggers.
@@ -304,7 +305,7 @@
static void* jit_compiler_handle_;
static void* (*jit_load_)(void);
static void (*jit_unload_)(void*);
- static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
+ static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool);
static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
static void (*jit_update_options_)(void*);
static bool (*jit_generate_debug_info_)(void*);
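
On the new `baseline` flag threaded through Jit::CompileMethod() and jit_compile_method_ above: compile tasks now distinguish kCompile, kCompileBaseline and kCompileOsr, and callers pass the flag explicitly alongside `osr`. A hedged sketch of a call site requesting a baseline, non-OSR compilation; the helper function and its use are assumptions, only the CompileMethod signature comes from this patch:

    // Sketch only: hypothetical helper, assuming jit.h and scoped_thread_state_change.h
    // are included and `method` is an already-resolved ArtMethod.
    void RequestBaselineCompilation(ArtMethod* method) {
      ScopedObjectAccess soa(Thread::Current());  // CompileMethod requires the mutator lock
      jit::Jit* jit = Runtime::Current()->GetJit();
      if (jit != nullptr) {
        jit->CompileMethod(method, soa.Self(), /*baseline=*/ true, /*osr=*/ false);
      }
    }
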
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 185ae3b..679ca43 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -871,6 +871,9 @@
}
inline void Class::SetAccessFlags(uint32_t new_access_flags) {
+ if (kIsDebugBuild) {
+ SetAccessFlagsDCheck(new_access_flags);
+ }
// Called inside a transaction when setting pre-verified flag during boot image compilation.
if (Runtime::Current()->IsActiveTransaction()) {
SetField32<true>(AccessFlagsOffset(), new_access_flags);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 83d76a9..c5ed1bf 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -206,6 +206,10 @@
}
}
+ if (kIsDebugBuild && new_status >= ClassStatus::kInitialized) {
+ CHECK(h_this->WasVerificationAttempted()) << h_this->PrettyClassAndClassLoader();
+ }
+
if (!class_linker_initialized) {
// When the class linker is being initialized it is single threaded and by definition there can be
// no waiters. During initialization classes may appear temporary but won't be retired as their
@@ -1461,5 +1465,12 @@
template void Class::GetAccessFlagsDCheck<kVerifyWrites>();
template void Class::GetAccessFlagsDCheck<kVerifyAll>();
+void Class::SetAccessFlagsDCheck(uint32_t new_access_flags) {
+ uint32_t old_access_flags = GetField32<kVerifyNone>(AccessFlagsOffset());
+ // kAccVerificationAttempted is retained.
+ CHECK((old_access_flags & kAccVerificationAttempted) == 0 ||
+ (new_access_flags & kAccVerificationAttempted) != 0);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 66b1405..d5aa514 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1306,6 +1306,8 @@
template<VerifyObjectFlags kVerifyFlags>
void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetAccessFlagsDCheck(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 3e5003c..892d4cc 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -271,7 +271,7 @@
#endif
}
-static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) {
+static void VMRuntime_registerNativeAllocationInternal(JNIEnv* env, jobject, jint bytes) {
if (UNLIKELY(bytes < 0)) {
ScopedObjectAccess soa(env);
ThrowRuntimeException("allocation size negative %d", bytes);
@@ -280,11 +280,7 @@
Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast<size_t>(bytes));
}
-static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
- Runtime::Current()->RegisterSensitiveThread();
-}
-
-static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
+static void VMRuntime_registerNativeFreeInternal(JNIEnv* env, jobject, jint bytes) {
if (UNLIKELY(bytes < 0)) {
ScopedObjectAccess soa(env);
ThrowRuntimeException("allocation size negative %d", bytes);
@@ -293,6 +289,18 @@
Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes));
}
+static jint VMRuntime_getNotifyNativeInterval(JNIEnv*, jclass) {
+ return Runtime::Current()->GetHeap()->GetNotifyNativeInterval();
+}
+
+static void VMRuntime_notifyNativeAllocationsInternal(JNIEnv* env, jobject) {
+ Runtime::Current()->GetHeap()->NotifyNativeAllocations(env);
+}
+
+static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
+ Runtime::Current()->RegisterSensitiveThread();
+}
+
static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
Runtime* runtime = Runtime::Current();
runtime->UpdateProcessState(static_cast<ProcessState>(process_state));
@@ -710,9 +718,11 @@
FAST_NATIVE_METHOD(VMRuntime, newUnpaddedArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
- NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
+ NATIVE_METHOD(VMRuntime, registerNativeAllocationInternal, "(I)V"),
+ NATIVE_METHOD(VMRuntime, registerNativeFreeInternal, "(I)V"),
+ NATIVE_METHOD(VMRuntime, getNotifyNativeInterval, "()I"),
+ NATIVE_METHOD(VMRuntime, notifyNativeAllocationsInternal, "()V"),
NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"),
- NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"),
NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"),
NATIVE_METHOD(VMRuntime, runHeapTasks, "()V"),
diff --git a/runtime/oat.cc b/runtime/oat.cc
index e931b28..d7c968f 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -79,8 +79,7 @@
quick_generic_jni_trampoline_offset_(0),
quick_imt_conflict_trampoline_offset_(0),
quick_resolution_trampoline_offset_(0),
- quick_to_interpreter_bridge_offset_(0),
- boot_image_checksum_(0) {
+ quick_to_interpreter_bridge_offset_(0) {
// Don't want asserts in header as they would be checked in each file that includes it. But the
// fields are private, so we check inside a method.
static_assert(sizeof(magic_) == sizeof(kOatMagic),
@@ -316,16 +315,6 @@
quick_to_interpreter_bridge_offset_ = offset;
}
-uint32_t OatHeader::GetBootImageChecksum() const {
- CHECK(IsValid());
- return boot_image_checksum_;
-}
-
-void OatHeader::SetBootImageChecksum(uint32_t boot_image_checksum) {
- CHECK(IsValid());
- boot_image_checksum_ = boot_image_checksum;
-}
-
uint32_t OatHeader::GetKeyValueStoreSize() const {
CHECK(IsValid());
return key_value_store_size_;
diff --git a/runtime/oat.h b/runtime/oat.h
index b09c81e..ded1489 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,8 +31,8 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Pass boot class path to LoadBootImage.
- static constexpr uint8_t kOatVersion[] = { '1', '6', '5', '\0' };
+ // Last oat version changed reason: Partial boot image.
+ static constexpr uint8_t kOatVersion[] = { '1', '6', '6', '\0' };
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDebuggableKey = "debuggable";
@@ -40,6 +40,7 @@
static constexpr const char* kCompilerFilter = "compiler-filter";
static constexpr const char* kClassPathKey = "classpath";
static constexpr const char* kBootClassPathKey = "bootclasspath";
+ static constexpr const char* kBootClassPathChecksumsKey = "bootclasspath-checksums";
static constexpr const char* kConcurrentCopying = "concurrent-copying";
static constexpr const char* kCompilationReasonKey = "compilation-reason";
@@ -93,9 +94,6 @@
InstructionSet GetInstructionSet() const;
uint32_t GetInstructionSetFeaturesBitmap() const;
- uint32_t GetBootImageChecksum() const;
- void SetBootImageChecksum(uint32_t boot_image_checksum);
-
uint32_t GetKeyValueStoreSize() const;
const uint8_t* GetKeyValueStore() const;
const char* GetStoreValueByKey(const char* key) const;
@@ -137,8 +135,6 @@
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
- uint32_t boot_image_checksum_;
-
uint32_t key_value_store_size_;
uint8_t key_value_store_[0]; // note variable width data at end
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 6f32b98..8b81bb9 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -419,7 +419,7 @@
// starts up.
LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
<< "Allow oat file use. This is potentially dangerous.";
- } else if (file.GetOatHeader().GetBootImageChecksum() != image_info->boot_image_checksum) {
+ } else if (!image_info->ValidateBootClassPathChecksums(file)) {
VLOG(oat) << "Oat image checksum does not match image checksum.";
return kOatBootImageOutOfDate;
}
@@ -560,6 +560,13 @@
return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
}
+bool OatFileAssistant::ImageInfo::ValidateBootClassPathChecksums(const OatFile& oat_file) const {
+ const char* oat_boot_class_path_checksums =
+ oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+ return oat_boot_class_path_checksums != nullptr &&
+ oat_boot_class_path_checksums == boot_class_path_checksums;
+}
+
std::unique_ptr<OatFileAssistant::ImageInfo>
OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
CHECK(error_msg != nullptr);
@@ -567,14 +574,11 @@
Runtime* runtime = Runtime::Current();
std::unique_ptr<ImageInfo> info(new ImageInfo());
info->location = runtime->GetImageLocation();
-
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
- if (image_header == nullptr) {
+ info->boot_class_path_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+ runtime->GetBootClassPath(), info->location, isa, error_msg);
+ if (info->boot_class_path_checksums.empty()) {
return nullptr;
}
-
- info->boot_image_checksum = image_header->GetImageChecksum();
return info;
}
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 09c9d3b..def55b8 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -246,8 +246,10 @@
private:
struct ImageInfo {
- uint32_t boot_image_checksum = 0;
+ bool ValidateBootClassPathChecksums(const OatFile& oat_file) const;
+
std::string location;
+ std::string boot_class_path_checksums;
static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa,
std::string* error_msg);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d2c915e..55bc2ec 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -349,6 +349,9 @@
}
if (jit_ != nullptr) {
+ // Wait for the workers to be created since there can't be any threads attaching during
+ // shutdown.
+ jit_->WaitForWorkersToBeCreated();
// Stop the profile saver thread before marking the runtime as shutting down.
// The saver will try to dump the profiles before being stopped and that
// requires holding the mutator lock.
@@ -971,8 +974,8 @@
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
-static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
- const std::vector<std::string>& dex_locations,
+static size_t OpenDexFiles(ArrayRef<const std::string> dex_filenames,
+ ArrayRef<const std::string> dex_locations,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
size_t failure_count = 0;
@@ -1429,6 +1432,21 @@
GetInternTable()->AddImageStringsToTable(image_space, VoidFunctor());
}
}
+ if (heap_->GetBootImageSpaces().size() != GetBootClassPath().size()) {
+ // The boot image did not contain all boot class path components. Load the rest.
+ DCHECK_LT(heap_->GetBootImageSpaces().size(), GetBootClassPath().size());
+ size_t start = heap_->GetBootImageSpaces().size();
+ DCHECK_LT(start, GetBootClassPath().size());
+ std::vector<std::unique_ptr<const DexFile>> extra_boot_class_path;
+ if (runtime_options.Exists(Opt::BootClassPathDexList)) {
+ extra_boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
+ } else {
+ OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()).SubArray(start),
+ ArrayRef<const std::string>(GetBootClassPathLocations()).SubArray(start),
+ &extra_boot_class_path);
+ }
+ class_linker_->AddExtraBootDexFiles(self, std::move(extra_boot_class_path));
+ }
if (IsJavaDebuggable()) {
// Now that we have loaded the boot image, deoptimize its methods if we are running
// debuggable, as the code may have been compiled non-debuggable.
@@ -1439,7 +1457,9 @@
if (runtime_options.Exists(Opt::BootClassPathDexList)) {
boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
} else {
- OpenDexFiles(GetBootClassPath(), GetBootClassPathLocations(), &boot_class_path);
+ OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()),
+ ArrayRef<const std::string>(GetBootClassPathLocations()),
+ &boot_class_path);
}
if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
LOG(ERROR) << "Could not initialize without image: " << error_msg;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index de698c2..e1c756d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2012 The Android Open Source Project
*
@@ -86,7 +87,7 @@
void ThreadPoolWorker::Run() {
Thread* self = Thread::Current();
Task* task = nullptr;
- thread_pool_->creation_barier_.Wait(self);
+ thread_pool_->creation_barier_.Pass(self);
while ((task = thread_pool_->GetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
@@ -150,7 +151,7 @@
MutexLock mu(self, task_queue_lock_);
shutting_down_ = false;
// Add one since the caller of constructor waits on the barrier too.
- creation_barier_.Init(self, max_active_workers_ + 1);
+ creation_barier_.Init(self, max_active_workers_);
while (GetThreadCount() < max_active_workers_) {
const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
GetThreadCount());
@@ -158,8 +159,16 @@
new ThreadPoolWorker(this, worker_name, worker_stack_size_));
}
}
- // Wait for all of the threads to attach.
- creation_barier_.Wait(Thread::Current());
+}
+
+void ThreadPool::WaitForWorkersToBeCreated() {
+ creation_barier_.Increment(Thread::Current(), 0);
+}
+
+const std::vector<ThreadPoolWorker*>& ThreadPool::GetWorkers() {
+ // Wait for all the workers to be created before returning them.
+ WaitForWorkersToBeCreated();
+ return threads_;
}
void ThreadPool::DeleteThreads() {
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index f55d72e..0a2a50c 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -101,9 +101,7 @@
return threads_.size();
}
- const std::vector<ThreadPoolWorker*>& GetWorkers() const {
- return threads_;
- }
+ const std::vector<ThreadPoolWorker*>& GetWorkers();
// Broadcast to the workers and tell them to empty out the work queue.
void StartWorkers(Thread* self) REQUIRES(!task_queue_lock_);
@@ -154,6 +152,9 @@
// Set the "nice" priorty for threads in the pool.
void SetPthreadPriority(int priority);
+ // Wait for workers to be created.
+ void WaitForWorkersToBeCreated();
+
protected:
// get a task to run, blocks if there are no tasks left
virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_);
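
On the barrier change above: each worker now calls Pass() once on startup, the barrier is initialized to max_active_workers_, and WaitForWorkersToBeCreated() (also invoked internally by GetWorkers()) blocks until that count drains. A small usage sketch relying only on members shown in this patch; the helper itself is hypothetical:

    // Sketch only: assumes thread_pool.h and android-base logging are available.
    void DumpWorkerCount(ThreadPool* pool) {
      pool->WaitForWorkersToBeCreated();  // returns once every worker has called Pass()
      const std::vector<ThreadPoolWorker*>& workers = pool->GetWorkers();  // safe: all created
      LOG(INFO) << "thread pool has " << workers.size() << " workers";
    }
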
diff --git a/test/175-alloc-big-bignums/expected.txt b/test/175-alloc-big-bignums/expected.txt
new file mode 100644
index 0000000..f75da10
--- /dev/null
+++ b/test/175-alloc-big-bignums/expected.txt
@@ -0,0 +1 @@
+Test complete
diff --git a/test/175-alloc-big-bignums/info.txt b/test/175-alloc-big-bignums/info.txt
new file mode 100644
index 0000000..8f6bcc3
--- /dev/null
+++ b/test/175-alloc-big-bignums/info.txt
@@ -0,0 +1,11 @@
+Allocate large numbers of huge BigIntegers in rapid succession. Most of the
+associated memory will be in the C++ heap. This makes sure that we trigger
+the garbage collector often enough to prevent us from running out of memory.
+
+The test allocates roughly 10GB of native memory, approximately 1MB of which
+will be live at any point. Basically all native memory deallocation is
+triggered by Java garbage collection.
+
+This test is a lot nastier than it looks. In particular, failure on target tends
+to exhaust device memory, and kill off all processes on the device, including the
+adb daemon :-( .
diff --git a/test/175-alloc-big-bignums/src/Main.java b/test/175-alloc-big-bignums/src/Main.java
new file mode 100644
index 0000000..5fbeb46
--- /dev/null
+++ b/test/175-alloc-big-bignums/src/Main.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.math.BigInteger;
+
+// This is motivated by the assumption that BigInteger allocates malloc memory
+// underneath. That's true (in 2018) on Android.
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ final int nIters = 20_000; // Presumed < 1_000_000.
+ final BigInteger big2_20 = BigInteger.valueOf(1024*1024); // 2^20
+ BigInteger huge = BigInteger.valueOf(1).shiftLeft(4_000_000); // ~0.5MB
+ for (int i = 0; i < nIters; ++i) { // 10 GB total
+ huge = huge.add(BigInteger.ONE);
+ }
+ if (huge.bitLength() != 4_000_001) {
+ System.out.println("Wrong answer length: " + huge.bitLength());
+ } else if (huge.mod(big2_20).compareTo(BigInteger.valueOf(nIters)) != 0) {
+ System.out.println("Wrong answer: ..." + huge.mod(big2_20));
+ } else {
+ System.out.println("Test complete");
+ }
+ }
+}
diff --git a/test/1934-jvmti-signal-thread/signal_threads.cc b/test/1934-jvmti-signal-thread/signal_threads.cc
index 726a7a86..dfb08c1 100644
--- a/test/1934-jvmti-signal-thread/signal_threads.cc
+++ b/test/1934-jvmti-signal-thread/signal_threads.cc
@@ -47,19 +47,19 @@
jvmti_env,
jvmti_env->Allocate(sizeof(NativeMonitor),
reinterpret_cast<unsigned char**>(&mon)))) {
- return -1l;
+ return -1L;
}
if (JvmtiErrorToException(env,
jvmti_env,
jvmti_env->CreateRawMonitor("test-1934 start",
&mon->start_monitor))) {
- return -1l;
+ return -1L;
}
if (JvmtiErrorToException(env,
jvmti_env,
jvmti_env->CreateRawMonitor("test-1934 continue",
&mon->continue_monitor))) {
- return -1l;
+ return -1L;
}
mon->should_continue = false;
mon->should_start = false;
@@ -92,7 +92,7 @@
while (!mon->should_continue) {
if (JvmtiErrorToException(env,
jvmti_env,
- jvmti_env->RawMonitorWait(mon->continue_monitor, -1l))) {
+ jvmti_env->RawMonitorWait(mon->continue_monitor, -1L))) {
JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor));
return;
}
@@ -112,7 +112,7 @@
while (!mon->should_start) {
if (JvmtiErrorToException(env,
jvmti_env,
- jvmti_env->RawMonitorWait(mon->start_monitor, -1l))) {
+ jvmti_env->RawMonitorWait(mon->start_monitor, -1L))) {
return;
}
}
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 17ccd9a..00827cf 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -46,7 +46,7 @@
usleep(1000);
}
// Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(method, soa.Self(), /* osr */ false);
+ jit->CompileMethod(method, soa.Self(), /*baseline=*/ false, /*osr=*/ false);
}
CodeInfo info(header);
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index b2b3634..dc0e94c 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -128,7 +128,7 @@
// Sleep to yield to the compiler thread.
usleep(1000);
// Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(m, Thread::Current(), /* osr */ true);
+ jit->CompileMethod(m, Thread::Current(), /*baseline=*/ false, /*osr=*/ true);
}
});
}
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index 52367c7..82c82c6 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -41,6 +41,7 @@
if (status == ClassStatus::kResolved) {
ObjectLock<mirror::Class> lock(soa.Self(), klass);
klass->SetStatus(klass, ClassStatus::kVerified, soa.Self());
+ klass->SetVerificationAttempted();
} else {
LOG(ERROR) << klass->PrettyClass() << " has unexpected status: " << status;
}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 5d07601..e3157ef 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -47,12 +47,8 @@
# Also need libopenjdkjvmti.
ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libopenjdkjvmti-target libopenjdkjvmtid-target
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += \
+ $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
# All tests require the host executables. The tests also depend on the core images, but on
# specific version depending on the compiler.
@@ -74,6 +70,7 @@
$(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmti$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmtid$(ART_HOST_SHLIB_EXTENSION) \
+ $(HOST_CORE_DEX_LOCATIONS) \
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 65127fc..55631a9 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -227,7 +227,7 @@
// Make sure there is a profiling info, required by the compiler.
ProfilingInfo::Create(self, method, /* retry_allocation */ true);
// Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(method, self, /* osr */ false);
+ jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false);
}
}
}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 16106ab..4c31ee5 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -548,7 +548,10 @@
exit
fi
-bpath_modules="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+bpath_modules="core-oj core-libart core-simple okhttp bouncycastle conscrypt"
if [ "${HOST}" = "y" ]; then
framework="${ANDROID_HOST_OUT}/framework"
if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then
diff --git a/test/knownfailures.json b/test/knownfailures.json
index ae20557..5bcd1c0 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -447,19 +447,63 @@
},
{
"tests": [
- "137-cfi",
- "595-profile-saving",
- "900-hello-plugin",
- "909-attach-agent",
- "981-dedup-original-dex",
- "1900-track-alloc"
+ "004-ThreadStress",
+ "130-hprof",
+ "579-inline-infinite",
+ "1946-list-descriptors"
],
- "description": ["Tests that require exact knowledge of the number of plugins and agents."],
+ "description": ["Too slow to finish in the timeout"],
"variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress"
},
{
"tests": [
+ "911-get-stack-trace"
+ ],
+ "description": ["Tests that fail when run with step-stress for unknown reasons."],
+ "bug": "b/120995005",
+ "variant": "jvmti-stress | step-stress"
+ },
+ {
+ "tests": [
+ "004-SignalTest",
+ "004-StackWalk",
+ "064-field-access",
+ "083-compiler-regressions",
+ "098-ddmc",
+ "107-int-math2",
+ "129-ThreadGetId",
+ "135-MirandaDispatch",
"132-daemon-locks-shutdown",
+ "163-app-image-methods",
+ "607-daemon-stress",
+ "674-hiddenapi",
+ "687-deopt",
+ "904-object-allocation"
+ ],
+ "description": ["Tests that sometimes fail when run with jvmti-stress for unknown reasons."],
+ "bug": "b/120995005",
+ "variant": "jvmti-stress | trace-stress | field-stress | step-stress"
+ },
+ {
+ "tests": [
+ "018-stack-overflow",
+ "137-cfi",
+ "595-profile-saving",
+ "597-deopt-busy-loop",
+ "597-deopt-new-string",
+ "660-clinit",
+ "900-hello-plugin",
+ "909-attach-agent",
+ "924-threads",
+ "981-dedup-original-dex",
+ "1900-track-alloc"
+ ],
+ "description": ["Tests that require exact knowledge of the deoptimization state, the ",
+ "number of plugins and agents, or breaks other openjdkjvmti assumptions."],
+ "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress"
+ },
+ {
+ "tests": [
"607-daemon-stress",
"602-deoptimizeable",
"121-simple-suspend-check",
@@ -569,6 +613,12 @@
"env_vars": {"SANITIZE_HOST": "address"}
},
{
+ "tests": "175-alloc-big-bignums",
+ "description": "ASAN runs out of memory due to huge allocations.",
+ "variant": "host",
+ "env_vars": {"SANITIZE_HOST": "address"}
+ },
+ {
"tests": "202-thread-oome",
"description": "ASAN aborts when large thread stacks are requested.",
"variant": "host",
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index e123e9f..cd7af10 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -157,14 +157,12 @@
: jvmtienv_(jvmtienv),
class_(c),
name_(nullptr),
- generic_(nullptr),
file_(nullptr),
debug_ext_(nullptr) {}
~ScopedClassInfo() {
if (class_ != nullptr) {
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
- jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
}
@@ -173,12 +171,11 @@
bool Init() {
if (class_ == nullptr) {
name_ = const_cast<char*>("<NONE>");
- generic_ = const_cast<char*>("<NONE>");
return true;
} else {
jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_);
jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_);
- return jvmtienv_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE &&
+ return jvmtienv_->GetClassSignature(class_, &name_, nullptr) == JVMTI_ERROR_NONE &&
ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
ret1 != JVMTI_ERROR_INVALID_CLASS &&
ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
@@ -192,9 +189,6 @@
const char* GetName() const {
return name_;
}
- const char* GetGeneric() const {
- return generic_;
- }
const char* GetSourceDebugExtension() const {
if (debug_ext_ == nullptr) {
return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
@@ -214,7 +208,6 @@
jvmtiEnv* jvmtienv_;
jclass class_;
char* name_;
- char* generic_;
char* file_;
char* debug_ext_;
};
@@ -229,14 +222,12 @@
class_info_(nullptr),
name_(nullptr),
signature_(nullptr),
- generic_(nullptr),
first_line_(-1) {}
~ScopedMethodInfo() {
DeleteLocalRef(env_, declaring_class_);
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
- jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
}
bool Init() {
@@ -257,7 +248,7 @@
return false;
}
return class_info_->Init() &&
- (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+ (jvmtienv_->GetMethodName(method_, &name_, &signature_, nullptr) == JVMTI_ERROR_NONE);
}
const ScopedClassInfo& GetDeclaringClassInfo() const {
@@ -276,10 +267,6 @@
return signature_;
}
- const char* GetGeneric() const {
- return generic_;
- }
-
jint GetFirstLine() const {
return first_line_;
}
@@ -292,7 +279,6 @@
std::unique_ptr<ScopedClassInfo> class_info_;
char* name_;
char* signature_;
- char* generic_;
jint first_line_;
friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m);
@@ -306,20 +292,18 @@
field_(field),
class_info_(nullptr),
name_(nullptr),
- type_(nullptr),
- generic_(nullptr) {}
+ type_(nullptr) {}
~ScopedFieldInfo() {
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(type_));
- jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
}
bool Init() {
class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
return class_info_->Init() &&
(jvmtienv_->GetFieldName(
- declaring_class_, field_, &name_, &type_, &generic_) == JVMTI_ERROR_NONE);
+ declaring_class_, field_, &name_, &type_, nullptr) == JVMTI_ERROR_NONE);
}
const ScopedClassInfo& GetDeclaringClassInfo() const {
@@ -338,10 +322,6 @@
return type_;
}
- const char* GetGeneric() const {
- return generic_;
- }
-
private:
jvmtiEnv* jvmtienv_;
jclass declaring_class_;
@@ -349,7 +329,6 @@
std::unique_ptr<ScopedClassInfo> class_info_;
char* name_;
char* type_;
- char* generic_;
friend std::ostream& operator<<(std::ostream &os, ScopedFieldInfo const& m);
};
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index ad6ee6b..9f22827 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -72,8 +72,10 @@
# FIXME: The soong invocation we're using for getting the variables does not give us anything
# defined in Android.common_path.mk, otherwise we would just use HOST-/TARGET_TEST_CORE_JARS.
- # The core_jars_list must match the TEST_CORE_JARS variable in the Android.common_path.mk .
- core_jars_list="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+ # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+ # because that's what we use for compiling the core.art image.
+ # It may contain additional modules from TEST_CORE_JARS.
+ core_jars_list="core-oj core-libart core-simple"
core_jars_suffix=
if [[ $mode == target ]]; then
core_jars_suffix=-testdex
diff --git a/tools/dist_linunx_bionic.sh b/tools/dist_linux_bionic.sh
similarity index 100%
rename from tools/dist_linunx_bionic.sh
rename to tools/dist_linux_bionic.sh
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index f4a2dc1..c85a5ed 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -55,9 +55,10 @@
done
}
-# Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
# because that's what we use for compiling the core.art image.
-BOOT_CLASSPATH_JARS="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart core-simple okhttp bouncycastle conscrypt"
vm_args=""
art="$android_root/bin/art"
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 63f1fce..63fe81b 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -57,9 +57,10 @@
done
}
-# Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
# because that's what we use for compiling the core.art image.
-BOOT_CLASSPATH_JARS="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart core-simple okhttp bouncycastle conscrypt"
DEPS="core-tests jsr166-tests mockito-target"