Merge "ART: add dump region info runtime option"
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 5208d64..c73b988 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -74,7 +74,8 @@
TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
# Modules to compile for core.art.
-CORE_IMG_JARS := core-oj core-libart core-simple okhttp bouncycastle apache-xml
+# TODO: Move conscrypt from CORE_IMG_JARS to TEST_CORE_JARS and adjust scripts to fix Golem.
+CORE_IMG_JARS := core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt
HOST_CORE_IMG_JARS := $(addsuffix -hostdex,$(CORE_IMG_JARS))
TARGET_CORE_IMG_JARS := $(addsuffix -testdex,$(CORE_IMG_JARS))
HOST_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_IMG_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
@@ -87,7 +88,7 @@
TARGET_CORE_IMG_DEX_FILES := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
# Jar files for the boot class path for testing. Must start with CORE_IMG_JARS.
-TEST_CORE_JARS := $(CORE_IMG_JARS) conscrypt
+TEST_CORE_JARS := $(CORE_IMG_JARS)
HOST_TEST_CORE_JARS := $(addsuffix -hostdex,$(TEST_CORE_JARS))
TARGET_TEST_CORE_JARS := $(addsuffix -testdex,$(TEST_CORE_JARS))
HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_TEST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
index fddb17e..ac4d1eb 100644
--- a/build/apex/ld.config.txt
+++ b/build/apex/ld.config.txt
@@ -1,24 +1 @@
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Bionic loader config file for the Runtime APEX.
-#
-# There are no versioned APEX paths here - this APEX module does not support
-# having several versions mounted.
-
-dir.runtime = /apex/com.android.runtime/bin/
-
-[runtime]
-additional.namespaces = platform
-
-# Keep in sync with runtime namespace in /system/etc/ld.config.txt.
-namespace.default.isolated = true
-namespace.default.search.paths = /apex/com.android.runtime/${LIB}
-namespace.default.links = platform
-# TODO(b/119867084): Restrict fallback to platform namespace to PALette library.
-namespace.default.link.platform.allow_all_shared_libs = true
-
-# Keep in sync with default namespace in /system/etc/ld.config.txt.
-namespace.platform.isolated = true
-namespace.platform.search.paths = /system/${LIB}
-namespace.platform.links = default
-namespace.platform.link.default.shared_libs = libc.so:libdl.so:libm.so
+# TODO: Write me.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1688ea7..0b17c9d 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2927,7 +2927,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
// Lower the invoke of CRC32.update(int crc, int b).
@@ -2945,9 +2945,13 @@
// result = crc32_for_byte(crc, b)
// crc = ~result
// It is directly lowered to three instructions.
- __ Mvn(out, crc);
- __ Crc32b(out, out, val);
- __ Mvn(out, out);
+
+ UseScratchRegisterScope temps(masm);
+ Register tmp = temps.AcquireSameSizeAs(out);
+
+ __ Mvn(tmp, crc);
+ __ Crc32b(tmp, tmp, val);
+ __ Mvn(out, tmp);
}
// The threshold for sizes of arrays to use the library provided implementation
diff --git a/libartbase/base/time_utils.cc b/libartbase/base/time_utils.cc
index 89a1109..cb30246 100644
--- a/libartbase/base/time_utils.cc
+++ b/libartbase/base/time_utils.cc
@@ -128,7 +128,7 @@
timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000);
-#else // __APPLE__
+#else
timeval now;
gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000);
@@ -140,7 +140,7 @@
timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
-#else // __APPLE__
+#else
timeval now;
gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec;
@@ -152,7 +152,7 @@
timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
-#else // __APPLE__
+#else
timeval now;
gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000);
@@ -164,7 +164,7 @@
timespec now;
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
-#else // __APPLE__
+#else
UNIMPLEMENTED(WARNING);
return -1;
#endif
@@ -176,8 +176,13 @@
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
#else
- UNIMPLEMENTED(WARNING);
- return -1;
+ // We cannot use clock_gettime() here. Return the process wall clock time
+ // (using art::NanoTime, which relies on gettimeofday()) as approximation of
+ // the process CPU time instead.
+ //
+ // Note: clock_gettime() is available from macOS 10.12 (Darwin 16), but we try
+ // to keep things simple here.
+ return NanoTime();
#endif
}
diff --git a/libdexfile/external/dex_file_ext.cc b/libdexfile/external/dex_file_ext.cc
index 3c193f4..5c353b5 100644
--- a/libdexfile/external/dex_file_ext.cc
+++ b/libdexfile/external/dex_file_ext.cc
@@ -55,7 +55,6 @@
int32_t offset; // Offset relative to the start of the dex file header.
int32_t len;
int32_t index; // Method index.
- std::string name; // Method name. Not filled in for all cache entries.
};
class MappedFileContainer : public DexFileContainer {
@@ -133,8 +132,7 @@
int32_t offset = reinterpret_cast<const uint8_t*>(code.Insns()) - dex_file_->Begin();
int32_t len = code.InsnsSizeInBytes();
int32_t index = method.GetIndex();
- auto res =
- method_cache_.emplace(offset + len, art::MethodCacheEntry{offset, len, index, ""});
+ auto res = method_cache_.emplace(offset + len, art::MethodCacheEntry{offset, len, index});
if (offset <= dex_offset && dex_offset < offset + len) {
return &res.first->second;
}
@@ -143,13 +141,6 @@
return nullptr;
}
-
- const std::string& GetMethodName(art::MethodCacheEntry& entry) {
- if (entry.name.empty()) {
- entry.name = dex_file_->PrettyMethod(entry.index, false);
- }
- return entry.name;
- }
};
int ExtDexFileOpenFromMemory(const void* addr,
@@ -312,7 +303,8 @@
if (entry != nullptr) {
method_info->offset = entry->offset;
method_info->len = entry->len;
- method_info->name = new ExtDexFileString{ext_dex_file->GetMethodName(*entry)};
+ method_info->name =
+ new ExtDexFileString{ext_dex_file->dex_file_->PrettyMethod(entry->index, false)};
return true;
}
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 4780f16..71c5b74 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -345,6 +345,11 @@
static_libs: [
"libz", // For adler32.
],
+ cflags: [
+ // ART is allowed to link to libicuuc directly
+ // since they are in the same module
+ "-DANDROID_LINK_SHARED_ICU4C",
+ ],
},
android_arm: {
ldflags: JIT_DEBUG_REGISTER_CODE_LDFLAGS,
@@ -377,12 +382,12 @@
export_generated_headers: ["cpp-define-generator-asm-support"],
include_dirs: [
"art/sigchainlib",
- "external/icu/icu4c/source/common",
"external/zlib",
],
header_libs: [
"art_cmdlineparser_headers",
"cpp-define-generator-definitions",
+ "libicuuc_headers",
"libnativehelper_header_only",
"jni_platform_headers",
],
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 2c80f93..f7f3a8d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -95,6 +95,7 @@
weak_ref_access_enabled_(true),
copied_live_bytes_ratio_sum_(0.f),
gc_count_(0),
+ inter_region_bitmap_(nullptr),
reclaimed_bytes_ratio_sum_(0.f),
young_gen_(young_gen),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
@@ -288,6 +289,9 @@
void ConcurrentCopying::BindBitmaps() {
Thread* self = Thread::Current();
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ uintptr_t continuous_spaces_begin = UINTPTR_MAX;
+ uintptr_t continuous_spaces_limit = 0;
+ DCHECK(inter_region_bitmap_ == nullptr);
// Mark all of the spaces we never collect as immune.
for (const auto& space : heap_->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
@@ -319,6 +323,11 @@
// be captured after the thread-flip of this GC cycle, as that is when
// the young-gen for the next GC cycle starts getting populated.
heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
+
+ continuous_spaces_begin =
+ std::min(continuous_spaces_begin, reinterpret_cast<uintptr_t>(space->Begin()));
+ continuous_spaces_limit =
+ std::max(continuous_spaces_limit, reinterpret_cast<uintptr_t>(space->Limit()));
}
} else {
if (space == region_space_) {
@@ -330,10 +339,18 @@
}
}
}
- if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
- for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
- CHECK(space->IsLargeObjectSpace());
- space->AsLargeObjectSpace()->CopyLiveToMarked();
+ if (kEnableGenerationalConcurrentCopyingCollection) {
+ if (young_gen_) {
+ for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
+ CHECK(space->IsLargeObjectSpace());
+ space->AsLargeObjectSpace()->CopyLiveToMarked();
+ }
+ } else {
+ inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+ "inter region ref bitmap",
+ reinterpret_cast<uint8_t*>(continuous_spaces_begin),
+ continuous_spaces_limit - continuous_spaces_begin));
+ CHECK(inter_region_bitmap_ != nullptr) << "Couldn't allocate inter region ref bitmap";
}
}
}
@@ -1100,7 +1117,7 @@
// Mark the corresponding card dirty if the object contains any
// inter-region reference.
if (visitor.ContainsInterRegionRefs()) {
- heap_->GetCardTable()->MarkCard(ref);
+ inter_region_bitmap_->Set(ref);
}
}
@@ -1316,15 +1333,6 @@
// Process mark stack
ProcessMarkStackForMarkingAndComputeLiveBytes();
- // Age the cards.
- for (space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
- if (space->IsImageSpace() || space->IsZygoteSpace()) {
- // Image and zygote spaces are already handled since we gray the objects in the pause.
- continue;
- }
- card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
- }
-
if (kVerboseMode) {
LOG(INFO) << "GC end of MarkingPhase";
}
@@ -1420,10 +1428,28 @@
}
ScanDirtyObject</*kNoUnEvac*/ true>(obj);
} else if (space != region_space_ || region_space_->IsInUnevacFromSpace(obj)) {
+ // We need to process un-evac references as they may be unprocessed,
+ // if they skipped the marking phase due to heap mutation.
ScanDirtyObject</*kNoUnEvac*/ false>(obj);
+ inter_region_bitmap_->Clear(obj);
}
},
- accounting::CardTable::kCardDirty - 1);
+ accounting::CardTable::kCardAged);
+
+ if (!young_gen_) {
+ auto visitor = [this](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // We don't need to process un-evac references as any unprocessed
+ // ones will be taken care of in the card-table scan above.
+ ScanDirtyObject</*kNoUnEvac*/ true>(obj);
+ };
+ if (space == region_space_) {
+ region_space_->ScanUnevacFromSpace(inter_region_bitmap_.get(), visitor);
+ } else {
+ inter_region_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->End()),
+ visitor);
+ }
+ }
}
// Done scanning unevac space.
done_scanning_.store(true, std::memory_order_release);
@@ -3500,6 +3526,8 @@
TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
// We do not currently use the region space cards at all, madvise them away to save ram.
heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
+ } else if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
+ inter_region_bitmap_.reset();
}
{
MutexLock mu(self, skipped_blocks_lock_);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 4442ad5..aabfc8e 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -25,7 +25,7 @@
#include "mirror/object_reference.h"
#include "offsets.h"
-#include <unordered_map>
+#include <memory>
#include <vector>
namespace art {
@@ -389,6 +389,9 @@
// possible for minor GC if all allocated objects are in non-moving
// space.)
size_t gc_count_;
+ // Bit is set if the corresponding object has inter-region references that
+ // were found during the marking phase of two-phase full-heap GC cycle.
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> inter_region_bitmap_;
// reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
float reclaimed_bytes_ratio_sum_;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index dbec4ea..9f5c117 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -193,6 +193,40 @@
return bytes;
}
+template <typename Visitor>
+inline void RegionSpace::ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
+ Visitor&& visitor) {
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_ : std::min(num_regions_, non_free_region_index_limit_);
+ // Instead of region-wise scan, find contiguous blocks of un-evac regions and then
+ // visit them. Everything before visit_block_begin has been processed, while
+ // [visit_block_begin, visit_block_end) still needs to be visited.
+ uint8_t* visit_block_begin = nullptr;
+ uint8_t* visit_block_end = nullptr;
+ for (size_t i = 0; i < iter_limit; ++i) {
+ Region* r = &regions_[i];
+ if (r->IsInUnevacFromSpace()) {
+ // visit_block_begin set to nullptr means a new visit block needs to be started.
+ if (visit_block_begin == nullptr) {
+ visit_block_begin = r->Begin();
+ }
+ visit_block_end = r->End();
+ } else if (visit_block_begin != nullptr) {
+ // Visit the block range as r is not adjacent to current visit block.
+ bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
+ reinterpret_cast<uintptr_t>(visit_block_end),
+ visitor);
+ visit_block_begin = nullptr;
+ }
+ }
+ // Visit last block, if not processed yet.
+ if (visit_block_begin != nullptr) {
+ bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
+ reinterpret_cast<uintptr_t>(visit_block_end),
+ visitor);
+ }
+}
+
template<bool kToSpaceOnly, typename Visitor>
inline void RegionSpace::WalkInternal(Visitor&& visitor) {
// TODO: MutexLock on region_lock_ won't work due to lock order
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 0d5ebcc..75c99ec 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -209,6 +209,15 @@
template <typename Visitor>
ALWAYS_INLINE void WalkToSpace(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
+ // Scans regions and calls visitor for objects in unevac-space corresponding
+ // to the bits set in 'bitmap'.
+ // Cannot acquire region_lock_ as visitor may need to acquire it for allocation.
+ // Should not be called concurrently with functions (like SetFromSpace()) which
+ // change regions' type.
+ template <typename Visitor>
+ ALWAYS_INLINE void ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
+ Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
+
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return nullptr;
}
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index 82ea476..db43b24 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -219,7 +219,7 @@
V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \
V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \
V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \
- V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
+ V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
V(CRC32UpdateBytes, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "updateBytes", "(I[BII)I") \
SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
diff --git a/test/918-fields/expected.txt b/test/918-fields/expected.txt
index af78615..0114ccc 100644
--- a/test/918-fields/expected.txt
+++ b/test/918-fields/expected.txt
@@ -2,9 +2,9 @@
class java.lang.Math
25
false
-[value, I, null]
-class java.lang.Integer
-18
+[bytesTransferred, I, null]
+class java.io.InterruptedIOException
+1
false
[this$0, Lart/Test918;, null]
class art.Test918$Foo
@@ -18,3 +18,7 @@
class art.Test918$Generics
0
false
+[privateValue, I, null]
+class art.Test918$Generics
+2
+false
diff --git a/test/918-fields/src/art/Test918.java b/test/918-fields/src/art/Test918.java
index ca23c03..5328b0b 100644
--- a/test/918-fields/src/art/Test918.java
+++ b/test/918-fields/src/art/Test918.java
@@ -16,6 +16,7 @@
package art;
+import java.io.InterruptedIOException;
import java.lang.reflect.Field;
import java.util.Arrays;
@@ -26,10 +27,11 @@
public static void doTest() throws Exception {
testField(Math.class, "PI");
- testField(Integer.class, "value");
+ testField(InterruptedIOException.class, "bytesTransferred");
testField(Foo.class, "this$0");
testField(Bar.class, "VAL");
testField(Generics.class, "generics");
+ testField(Generics.class, "privateValue");
}
private static void testField(Class<?> base, String fieldName)
@@ -71,5 +73,6 @@
private static class Generics<T> {
T generics;
+ private int privateValue = 42;
}
}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 4e39d09..0674f52 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -51,6 +51,7 @@
STRIP_DEX="n"
SECONDARY_DEX=""
TIME_OUT="gdb" # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
+TIMEOUT_DUMPER=timeout_dumper
# Value in seconds
if [ "$ART_USE_READ_BARRIER" != "false" ]; then
TIME_OUT_VALUE=2400 # 40 minutes.
@@ -691,6 +692,8 @@
echo "linux_bionic-x86 target doesn't seem to have been built!" >&2
exit 1
fi
+ # Set timeout_dumper manually so it works even with APEXes
+ TIMEOUT_DUMPER=$OUT_DIR/soong/host/linux_bionic-x86/bin/timeout_dumper
fi
# Prevent test from silently falling back to interpreter in no-prebuild mode. This happens
@@ -1029,7 +1032,7 @@
# Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump
# before abort. However, dumping threads might deadlock, so we also use the "-k"
# option to definitely kill the child.
- cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s timeout_dumper $cmdline"
+ cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s ${TIMEOUT_DUMPER} $cmdline"
fi
if [ "$DEV_MODE" = "y" ]; then
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 5bcd1c0..879f2fd 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1161,5 +1161,11 @@
"tests": ["454-get-vreg", "457-regs"],
"variant": "baseline",
"description": ["Tests are expected to fail with baseline."]
+ },
+ {
+ "tests": ["708-jit-cache-churn"],
+ "variant": "jit-on-first-use",
+ "bug": "b/120112467",
+ "description": [ "Fails on Android Build hosts with uncaught std::bad_alloc." ]
}
]