Clear inline caches at each full GC.

This fixes occasional failures of test 141-class-unload.

Also fix a bug where clearing the inline caches also cleared the
dex pcs associated with them.
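
For reviewers, a self-contained sketch of the scheme using simplified,
hypothetical names (Info, ScopedInlineUse - not the actual ART types):
each inlining attempt holds an RAII guard that bumps a per-info counter
under a lock, and the full-GC hook skips clearing the inline caches of
any info whose counter is non-zero.

    #include <algorithm>
    #include <cstdint>
    #include <mutex>
    #include <vector>

    // Stand-in for ProfilingInfo: a use counter plus slots that model
    // the GcRoot<mirror::Class> entries of its inline caches.
    struct Info {
      uint16_t inline_uses = 0;
      std::vector<void*> cache_slots = std::vector<void*>(4, nullptr);
    };

    std::mutex cache_lock;  // models JitCodeCache::lock_

    // Models ScopedProfilingInfoInlineUse: pins the caches for the
    // duration of one inlining attempt.
    class ScopedInlineUse {
     public:
      explicit ScopedInlineUse(Info* info) : info_(info) {
        std::lock_guard<std::mutex> mu(cache_lock);
        ++info_->inline_uses;
      }
      ~ScopedInlineUse() {
        std::lock_guard<std::mutex> mu(cache_lock);
        --info_->inline_uses;
      }
     private:
      Info* const info_;
    };

    // Models JitCodeCache::ClearGcRootsInInlineCaches, called at each
    // full GC: caches pinned by a compiler thread are left alone.
    void ClearGcRootsInInlineCaches(std::vector<Info*>& infos) {
      std::lock_guard<std::mutex> mu(cache_lock);
      for (Info* info : infos) {
        if (info->inline_uses == 0) {
          std::fill(info->cache_slots.begin(), info->cache_slots.end(), nullptr);
        }
      }
    }
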
bug:26846185
bug:23128949
Change-Id: I77bf1dee229d7764c3cc21440829c7fba7b37001
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 6ff1e2e..eeb2576 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -168,13 +168,14 @@
compiler_driver_->SetDedupeEnabled(false);
compiler_driver_->SetSupportBootImageFixup(false);
+ size_t thread_count = compiler_driver_->GetThreadCount();
if (compiler_options_->GetGenerateDebugInfo()) {
#ifdef __ANDROID__
const char* prefix = "/data/misc/trace";
#else
const char* prefix = "/tmp";
#endif
- DCHECK_EQ(compiler_driver_->GetThreadCount(), 1u)
+ DCHECK_EQ(thread_count, 1u)
<< "Generating debug info only works with one compiler thread";
std::string perf_filename = std::string(prefix) + "/perf-" + std::to_string(getpid()) + ".map";
perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str()));
@@ -183,6 +184,10 @@
" Are you on a user build? Perf only works on userdebug/eng builds";
}
}
+
+ size_t inline_depth_limit = compiler_driver_->GetCompilerOptions().GetInlineDepthLimit();
+ DCHECK_LT(thread_count * inline_depth_limit, std::numeric_limits<uint16_t>::max())
+ << "ProfilingInfo's inline counter can potentially overflow";
}
JitCompiler::~JitCompiler() {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3e3719e..bbdac26 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -28,6 +28,8 @@
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -220,6 +222,20 @@
return index;
}
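+// RAII helper that notifies the JIT code cache when the compiler starts
+// and stops using a method's inline caches for inlining, so that a
+// concurrent full GC does not clear the caches while they are being read.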
+class ScopedProfilingInfoInlineUse {
+ public:
+ explicit ScopedProfilingInfoInlineUse(ArtMethod* method) : method_(method) {
+ Runtime::Current()->GetJit()->GetCodeCache()->NotifyInliningOf(method_, Thread::Current());
+ }
+
+ ~ScopedProfilingInfoInlineUse() {
+ Runtime::Current()->GetJit()->GetCodeCache()->DoneInlining(method_, Thread::Current());
+ }
+
+ private:
+ ArtMethod* const method_;
+};
+
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved()) {
return false; // Don't bother to move further if we know the method is unresolved.
@@ -272,29 +288,32 @@
// Check if we can use an inline cache.
ArtMethod* caller = graph_->GetArtMethod();
size_t pointer_size = class_linker->GetImagePointerSize();
- // Under JIT, we should always know the caller.
- DCHECK(!Runtime::Current()->UseJit() || (caller != nullptr));
- if (caller != nullptr && caller->GetProfilingInfo(pointer_size) != nullptr) {
+ if (Runtime::Current()->UseJit()) {
+ // Under JIT, we should always know the caller.
+ DCHECK(caller != nullptr);
+ ScopedProfilingInfoInlineUse spiis(caller);
ProfilingInfo* profiling_info = caller->GetProfilingInfo(pointer_size);
- const InlineCache& ic = *profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
- if (ic.IsUnitialized()) {
- VLOG(compiler) << "Interface or virtual call to "
- << PrettyMethod(method_index, caller_dex_file)
- << " is not hit and not inlined";
- return false;
- } else if (ic.IsMonomorphic()) {
- MaybeRecordStat(kMonomorphicCall);
- return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
- } else if (ic.IsPolymorphic()) {
- MaybeRecordStat(kPolymorphicCall);
- return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
- } else {
- DCHECK(ic.IsMegamorphic());
- VLOG(compiler) << "Interface or virtual call to "
- << PrettyMethod(method_index, caller_dex_file)
- << " is megamorphic and not inlined";
- MaybeRecordStat(kMegamorphicCall);
- return false;
+ if (profiling_info != nullptr) {
+ const InlineCache& ic = *profiling_info->GetInlineCache(invoke_instruction->GetDexPc());
+ if (ic.IsUnitialized()) {
+ VLOG(compiler) << "Interface or virtual call to "
+ << PrettyMethod(method_index, caller_dex_file)
+ << " is not hit and not inlined";
+ return false;
+ } else if (ic.IsMonomorphic()) {
+ MaybeRecordStat(kMonomorphicCall);
+ return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic);
+ } else if (ic.IsPolymorphic()) {
+ MaybeRecordStat(kPolymorphicCall);
+ return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic);
+ } else {
+ DCHECK(ic.IsMegamorphic());
+ VLOG(compiler) << "Interface or virtual call to "
+ << PrettyMethod(method_index, caller_dex_file)
+ << " is megamorphic and not inlined";
+ MaybeRecordStat(kMegamorphicCall);
+ return false;
+ }
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 3480483..01dff19 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -59,6 +59,8 @@
#include "heap-inl.h"
#include "image.h"
#include "intern_table.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -2669,6 +2671,12 @@
// permanently disabled. b/17942071
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
+
+ if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) {
+ // It's time to clear all inline caches, so that classes that are only
+ // reachable through them can be unloaded.
+ runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self);
+ }
+
CHECK(collector != nullptr)
<< "Could not find garbage collector with collector_type="
<< static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1545cb7..4f87e5b 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -293,6 +293,15 @@
}
}
+void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) {
+ MutexLock mu(self, lock_);
+ for (ProfilingInfo* info : profiling_infos_) {
+ if (!info->IsInUseByCompiler()) {
+ info->ClearGcRootsInInlineCaches();
+ }
+ }
+}
+
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
const uint8_t* mapping_table,
@@ -675,7 +684,7 @@
// Also remove the saved entry point from the ProfilingInfo objects.
for (ProfilingInfo* info : profiling_infos_) {
const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (!ContainsPc(ptr) && !info->IsMethodBeingCompiled()) {
+ if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) {
info->GetMethod()->SetProfilingInfo(nullptr);
}
info->SetSavedEntryPoint(nullptr);
@@ -727,7 +736,7 @@
// code cache collection.
if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) {
// We clear the inline caches as classes in them might be stale.
- info->ClearInlineCaches();
+ info->ClearGcRootsInInlineCaches();
// Do a fence to make sure the clearing is seen before attaching to the method.
QuasiAtomic::ThreadFenceRelease();
info->GetMethod()->SetProfilingInfo(info);
@@ -915,6 +924,22 @@
return true;
}
+void JitCodeCache::NotifyInliningOf(ArtMethod* method, Thread* self) {
+ MutexLock mu(self, lock_);
+ ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ if (info != nullptr) {
+ info->IncrementInlineUse();
+ }
+}
+
+void JitCodeCache::DoneInlining(ArtMethod* method, Thread* self) {
+ MutexLock mu(self, lock_);
+ ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+ if (info != nullptr) {
+ info->DecrementInlineUse();
+ }
+}
+
void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) {
ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
DCHECK(info->IsMethodBeingCompiled());
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 0bd4f7d..113bebf 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -71,10 +71,18 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
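+ // Notify the code cache that the compiler will read the inline caches of
+ // `method` while inlining into it; this pins them against GC clearing.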
+ void NotifyInliningOf(ArtMethod* method, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_);
+
void DoneCompiling(ArtMethod* method, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
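+ // Undo a previous NotifyInliningOf: the compiler is done reading the
+ // inline caches of `method`.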
+ void DoneInlining(ArtMethod* method, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_);
+
// Allocate and write code and its metadata to the code cache.
uint8_t* CommitCode(Thread* self,
ArtMethod* method,
@@ -143,6 +151,8 @@
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
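+ // Called at each full GC: clears the class GC roots of all inline caches
+ // that are not currently in use by a compiler thread.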
+ void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
+
// Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
// will collect and retry if the first allocation is unsuccessful.
ProfilingInfo* AddProfilingInfo(Thread* self,
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 3820592..07c8051 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -97,8 +97,8 @@
}
}
}
- // Unsuccessfull - cache is full, making it megamorphic.
- DCHECK(cache->IsMegamorphic());
+ // Unsuccessful - the cache is full, making it megamorphic. We do not DCHECK
+ // it though, as the garbage collector might clear the entries concurrently.
}
} // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a8c056c..73c1a1e 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -134,8 +134,27 @@
return saved_entry_point_;
}
- void ClearInlineCaches() {
- memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
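+ // Clear only the class GC roots of each cache. Unlike the old
+ // ClearInlineCaches, this preserves the dex_pc_ recorded at construction.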
+ void ClearGcRootsInInlineCaches() {
+ for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+ InlineCache* cache = &cache_[i];
+ memset(&cache->classes_[0],
+ 0,
+ InlineCache::kIndividualCacheSize * sizeof(GcRoot<mirror::Class>));
+ }
+ }
+
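+ // Called under the JIT code cache lock while the compiler is reading the
+ // inline caches for inlining (see ScopedProfilingInfoInlineUse).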
+ void IncrementInlineUse() {
+ DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max());
+ current_inline_uses_++;
+ }
+
+ void DecrementInlineUse() {
+ DCHECK_GT(current_inline_uses_, 0);
+ current_inline_uses_--;
+ }
+
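+ // Whether the JIT compiler is currently using this info, either because it
+ // is compiling the method or reading its inline caches while inlining.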
+ bool IsInUseByCompiler() const {
+ return IsMethodBeingCompiled() || (current_inline_uses_ > 0);
}
private:
@@ -143,8 +162,9 @@
: number_of_inline_caches_(entries.size()),
method_(method),
is_method_being_compiled_(false),
+ current_inline_uses_(0),
saved_entry_point_(nullptr) {
- ClearInlineCaches();
+ memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
for (size_t i = 0; i < number_of_inline_caches_; ++i) {
cache_[i].dex_pc_ = entries[i];
}
@@ -161,6 +181,10 @@
// TODO: Make the JIT code cache lock global.
bool is_method_being_compiled_;
+ // While the compiler is reading the inline caches of the method associated
+ // with this ProfilingInfo, it keeps this counter incremented so that the GC
+ // does not try to clear them.
+ uint16_t current_inline_uses_;
+
// Entry point of the corresponding ArtMethod, while the JIT code cache
// is poking for the liveness of compiled code.
const void* saved_entry_point_;
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index bcb697a..15683b0 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -181,6 +181,7 @@
Class intHolder = loader.loadClass("IntHolder");
Method loadLibrary = intHolder.getDeclaredMethod("loadLibrary", String.class);
loadLibrary.invoke(intHolder, nativeLibraryName);
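+ // Make sure the JIT is done compiling before we take the weak reference,
+ // so that no in-flight compilation keeps the class loader live.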
+ waitForCompilation(intHolder);
return new WeakReference(loader);
}