Fix go/lem breakages on baseline configs
- Don't collect profiling info for now, as compiled code references
them directly.
- Only compile optimized code after reaching the baseline hotness
threshold if tiered JIT is enabled.
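As a standalone illustration of the tiering rule this change installs (not
ART code; the enum, SelectTaskKind, and main scaffolding are made up for the
sketch), baseline becomes the first tier whenever tiered JIT or the explicit
--baseline compiler option is enabled:

    #include <cstdio>
    #include <initializer_list>

    enum class TaskKind { kCompile, kCompileBaseline };

    // Models the task-kind decision added in jit.cc below.
    TaskKind SelectTaskKind(bool tiered, bool baseline) {
      return (tiered || baseline) ? TaskKind::kCompileBaseline
                                  : TaskKind::kCompile;
    }

    int main() {
      for (bool tiered : {false, true}) {
        for (bool baseline : {false, true}) {
          std::printf("tiered=%d baseline=%d -> %s\n", tiered, baseline,
                      SelectTaskKind(tiered, baseline) == TaskKind::kCompileBaseline
                          ? "kCompileBaseline" : "kCompile");
        }
      }
      return 0;
    }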
Test: test.py, go/lem benchmarks.
Change-Id: I0d21d5f77825a710588ef5a7c11288a5b9757907
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ffcee4b..33355a0 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -249,6 +249,15 @@
<< ", compile_threshold=" << options->GetCompileThreshold()
<< ", profile_saver_options=" << options->GetProfileSaverOptions();
+ // We want to know whether the compiler is compiling baseline, as this
+ // affects how we GC ProfilingInfos.
+ for (const std::string& option : Runtime::Current()->GetCompilerOptions()) {
+ if (option == "--baseline") {
+ options->SetUseBaselineCompiler();
+ break;
+ }
+ }
+
// Notify native debugger about the classes already loaded before the creation of the jit.
jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
return jit.release();
@@ -1421,7 +1430,8 @@
// Note: Native methods have no "warm" state or profiling info.
if (!method->IsNative() &&
(method->GetProfilingInfo(kRuntimePointerSize) == nullptr) &&
- code_cache_->CanAllocateProfilingInfo()) {
+ code_cache_->CanAllocateProfilingInfo() &&
+ !options_->UseTieredJitCompilation()) {
bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
if (success) {
VLOG(jit) << "Start profiling " << method->PrettyMethod();
@@ -1452,9 +1462,10 @@
if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
DCHECK(thread_pool_ != nullptr);
- JitCompileTask::TaskKind kind = options_->UseTieredJitCompilation()
- ? JitCompileTask::TaskKind::kCompileBaseline
- : JitCompileTask::TaskKind::kCompile;
+ JitCompileTask::TaskKind kind =
+ (options_->UseTieredJitCompilation() || options_->UseBaselineCompiler())
+ ? JitCompileTask::TaskKind::kCompileBaseline
+ : JitCompileTask::TaskKind::kCompile;
thread_pool_->AddTask(self, new JitCompileTask(method, kind));
}
}
@@ -1474,8 +1485,13 @@
}
void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
- thread_pool_->AddTask(
- self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
+ // We arrive here after baseline compiled code has reached its baseline
+ // hotness threshold. If tiered compilation is enabled, enqueue a compilation
+ // task that will compile the method with the optimizing compiler.
+ if (options_->UseTieredJitCompilation()) {
+ thread_pool_->AddTask(
+ self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
+ }
}
class ScopedSetRuntimeThread {
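A standalone sketch (not ART code; the Options struct, task queue, and main
are illustrative) of the guard added to EnqueueOptimizedCompilation above:
the second-tier optimized compile is queued only under tiered JIT, so a pure
--baseline config keeps baseline code as its final tier.

    #include <iostream>
    #include <queue>
    #include <string>

    struct Options { bool use_tiered_jit_compilation; };

    // Hot baseline code reports in here; only tiered JIT escalates.
    void EnqueueOptimizedCompilation(const Options& options,
                                     std::queue<std::string>& tasks,
                                     const std::string& method) {
      if (options.use_tiered_jit_compilation) {
        tasks.push("kCompile:" + method);
      }
    }

    int main() {
      std::queue<std::string> tasks;
      EnqueueOptimizedCompilation(Options{false}, tasks, "Foo.bar");
      std::cout << tasks.size() << "\n";  // 0: baseline-only, nothing queued
      EnqueueOptimizedCompilation(Options{true}, tasks, "Foo.bar");
      std::cout << tasks.size() << "\n";  // 1: tiered, optimized compile queued
      return 0;
    }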
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 42adf6b..08a464e 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -118,6 +118,10 @@
return use_tiered_jit_compilation_;
}
+ bool CanCompileBaseline() const {
+ return use_tiered_jit_compilation_ || use_baseline_compiler_;
+ }
+
void SetUseJitCompilation(bool b) {
use_jit_compilation_ = b;
}
@@ -135,6 +139,14 @@
compile_threshold_ = 0;
}
+ void SetUseBaselineCompiler() {
+ use_baseline_compiler_ = true;
+ }
+
+ bool UseBaselineCompiler() const {
+ return use_baseline_compiler_;
+ }
+
private:
// We add the sample in batches of size kJitSamplesBatchSize.
// This method rounds the threshold so that it is multiple of the batch size.
@@ -142,6 +154,7 @@
bool use_jit_compilation_;
bool use_tiered_jit_compilation_;
+ bool use_baseline_compiler_;
size_t code_cache_initial_capacity_;
size_t code_cache_max_capacity_;
uint32_t compile_threshold_;
@@ -155,6 +168,8 @@
JitOptions()
: use_jit_compilation_(false),
+ use_tiered_jit_compilation_(false),
+ use_baseline_compiler_(false),
code_cache_initial_capacity_(0),
code_cache_max_capacity_(0),
compile_threshold_(0),
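Taken together with the option scan added in Jit::Create above, the new
jit.h fields behave as in this standalone sketch (not ART code;
DetectBaselineOption and main are illustrative):

    #include <string>
    #include <vector>

    struct JitOptions {
      bool use_tiered_jit_compilation = false;
      bool use_baseline_compiler = false;
      void SetUseBaselineCompiler() { use_baseline_compiler = true; }
      // True whenever some JIT-compiled code may be baseline code.
      bool CanCompileBaseline() const {
        return use_tiered_jit_compilation || use_baseline_compiler;
      }
    };

    // Mirrors the loop in Jit::Create: any --baseline among the runtime's
    // compiler options marks the JIT as producing baseline code.
    void DetectBaselineOption(JitOptions& options,
                              const std::vector<std::string>& compiler_options) {
      for (const std::string& option : compiler_options) {
        if (option == "--baseline") {
          options.SetUseBaselineCompiler();
          break;
        }
      }
    }

    int main() {
      JitOptions options;
      DetectBaselineOption(options, {"--debuggable", "--baseline"});
      return options.CanCompileBaseline() ? 0 : 1;  // exits 0: baseline detected
    }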
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 04e8d39..945600a 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1119,18 +1119,24 @@
// Start polling the liveness of compiled code to prepare for the next full collection.
if (next_collection_will_be_full) {
- // Save the entry point of methods we have compiled, and update the entry
- // point of those methods to the interpreter. If the method is invoked, the
- // interpreter will update its entry point to the compiled code and call it.
- for (ProfilingInfo* info : profiling_infos_) {
- const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
- info->SetSavedEntryPoint(entry_point);
- // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
- // class of the method. We may be concurrently running a GC which makes accessing
- // the class unsafe. We know it is OK to bypass the instrumentation as we've just
- // checked that the current entry point is JIT compiled code.
- info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ if (Runtime::Current()->GetJITOptions()->CanCompileBaseline()) {
+ for (ProfilingInfo* info : profiling_infos_) {
+ info->SetBaselineHotnessCount(0);
+ }
+ } else {
+ // Save the entry point of methods we have compiled, and update the entry
+ // point of those methods to the interpreter. If the method is invoked, the
+ // interpreter will update its entry point to the compiled code and call it.
+ for (ProfilingInfo* info : profiling_infos_) {
+ const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
+ info->SetSavedEntryPoint(entry_point);
+ // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
+ // class of the method. We may be concurrently running a GC which makes accessing
+ // the class unsafe. We know it is OK to bypass the instrumentation as we've just
+ // checked that the current entry point is JIT compiled code.
+ info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ }
}
}
@@ -1219,28 +1225,50 @@
ScopedTrace trace(__FUNCTION__);
{
MutexLock mu(self, *Locks::jit_lock_);
- if (collect_profiling_info) {
- // Clear the profiling info of methods that do not have compiled code as entrypoint.
- // Also remove the saved entry point from the ProfilingInfo objects.
- for (ProfilingInfo* info : profiling_infos_) {
- const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
- if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
- info->GetMethod()->SetProfilingInfo(nullptr);
- }
- if (info->GetSavedEntryPoint() != nullptr) {
- info->SetSavedEntryPoint(nullptr);
- // We are going to move this method back to interpreter. Clear the counter now to
- // give it a chance to be hot again.
- ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
+ if (Runtime::Current()->GetJITOptions()->CanCompileBaseline()) {
+ // Move methods that have a baseline entrypoint and whose baseline hotness
+ // count is zero back to the interpreter.
+ // Note that these methods may be on a thread's stack or concurrently revived
+ // in between. That's OK, as the thread executing such a method will mark it
+ // again.
+ for (ProfilingInfo* info : profiling_infos_) {
+ if (info->GetBaselineHotnessCount() == 0) {
+ const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (ContainsPc(entry_point)) {
+ OatQuickMethodHeader* method_header =
+ OatQuickMethodHeader::FromEntryPoint(entry_point);
+ if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr())) {
+ info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ }
+ }
}
}
- } else if (kIsDebugBuild) {
- // Sanity check that the profiling infos do not have a dangling entry point.
- for (ProfilingInfo* info : profiling_infos_) {
- DCHECK(!Runtime::Current()->IsZygote());
- const void* entry_point = info->GetSavedEntryPoint();
- DCHECK(entry_point == nullptr || IsInZygoteExecSpace(entry_point));
+ // TODO: collect profiling info
+ // TODO: collect optimized code?
+ } else {
+ if (collect_profiling_info) {
+ // Clear the profiling info of methods that do not have compiled code as entrypoint.
+ // Also remove the saved entry point from the ProfilingInfo objects.
+ for (ProfilingInfo* info : profiling_infos_) {
+ const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+ if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
+ info->GetMethod()->SetProfilingInfo(nullptr);
+ }
+
+ if (info->GetSavedEntryPoint() != nullptr) {
+ info->SetSavedEntryPoint(nullptr);
+ // We are going to move this method back to interpreter. Clear the counter now to
+ // give it a chance to be hot again.
+ ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
+ }
+ }
+ } else if (kIsDebugBuild) {
+ // Sanity check that the profiling infos do not have a dangling entry point.
+ for (ProfilingInfo* info : profiling_infos_) {
+ DCHECK(!Runtime::Current()->IsZygote());
+ const void* entry_point = info->GetSavedEntryPoint();
+ DCHECK(entry_point == nullptr || IsInZygoteExecSpace(entry_point));
+ }
}
}
@@ -1622,6 +1650,12 @@
return new_compilation;
} else {
ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+ if (CanAllocateProfilingInfo() && baseline && info == nullptr) {
+ // We can retry allocation here as we're the JIT thread.
+ if (ProfilingInfo::Create(self, method, /* retry_allocation= */ true)) {
+ info = method->GetProfilingInfo(kRuntimePointerSize);
+ }
+ }
if (info == nullptr) {
// When prejitting, we don't allocate a profiling info.
if (!prejit && !IsSharedRegion(*region)) {
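The two jit_code_cache.cc hunks above form a liveness protocol for baseline
configs. A standalone sketch (not ART code; the Method struct and helpers are
illustrative, and it assumes baseline code bumps the counter when it runs, as
the reset-then-check-zero logic implies):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Method {
      uint16_t baseline_hotness_count = 0;
      bool entry_point_is_baseline = true;  // baseline JIT code installed
      bool runs_interpreted = false;
    };

    // When the next collection will be full, reset all counters to probe
    // which methods execute in the meantime.
    void StartLivenessPolling(std::vector<Method>& methods) {
      for (Method& m : methods) m.baseline_hotness_count = 0;
    }

    void Execute(Method& m) { ++m.baseline_hotness_count; }  // done by baseline code

    // At collection time, methods still at zero with a baseline entrypoint go
    // back to the interpreter (SetEntryPointFromQuickCompiledCode(bridge)).
    void DoFullCollection(std::vector<Method>& methods) {
      for (Method& m : methods) {
        if (m.baseline_hotness_count == 0 && m.entry_point_is_baseline) {
          m.runs_interpreted = true;
        }
      }
    }

    int main() {
      std::vector<Method> methods(2);
      StartLivenessPolling(methods);
      Execute(methods[0]);  // only the first method runs in between
      DoFullCollection(methods);
      std::cout << methods[0].runs_interpreted << " "
                << methods[1].runs_interpreted << "\n";  // prints: 0 1
      return 0;
    }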
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index ada1036..14d76d2 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -130,6 +130,14 @@
return MemberOffset(OFFSETOF_MEMBER(ProfilingInfo, baseline_hotness_count_));
}
+ void SetBaselineHotnessCount(uint16_t count) {
+ baseline_hotness_count_ = count;
+ }
+
+ uint16_t GetBaselineHotnessCount() const {
+ return baseline_hotness_count_;
+ }
+
private:
ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries);
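Usage of the new accessors, as a standalone sketch (not ART code; main is
illustrative): the code cache writes zero to probe liveness, while compiled
baseline code bumps the same field directly through
BaselineHotnessCountOffset, which is why the commit message keeps
ProfilingInfo objects from being collected for now.

    #include <cassert>
    #include <cstdint>

    class ProfilingInfo {
     public:
      void SetBaselineHotnessCount(uint16_t count) { baseline_hotness_count_ = count; }
      uint16_t GetBaselineHotnessCount() const { return baseline_hotness_count_; }
     private:
      uint16_t baseline_hotness_count_ = 0;
    };

    int main() {
      ProfilingInfo info;
      info.SetBaselineHotnessCount(0);  // collector probe
      // Stand-in for what baseline code does via the raw member offset:
      info.SetBaselineHotnessCount(info.GetBaselineHotnessCount() + 1);
      assert(info.GetBaselineHotnessCount() == 1);
      return 0;
    }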