Get the baseline information from the graph instead of the compiler options.
Baseline compilation can be enabled either by the compiler options or by the JIT.
Test: test.py
Bug: 119800099
Change-Id: I702bd7642dfd3353c9ad99cb6ac425c090e16101
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 894c7a4..d3ce2db 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4084,7 +4084,7 @@
// We know the destination of an intrinsic, so no need to record inline
// caches.
if (!instruction->GetLocations()->Intrinsified() &&
- GetCompilerOptions().IsBaseline() &&
+ GetGraph()->IsCompilingBaseline() &&
!Runtime::Current()->IsAotCompiler()) {
DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
ScopedObjectAccess soa(Thread::Current());
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 49a608e..4932a2c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3345,7 +3345,7 @@
// We know the destination of an intrinsic, so no need to record inline
// caches.
if (!instruction->GetLocations()->Intrinsified() &&
- GetCompilerOptions().IsBaseline() &&
+ GetGraph()->IsCompilingBaseline() &&
!Runtime::Current()->IsAotCompiler()) {
DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
ScopedObjectAccess soa(Thread::Current());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3077be0..c3cd25c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2305,7 +2305,8 @@
}
HandleInvoke(invoke);
- if (codegen_->GetCompilerOptions().IsBaseline() && !Runtime::Current()->IsAotCompiler()) {
+
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
// Add one temporary for inline cache update.
invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
}
@@ -2333,7 +2334,7 @@
// Add the hidden argument.
invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM7));
- if (codegen_->GetCompilerOptions().IsBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
// Add one temporary for inline cache update.
invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
}
@@ -2345,7 +2346,7 @@
// caches (also the intrinsic location builder doesn't request an additional
// temporary).
if (!instruction->GetLocations()->Intrinsified() &&
- GetCompilerOptions().IsBaseline() &&
+ GetGraph()->IsCompilingBaseline() &&
!Runtime::Current()->IsAotCompiler()) {
DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
ScopedObjectAccess soa(Thread::Current());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index dd3a4f4..5d4cfb4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2560,7 +2560,7 @@
// We know the destination of an intrinsic, so no need to record inline
// caches.
if (!instruction->GetLocations()->Intrinsified() &&
- GetCompilerOptions().IsBaseline() &&
+ GetGraph()->IsCompilingBaseline() &&
!Runtime::Current()->IsAotCompiler()) {
ScopedObjectAccess soa(Thread::Current());
ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);