Merge "MethodHandles: Implement MethodHandle.asType."
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 13a3235..a5bb117 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -182,7 +182,7 @@
struct CmdlineType<Memory<Divisor>> : CmdlineTypeParser<Memory<Divisor>> {
using typename CmdlineTypeParser<Memory<Divisor>>::Result;
- Result Parse(const std::string arg) {
+ Result Parse(const std::string& arg) {
CMDLINE_DEBUG_LOG << "Parsing memory: " << arg << std::endl;
size_t val = ParseMemoryOption(arg.c_str(), Divisor);
CMDLINE_DEBUG_LOG << "Memory parsed to size_t value: " << val << std::endl;
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 34fd88b..db0fdaa 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_
#include "compiler_callbacks.h"
+#include "verifier/verifier_deps.h"
namespace art {
@@ -46,16 +47,16 @@
}
verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE {
- return verifier_deps_;
+ return verifier_deps_.get();
}
- void SetVerifierDeps(verifier::VerifierDeps* deps) {
- verifier_deps_ = deps;
+ void SetVerifierDeps(verifier::VerifierDeps* deps) OVERRIDE {
+ verifier_deps_.reset(deps);
}
private:
VerificationResults* const verification_results_;
- verifier::VerifierDeps* verifier_deps_;
+ std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
};
} // namespace art
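
With this change the callbacks object becomes the single owner of the VerifierDeps: SetVerifierDeps() adopts the raw pointer into a std::unique_ptr, and reset() releases any previously held instance. A minimal sketch of the ownership pattern (class and names simplified, not the real API):

    #include <memory>

    struct Deps {};  // stand-in for verifier::VerifierDeps

    class Callbacks {  // simplified shape of the patched class
     public:
      Deps* GetDeps() const { return deps_.get(); }    // non-owning view
      void SetDeps(Deps* deps) { deps_.reset(deps); }  // adopts ownership, frees the old one
     private:
      std::unique_ptr<Deps> deps_;
    };
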
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 1b87725..223be88 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -39,6 +39,7 @@
#include "compiled_class.h"
#include "compiled_method.h"
#include "compiler.h"
+#include "compiler_callbacks.h"
#include "compiler_driver-inl.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
@@ -393,6 +394,7 @@
void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
+ verifier::VerifierDeps* verifier_deps,
TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
@@ -404,7 +406,7 @@
// 2) Resolve all classes
// 3) Attempt to verify all classes
// 4) Attempt to initialize image classes, and trivially initialized classes
- PreCompile(class_loader, dex_files, timings);
+ PreCompile(class_loader, dex_files, verifier_deps, timings);
if (GetCompilerOptions().IsBootImage()) {
// We don't need to set up the intrinsics for non-boot-image compilation, as
// those compilations will pick up a boot image that has the ArtMethod already
@@ -676,7 +678,7 @@
InitializeThreadPools();
- PreCompile(jclass_loader, dex_files, timings);
+ PreCompile(jclass_loader, dex_files, /* verifier_deps */ nullptr, timings);
// Can we run the DEX-to-DEX compiler on this class?
optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
@@ -873,6 +875,7 @@
void CompilerDriver::PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
+ verifier::VerifierDeps* verifier_deps,
TimingLogger* timings) {
CheckThreadPools();
@@ -906,7 +909,7 @@
VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
}
- Verify(class_loader, dex_files, timings);
+ Verify(class_loader, dex_files, verifier_deps, timings);
VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
@@ -1522,7 +1525,7 @@
if (!use_dex_cache) {
bool method_in_image = false;
- const std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
+ const std::vector<gc::space::ImageSpace*>& image_spaces = heap->GetBootImageSpaces();
for (gc::space::ImageSpace* image_space : image_spaces) {
const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
if (method_section.Contains(reinterpret_cast<uint8_t*>(method) - image_space->Begin())) {
@@ -1932,15 +1935,61 @@
}
}
-void CompilerDriver::Verify(jobject class_loader,
+void CompilerDriver::Verify(jobject jclass_loader,
const std::vector<const DexFile*>& dex_files,
+ verifier::VerifierDeps* verifier_deps,
TimingLogger* timings) {
+ if (verifier_deps != nullptr) {
+ TimingLogger::ScopedTiming t("Fast Verify", timings);
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
+ MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (verifier_deps->ValidateDependencies(class_loader, soa.Self())) {
+ // We successfully validated the dependencies; now update the class status
+ // of verified classes. Note that the dependencies also record which classes
+ // could not be fully verified; we could try again, but that would hurt verification
+ // time. So instead we assume these classes still need to be verified at
+ // runtime.
+ for (const DexFile* dex_file : dex_files) {
+ // Fetch the list of unverified classes and turn it into a set for faster
+ // lookups.
+ const std::vector<uint16_t>& unverified_classes =
+ verifier_deps->GetUnverifiedClasses(*dex_file);
+ std::set<uint16_t> set(unverified_classes.begin(), unverified_classes.end());
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader));
+ if (cls.Get() == nullptr) {
+ CHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ } else if (set.find(class_def.class_idx_) == set.end()) {
+ ObjectLock<mirror::Class> lock(soa.Self(), cls);
+ mirror::Class::SetStatus(cls, mirror::Class::kStatusVerified, soa.Self());
+ }
+ }
+ }
+ return;
+ }
+ }
+
+ // If no `verifier_deps` was passed (because no vdex file exists), or the
+ // passed `verifier_deps` is no longer valid, create a new one for
+ // non-boot-image compilation. The verifier will need it to record the new dependencies.
+ // Then dex2oat can update the vdex file with these new dependencies.
+ if (!GetCompilerOptions().IsBootImage()) {
+ Runtime::Current()->GetCompilerCallbacks()->SetVerifierDeps(
+ new verifier::VerifierDeps(dex_files));
+ }
// Note: verification should not be pulling in classes anymore when compiling the boot image,
// as all should have been resolved before. As such, doing this in parallel should still
// be deterministic.
for (const DexFile* dex_file : dex_files) {
CHECK(dex_file != nullptr);
- VerifyDexFile(class_loader,
+ VerifyDexFile(jclass_loader,
*dex_file,
dex_files,
parallel_thread_pool_.get(),
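
The rewritten Verify() above adds a fast path: when previously recorded dependencies still validate, classes not listed as unverified are marked kStatusVerified directly and full verification is skipped; otherwise a fresh VerifierDeps is installed (for non-boot-image builds only) and per-dex-file verification runs as before. A condensed sketch of that control flow, with every helper a hypothetical stand-in for the real calls:

    struct VerifierDeps {
      bool ValidateDependencies() { return true; }
    };

    bool IsBootImage() { return false; }
    void MarkClassesVerifiedExceptUnverified(VerifierDeps*) {}
    void InstallFreshVerifierDeps() {}
    void FullyVerifyAllDexFiles() {}

    void Verify(VerifierDeps* verifier_deps) {
      if (verifier_deps != nullptr && verifier_deps->ValidateDependencies()) {
        // Fast path: reuse the recorded verification result; classes the deps
        // list as unverified stay below kStatusVerified and are re-verified at
        // runtime.
        MarkClassesVerifiedExceptUnverified(verifier_deps);
        return;
      }
      if (!IsBootImage()) {
        InstallFreshVerifierDeps();  // the verifier records new dependencies here
      }
      FullyVerifyAllDexFiles();      // slow path: verify every dex file
    }
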
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 4a48f9c..c8d6cb0 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -50,6 +50,7 @@
namespace verifier {
class MethodVerifier;
+class VerifierDeps;
class VerifierDepsTest;
} // namespace verifier
@@ -117,6 +118,7 @@
void CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
+ verifier::VerifierDeps* verifier_deps,
TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
@@ -415,6 +417,7 @@
private:
void PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
+ verifier::VerifierDeps* verifier_deps,
TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
@@ -437,7 +440,9 @@
void Verify(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
+ verifier::VerifierDeps* verifier_deps,
TimingLogger* timings);
+
void VerifyDexFile(jobject class_loader,
const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 845028d..9679a79 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -43,6 +43,7 @@
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
compiler_driver_->CompileAll(class_loader,
GetDexFiles(class_loader),
+ /* verifier_deps */ nullptr,
&timings);
t.NewTiming("MakeAllExecutable");
MakeAllExecutable(class_loader);
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 8fdf6fc..fcb8979 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -189,7 +189,7 @@
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
driver->SetDexFilesForOatFile(class_path);
- driver->CompileAll(class_loader, class_path, &timings);
+ driver->CompileAll(class_loader, class_path, /* verifier_deps */ nullptr, &timings);
t.NewTiming("WriteElf");
SafeMap<std::string, std::string> key_value_store;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a37bf4b..51ef440 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -2070,13 +2070,8 @@
void ImageWriter::FixupObject(Object* orig, Object* copy) {
DCHECK(orig != nullptr);
DCHECK(copy != nullptr);
- if (kUseBakerOrBrooksReadBarrier) {
- orig->AssertReadBarrierPointer();
- if (kUseBrooksReadBarrier) {
- // Note the address 'copy' isn't the same as the image address of 'orig'.
- copy->SetReadBarrierPointer(GetImageAddress(orig));
- DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
- }
+ if (kUseBakerReadBarrier) {
+ orig->AssertReadBarrierState();
}
auto* klass = orig->GetClass();
if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
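
The FixupObject change drops Brooks-specific work: a Brooks read barrier stores a forwarding pointer inside every object, which image writing had to rewrite, whereas a Baker read barrier only keeps a small state in the lock word, so the copied object needs no fixup and an assertion on the state suffices. A hypothetical illustration of the two layouts (field positions assumed, not ART's actual definitions):

    #include <cstdint>

    struct BrooksObject {
      BrooksObject* forwarding_;  // must track the object's (relocated) address
    };

    struct BakerObject {
      uint32_t lock_word_;
      static constexpr uint32_t kRbStateShift = 28;  // assumed bit position
      uint32_t ReadBarrierState() const { return (lock_word_ >> kRbStateShift) & 1u; }
    };
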
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 64ee574..102637f 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -377,7 +377,8 @@
if (kCompile) {
TimingLogger timings2("OatTest::WriteRead", false, false);
compiler_driver_->SetDexFilesForOatFile(class_linker->GetBootClassPath());
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
+ compiler_driver_->CompileAll(
+ class_loader, class_linker->GetBootClassPath(), /* verifier_deps */ nullptr, &timings2);
}
ScratchFile tmp_oat, tmp_vdex(tmp_oat, ".vdex");
@@ -391,7 +392,8 @@
ASSERT_TRUE(success);
if (kCompile) { // OatWriter strips the code, regenerate to compare
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
+ compiler_driver_->CompileAll(
+ class_loader, class_linker->GetBootClassPath(), /* verifier_deps */ nullptr, &timings);
}
std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
@@ -515,7 +517,7 @@
soa.Decode<mirror::ClassLoader>(class_loader).Ptr());
}
compiler_driver_->SetDexFilesForOatFile(dex_files);
- compiler_driver_->CompileAll(class_loader, dex_files, &timings);
+ compiler_driver_->CompileAll(class_loader, dex_files, /* verifier_deps */ nullptr, &timings);
ScratchFile tmp_oat, tmp_vdex(tmp_oat, ".vdex");
SafeMap<std::string, std::string> key_value_store;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index be65f89..57823c9 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -6843,7 +6843,7 @@
// uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
// HeapReference<Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
// }
@@ -6919,14 +6919,13 @@
}
AddSlowPath(slow_path);
- // if (rb_state == ReadBarrier::gray_ptr_)
+ // if (rb_state == ReadBarrier::GrayState())
// ref = ReadBarrier::Mark(ref);
// Given the numeric representation, it's enough to check the low bit of the
// rb_state. We do that by shifting the bit out of the lock word with LSRS
// which can be a 16-bit instruction unlike the TST immediate.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
__ Lsrs(temp_reg, temp_reg, LockWord::kReadBarrierStateShift + 1);
__ b(slow_path->GetEntryLabel(), CS); // Carry flag is the last bit shifted out by LSRS.
__ Bind(slow_path->GetExitLabel());
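
The updated static_asserts reflect that the black state is gone: with only white (0) and gray (1), gray-ness is exactly the low bit of rb_state, which the ARM code shifts into the carry flag with LSRS and branches on CS. A C++ rendering of the bit test the assembly performs, with the lock word shift value assumed:

    #include <cstdint>

    constexpr uint32_t kReadBarrierStateShift = 28;  // assumed lock word layout

    // Equivalent of "LSRS temp, lock_word, #(shift + 1)": the last bit shifted
    // out (the rb_state low bit) lands in the carry flag; carry-set means gray.
    bool IsGray(uint32_t lock_word) {
      return ((lock_word >> kReadBarrierStateShift) & 1u) != 0;
    }
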
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b537509..b411a43 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -5426,7 +5426,7 @@
// uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
// HeapReference<Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
// }
@@ -5517,12 +5517,11 @@
}
AddSlowPath(slow_path);
- // if (rb_state == ReadBarrier::gray_ptr_)
+ // if (rb_state == ReadBarrier::GrayState())
// ref = ReadBarrier::Mark(ref);
// Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
__ Tbnz(temp, LockWord::kReadBarrierStateShift, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 891cf77..e69528e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -5062,6 +5062,39 @@
}
}
+void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(
+ HInstruction* instruction ATTRIBUTE_UNUSED,
+ Location ref ATTRIBUTE_UNUSED,
+ vixl::aarch32::Register obj ATTRIBUTE_UNUSED,
+ uint32_t offset ATTRIBUTE_UNUSED,
+ Location temp ATTRIBUTE_UNUSED,
+ bool needs_null_check ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(
+ HInstruction* instruction ATTRIBUTE_UNUSED,
+ Location ref ATTRIBUTE_UNUSED,
+ vixl::aarch32::Register obj ATTRIBUTE_UNUSED,
+ uint32_t offset ATTRIBUTE_UNUSED,
+ Location index ATTRIBUTE_UNUSED,
+ ScaleFactor scale_factor ATTRIBUTE_UNUSED,
+ Location temp ATTRIBUTE_UNUSED,
+ bool needs_null_check ATTRIBUTE_UNUSED,
+ bool always_update_field ATTRIBUTE_UNUSED,
+ vixl::aarch32::Register* temp2 ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction ATTRIBUTE_UNUSED,
+ Location out ATTRIBUTE_UNUSED,
+ Location ref ATTRIBUTE_UNUSED,
+ Location obj ATTRIBUTE_UNUSED,
+ uint32_t offset ATTRIBUTE_UNUSED,
+ Location index ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instruction ATTRIBUTE_UNUSED,
Location out,
Location ref ATTRIBUTE_UNUSED,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index efd33c7..2f946e4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -7093,7 +7093,7 @@
// uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
// HeapReference<Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
// }
@@ -7111,14 +7111,13 @@
uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
// Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
constexpr int32_t test_value = static_cast<int8_t>(1 << gray_bit_position);
- // if (rb_state == ReadBarrier::gray_ptr_)
+ // if (rb_state == ReadBarrier::GrayState())
// ref = ReadBarrier::Mark(ref);
// At this point, just do the "if" and make sure that flags are preserved until the branch.
__ testb(Address(obj, monitor_offset + gray_byte_position), Immediate(test_value));
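
On x86 the same gray check is a single one-byte testb against the monitor word: the byte offset and in-byte bit position of the gray bit are derived from the state shift at compile time, so no full lock-word load is needed. A sketch of that constant derivation (shift value assumed, as above):

    #include <cstdint>

    constexpr uint32_t kBitsPerByte = 8;
    constexpr uint32_t kReadBarrierStateShift = 28;  // assumed

    constexpr uint32_t gray_byte_position = kReadBarrierStateShift / kBitsPerByte;  // byte 3 of the word
    constexpr uint32_t gray_bit_position  = kReadBarrierStateShift % kBitsPerByte;  // bit 4 within it
    constexpr int32_t  test_value = static_cast<int8_t>(1 << gray_bit_position);    // 0x10

    static_assert(gray_byte_position == 3 && gray_bit_position == 4,
                  "derived from the assumed shift");
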
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index fcabeea..49f33d2 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -6535,7 +6535,7 @@
// uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
// HeapReference<Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
// }
@@ -6553,14 +6553,13 @@
uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
// Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
constexpr int32_t test_value = static_cast<int8_t>(1 << gray_bit_position);
- // if (rb_state == ReadBarrier::gray_ptr_)
+ // if (rb_state == ReadBarrier::GrayState())
// ref = ReadBarrier::Mark(ref);
// At this point, just do the "if" and make sure that flags are preserved until the branch.
__ testb(Address(obj, monitor_offset + gray_byte_position), Immediate(test_value));
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 8790c1e..93a2340 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1945,7 +1945,7 @@
// if (src_ptr != end_ptr) {
// uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// // Slow-path copy.
// do {
@@ -1986,9 +1986,8 @@
// Given the numeric representation, it's enough to check the low bit of the
// rb_state. We do that by shifting the bit out of the lock word with LSRS
// which can be a 16-bit instruction unlike the TST immediate.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
__ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
// Carry flag is the last bit shifted out by LSRS.
__ b(read_barrier_slow_path->GetEntryLabel(), CS);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index db1c022..47e6d96 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2659,7 +2659,7 @@
// if (src_ptr != end_ptr) {
// uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// // Slow-path copy.
// do {
@@ -2704,9 +2704,8 @@
codegen_->AddSlowPath(read_barrier_slow_path);
// Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
__ Tbnz(tmp, LockWord::kReadBarrierStateShift, read_barrier_slow_path->GetEntryLabel());
// Fast-path copy.
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 2e37a00..6ff0ca4 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2024,7 +2024,7 @@
// if (src_ptr != end_ptr) {
// uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// // Slow-path copy.
// do {
@@ -2065,9 +2065,8 @@
// Given the numeric representation, it's enough to check the low bit of the
// rb_state. We do that by shifting the bit out of the lock word with LSRS
// which can be a 16-bit instruction unlike the TST immediate.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
__ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
// Carry flag is the last bit shifted out by LSRS.
__ B(cs, read_barrier_slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index aae3899..43682c5 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3200,7 +3200,7 @@
// if (src_ptr != end_ptr) {
// uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// // Slow-path copy.
// for (size_t i = 0; i != length; ++i) {
@@ -3222,14 +3222,13 @@
__ j(kEqual, &done);
// Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
constexpr int32_t test_value = static_cast<int8_t>(1 << gray_bit_position);
- // if (rb_state == ReadBarrier::gray_ptr_)
+ // if (rb_state == ReadBarrier::GrayState())
// goto slow_path;
// At this point, just do the "if" and make sure that flags are preserved until the branch.
__ testb(Address(src, monitor_offset + gray_byte_position), Immediate(test_value));
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index cdef22f..de2606c 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1399,7 +1399,7 @@
// if (src_ptr != end_ptr) {
// uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
+ // bool is_gray = (rb_state == ReadBarrier::GrayState());
// if (is_gray) {
// // Slow-path copy.
// do {
@@ -1420,14 +1420,13 @@
__ j(kEqual, &done);
// Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::white_ptr_ == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::gray_ptr_ == 1, "Expecting gray to have value 1");
- static_assert(ReadBarrier::black_ptr_ == 2, "Expecting black to have value 2");
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+ static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
constexpr int32_t test_value = static_cast<int8_t>(1 << gray_bit_position);
- // if (rb_state == ReadBarrier::gray_ptr_)
+ // if (rb_state == ReadBarrier::GrayState())
// goto slow_path;
// At this point, just do the "if" and make sure that flags are preserved until the branch.
__ testb(Address(src, monitor_offset + gray_byte_position), Immediate(test_value));
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index b91e9e6..5b2cbf7 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -663,27 +663,59 @@
if (predecessors.size() == 0) {
return;
}
+
ArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
for (size_t i = 0; i < heap_values.size(); i++) {
- HInstruction* pred0_value = heap_values_for_[predecessors[0]->GetBlockId()][i];
- heap_values[i] = pred0_value;
- if (pred0_value != kUnknownHeapValue) {
- for (size_t j = 1; j < predecessors.size(); j++) {
- HInstruction* pred_value = heap_values_for_[predecessors[j]->GetBlockId()][i];
- if (pred_value != pred0_value) {
- heap_values[i] = kUnknownHeapValue;
- break;
- }
+ HInstruction* merged_value = nullptr;
+ // Whether merged_value is a result that's merged from all predecessors.
+ bool from_all_predecessors = true;
+ ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
+ HInstruction* singleton_ref = nullptr;
+ if (ref_info->IsSingletonAndNotReturned()) {
+ // We do more analysis of liveness when merging heap values for such
+ // cases since stores into such references may potentially be eliminated.
+ singleton_ref = ref_info->GetReference();
+ }
+
+ for (HBasicBlock* predecessor : predecessors) {
+ HInstruction* pred_value = heap_values_for_[predecessor->GetBlockId()][i];
+ if ((singleton_ref != nullptr) &&
+ !singleton_ref->GetBlock()->Dominates(predecessor)) {
+ // singleton_ref is not live in this predecessor. Skip this predecessor since
+ // it does not really have the location.
+ DCHECK_EQ(pred_value, kUnknownHeapValue);
+ from_all_predecessors = false;
+ continue;
+ }
+ if (merged_value == nullptr) {
+ // First seen heap value.
+ merged_value = pred_value;
+ } else if (pred_value != merged_value) {
+ // There are conflicting values.
+ merged_value = kUnknownHeapValue;
+ break;
}
}
- if (heap_values[i] == kUnknownHeapValue) {
+ if (merged_value == kUnknownHeapValue) {
+ // There are conflicting heap values from different predecessors.
// Keep the last store in each predecessor since future loads cannot be eliminated.
- for (size_t j = 0; j < predecessors.size(); j++) {
- ArenaVector<HInstruction*>& pred_values = heap_values_for_[predecessors[j]->GetBlockId()];
+ for (HBasicBlock* predecessor : predecessors) {
+ ArenaVector<HInstruction*>& pred_values = heap_values_for_[predecessor->GetBlockId()];
KeepIfIsStore(pred_values[i]);
}
}
+
+ if ((merged_value == nullptr) || !from_all_predecessors) {
+ DCHECK(singleton_ref != nullptr);
+ DCHECK((singleton_ref->GetBlock() == block) ||
+ !singleton_ref->GetBlock()->Dominates(block));
+ // singleton_ref is not defined before block or defined only in some of its
+ // predecessors, so block doesn't really have the location at its entry.
+ heap_values[i] = kUnknownHeapValue;
+ } else {
+ heap_values[i] = merged_value;
+ }
}
}
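
The rewritten merge walks predecessors once and distinguishes three lattice values: nothing seen yet (nullptr), a single agreed value, and a conflict (kUnknownHeapValue); it additionally skips predecessors that a singleton reference does not dominate, since the location does not yet exist there. The core merge, distilled (types and the sentinel are simplified stand-ins):

    #include <vector>

    struct HInstruction;  // opaque; only pointer identity matters here
    HInstruction* const kUnknown = reinterpret_cast<HInstruction*>(1);  // sentinel, like kUnknownHeapValue

    // Merge one heap location across predecessors (sketch of the new logic,
    // without the singleton-liveness refinement).
    HInstruction* MergeLocation(const std::vector<HInstruction*>& pred_values) {
      HInstruction* merged = nullptr;      // nothing seen yet
      for (HInstruction* v : pred_values) {
        if (merged == nullptr) {
          merged = v;                      // first heap value seen
        } else if (v != merged) {
          return kUnknown;                 // conflicting values from two predecessors
        }
      }
      return merged;                       // the agreed value (or nullptr if no preds)
    }
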
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 51be1d1..55e1a2c 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -28,6 +28,17 @@
instruction->GetBlock()->RemoveInstructionOrPhi(instruction, /*ensure_safety=*/ false);
}
+// Detects a goto block and sets succ to the single successor.
+static bool IsGotoBlock(HBasicBlock* block, /*out*/ HBasicBlock** succ) {
+ if (block->GetPredecessors().size() == 1 &&
+ block->GetSuccessors().size() == 1 &&
+ block->IsSingleGoto()) {
+ *succ = block->GetSingleSuccessor();
+ return true;
+ }
+ return false;
+}
+
//
// Class methods.
//
@@ -178,31 +189,57 @@
}
void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
- for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
- // Remove instructions that are dead.
- for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
- HInstruction* instruction = i.Current();
- if (instruction->IsDeadAndRemovable()) {
- block->RemoveInstruction(instruction);
+ // Repeat the block simplifications until no more changes occur. Note that since
+ // each simplification consists of eliminating code (without introducing new code),
+ // this process is always finite.
+ bool changed;
+ do {
+ changed = false;
+ // Iterate over all basic blocks in the loop-body.
+ for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ // Remove dead instructions from the loop-body.
+ for (HBackwardInstructionIterator i(block->GetInstructions()); !i.Done(); i.Advance()) {
+ HInstruction* instruction = i.Current();
+ if (instruction->IsDeadAndRemovable()) {
+ changed = true;
+ block->RemoveInstruction(instruction);
+ }
}
- }
- // Remove trivial control flow blocks from the loop-body.
- if (block->GetPredecessors().size() == 1 &&
- block->GetSuccessors().size() == 1 &&
- block->GetFirstInstruction()->IsGoto()) {
- HBasicBlock* pred = block->GetSinglePredecessor();
- HBasicBlock* succ = block->GetSingleSuccessor();
- if (succ->GetPredecessors().size() == 1) {
+ // Remove trivial control flow blocks from the loop-body.
+ HBasicBlock* succ = nullptr;
+ if (IsGotoBlock(block, &succ) && succ->GetPredecessors().size() == 1) {
+ // Trivial goto block can be removed.
+ HBasicBlock* pred = block->GetSinglePredecessor();
+ changed = true;
pred->ReplaceSuccessor(block, succ);
- block->ClearDominanceInformation();
- block->SetDominator(pred); // needed by next disconnect.
+ block->RemoveDominatedBlock(succ);
block->DisconnectAndDelete();
pred->AddDominatedBlock(succ);
succ->SetDominator(pred);
+ } else if (block->GetSuccessors().size() == 2) {
+ // Trivial if block can be bypassed to either branch.
+ HBasicBlock* succ0 = block->GetSuccessors()[0];
+ HBasicBlock* succ1 = block->GetSuccessors()[1];
+ HBasicBlock* meet0 = nullptr;
+ HBasicBlock* meet1 = nullptr;
+ if (succ0 != succ1 &&
+ IsGotoBlock(succ0, &meet0) &&
+ IsGotoBlock(succ1, &meet1) &&
+ meet0 == meet1 && // meets again
+ meet0 != block && // no self-loop
+ meet0->GetPhis().IsEmpty()) { // not used for merging
+ changed = true;
+ succ0->DisconnectAndDelete();
+ if (block->Dominates(meet0)) {
+ block->RemoveDominatedBlock(meet0);
+ succ1->AddDominatedBlock(meet0);
+ meet0->SetDominator(succ1);
+ }
+ }
}
}
- }
+ } while (changed);
}
void HLoopOptimization::RemoveIfEmptyInnerLoop(LoopNode* node) {
@@ -244,8 +281,7 @@
body->DisconnectAndDelete();
exit->RemovePredecessor(header);
header->RemoveSuccessor(exit);
- header->ClearDominanceInformation();
- header->SetDominator(preheader); // needed by next disconnect.
+ header->RemoveDominatedBlock(exit);
header->DisconnectAndDelete();
preheader->AddSuccessor(exit);
preheader->AddInstruction(new (graph_->GetArena()) HGoto()); // global allocator
@@ -259,22 +295,23 @@
bool HLoopOptimization::IsPhiInduction(HPhi* phi) {
ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
if (set != nullptr) {
+ DCHECK(iset_->empty());
for (HInstruction* i : *set) {
- // Check that, other than phi, instruction are removable with uses contained in the cycle.
- // TODO: investigate what cases are no longer in the graph.
- if (i != phi) {
- if (!i->IsInBlock() || !i->IsRemovable()) {
- return false;
- }
+ // Check that, other than instructions that are no longer in the graph (removed earlier),
+ // each instruction is removable and, other than the phi, its uses are contained in the cycle.
+ if (!i->IsInBlock()) {
+ continue;
+ } else if (!i->IsRemovable()) {
+ return false;
+ } else if (i != phi) {
for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
if (set->find(use.GetUser()) == set->end()) {
return false;
}
}
}
+ iset_->insert(i); // copy
}
- DCHECK(iset_->empty());
- iset_->insert(set->begin(), set->end()); // copy
return true;
}
return false;
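
SimplifyBlocks() now iterates to a fixed point and handles a new case: an if whose two successors are trivial goto blocks meeting at the same phi-free block can have one arm deleted outright. Because every simplification only removes code, the fixed-point loop must terminate. A sketch of the diamond test on a simplified block type (the real HBasicBlock API differs):

    #include <vector>

    struct Block {
      std::vector<Block*> preds, succs;
      bool has_phis = false;
      bool IsSingleGoto() const { return preds.size() == 1 && succs.size() == 1; }
      Block* SingleSucc() const { return succs[0]; }
    };

    // Mirrors IsGotoBlock plus the diamond condition from the patch.
    bool IsTrivialDiamond(const Block* block) {
      if (block->succs.size() != 2) return false;
      Block* s0 = block->succs[0];
      Block* s1 = block->succs[1];
      return s0 != s1 &&
             s0->IsSingleGoto() && s1->IsSingleGoto() &&
             s0->SingleSucc() == s1->SingleSucc() &&   // meets again
             s0->SingleSucc() != block &&              // no self-loop
             !s0->SingleSucc()->has_phis;              // not used for merging
    }
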
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index 46adb3f..bb30f46 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -115,18 +115,18 @@
public:
// The ManagedRegister does not have information about size and offset.
// In this case its size and offset are determined by BuildFrame (assembler).
- void push_back(ManagedRegister __x) {
- ManagedRegisterSpill spill(__x);
+ void push_back(ManagedRegister x) {
+ ManagedRegisterSpill spill(x);
std::vector<ManagedRegisterSpill>::push_back(spill);
}
- void push_back(ManagedRegister __x, int32_t __size) {
- ManagedRegisterSpill spill(__x, __size);
+ void push_back(ManagedRegister x, int32_t size) {
+ ManagedRegisterSpill spill(x, size);
std::vector<ManagedRegisterSpill>::push_back(spill);
}
- void push_back(ManagedRegisterSpill __x) {
- std::vector<ManagedRegisterSpill>::push_back(__x);
+ void push_back(ManagedRegisterSpill x) {
+ std::vector<ManagedRegisterSpill>::push_back(x);
}
private:
};
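
The __x/__size renames are more than style: identifiers containing a double underscore are reserved to the implementation in C++ ([lex.name]), so defining them in user code is formally undefined behavior even when it happens to compile. For illustration:

    // Reserved vs. safe parameter names; both declarations compile, but the
    // first is formally undefined behavior.
    void push_back_bad(int __x);   // "__" anywhere in the name is reserved
    void push_back_ok(int x);      // plain names belong to the program
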
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 8d2a0e7..6b690aa 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -72,6 +72,22 @@
return klass;
}
+ void SetupCompilerDriver() {
+ compiler_options_->boot_image_ = false;
+ compiler_driver_->InitializeThreadPools();
+ }
+
+ void VerifyWithCompilerDriver(verifier::VerifierDeps* deps) {
+ TimingLogger timings("Verify", false, false);
+ // The compiler driver handles the verifier deps in the callbacks, so
+ // remove what this class did for unit testing.
+ verifier_deps_.reset(nullptr);
+ callbacks_->SetVerifierDeps(nullptr);
+ compiler_driver_->Verify(class_loader_, dex_files_, deps, &timings);
+ // The compiler driver may have updated the VerifierDeps in the callback object.
+ verifier_deps_.reset(callbacks_->GetVerifierDeps());
+ }
+
void SetVerifierDeps(const std::vector<const DexFile*>& dex_files) {
verifier_deps_.reset(new verifier::VerifierDeps(dex_files));
VerifierDepsCompilerCallbacks* callbacks =
@@ -156,15 +172,12 @@
}
void VerifyDexFile(const char* multidex = nullptr) {
- std::string error_msg;
{
ScopedObjectAccess soa(Thread::Current());
LoadDexFile(&soa, "VerifierDeps", multidex);
}
- TimingLogger timings("Verify", false, false);
- compiler_options_->boot_image_ = false;
- compiler_driver_->InitializeThreadPools();
- compiler_driver_->Verify(class_loader_, dex_files_, &timings);
+ SetupCompilerDriver();
+ VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
}
bool TestAssignabilityRecording(const std::string& dst,
@@ -185,6 +198,33 @@
return true;
}
+ // Check that the status of classes in `class_loader_` matches the
+ // expected status in `deps`.
+ void VerifyClassStatus(const verifier::VerifierDeps& deps) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader_handle(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_)));
+ MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
+ for (const DexFile* dex_file : dex_files_) {
+ const std::vector<uint16_t>& unverified_classes = deps.GetUnverifiedClasses(*dex_file);
+ std::set<uint16_t> set(unverified_classes.begin(), unverified_classes.end());
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ cls.Assign(class_linker_->FindClass(soa.Self(), descriptor, class_loader_handle));
+ if (cls.Get() == nullptr) {
+ CHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ } else if (set.find(class_def.class_idx_) == set.end()) {
+ ASSERT_EQ(cls->GetStatus(), mirror::Class::kStatusVerified);
+ } else {
+ ASSERT_LT(cls->GetStatus(), mirror::Class::kStatusVerified);
+ }
+ }
+ }
+ }
+
bool HasUnverifiedClass(const std::string& cls) {
const DexFile::TypeId* type_id = primary_dex_file_->FindTypeId(cls.c_str());
DCHECK(type_id != nullptr);
@@ -1160,7 +1200,7 @@
MutableHandle<mirror::ClassLoader> new_class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
{
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_TRUE(verifier_deps_->Verify(new_class_loader, soa.Self()));
+ ASSERT_TRUE(verifier_deps_->ValidateDependencies(new_class_loader, soa.Self()));
}
std::vector<uint8_t> buffer;
@@ -1170,7 +1210,7 @@
{
VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_TRUE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_TRUE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
// Fiddle with the dependencies to make sure we catch any change and fail to verify.
@@ -1181,7 +1221,7 @@
VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
deps->assignable_types_.insert(*deps->unassignable_types_.begin());
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1190,7 +1230,7 @@
VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
deps->unassignable_types_.insert(*deps->assignable_types_.begin());
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
// Mess up with classes.
@@ -1208,7 +1248,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1225,7 +1265,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1242,7 +1282,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
// Mess up with fields.
@@ -1261,7 +1301,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1279,7 +1319,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1297,7 +1337,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1316,7 +1356,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
// Mess up with methods.
@@ -1338,7 +1378,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1357,7 +1397,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1376,7 +1416,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1396,7 +1436,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1415,7 +1455,7 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
}
{
@@ -1434,7 +1474,56 @@
}
ASSERT_TRUE(found);
new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
- ASSERT_FALSE(decoded_deps.Verify(new_class_loader, soa.Self()));
+ ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+ }
+ }
+}
+
+TEST_F(VerifierDepsTest, CompilerDriver) {
+ SetupCompilerDriver();
+
+ // Test both multi-dex and single-dex configuration.
+ for (const char* multi : { "MultiDex", static_cast<const char*>(nullptr) }) {
+ // Test that the compiler driver behaves as expected when the dependencies
+ // verify and when they don't verify.
+ for (bool verify_failure : { false, true }) {
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ LoadDexFile(&soa, "VerifierDeps", multi);
+ }
+ VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+
+ std::vector<uint8_t> buffer;
+ verifier_deps_->Encode(dex_files_, &buffer);
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ LoadDexFile(&soa, "VerifierDeps", multi);
+ }
+ verifier::VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+ if (verify_failure) {
+ // Just taint the decoded VerifierDeps with one invalid entry.
+ VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+ bool found = false;
+ for (const auto& entry : deps->classes_) {
+ if (entry.IsResolved()) {
+ deps->classes_.insert(VerifierDeps::ClassResolution(
+ entry.GetDexTypeIndex(), VerifierDeps::kUnresolvedMarker));
+ found = true;
+ break;
+ }
+ }
+ ASSERT_TRUE(found);
+ }
+ VerifyWithCompilerDriver(&decoded_deps);
+
+ if (verify_failure) {
+ ASSERT_FALSE(verifier_deps_ == nullptr);
+ ASSERT_FALSE(verifier_deps_->Equals(decoded_deps));
+ } else {
+ ASSERT_TRUE(verifier_deps_ == nullptr);
+ VerifyClassStatus(decoded_deps);
+ }
}
}
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 28d6289..1180bde 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1497,12 +1497,6 @@
dex_files_ = MakeNonOwningPointerVector(opened_dex_files_);
- if (!IsBootImage()) {
- // Collect verification dependencies when compiling an app.
- verifier_deps_.reset(new verifier::VerifierDeps(dex_files_));
- callbacks_->SetVerifierDeps(verifier_deps_.get());
- }
-
// We had to postpone the swap decision till now, as this is the point when we actually
// know about the dex files we're going to use.
@@ -1660,7 +1654,7 @@
swap_fd_,
profile_compilation_info_.get()));
driver_->SetDexFilesForOatFile(dex_files_);
- driver_->CompileAll(class_loader_, dex_files_, timings_);
+ driver_->CompileAll(class_loader_, dex_files_, /* verifier_deps */ nullptr, timings_);
}
// Notes on the interleaving of creating the images and oat files to
@@ -1785,13 +1779,13 @@
{
TimingLogger::ScopedTiming t2("dex2oat Write VDEX", timings_);
DCHECK(IsBootImage() || oat_files_.size() == 1u);
- DCHECK_EQ(IsBootImage(), verifier_deps_ == nullptr);
+ verifier::VerifierDeps* verifier_deps = callbacks_->GetVerifierDeps();
for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
File* vdex_file = vdex_files_[i].get();
std::unique_ptr<BufferedOutputStream> vdex_out(
MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(vdex_file)));
- if (!oat_writers_[i]->WriteVerifierDeps(vdex_out.get(), verifier_deps_.get())) {
+ if (!oat_writers_[i]->WriteVerifierDeps(vdex_out.get(), verifier_deps)) {
LOG(ERROR) << "Failed to write verifier dependencies into VDEX " << vdex_file->GetPath();
return false;
}
@@ -2645,9 +2639,6 @@
std::vector<std::vector<const DexFile*>> dex_files_per_oat_file_;
std::unordered_map<const DexFile*, size_t> dex_file_oat_index_map_;
- // Collector of verifier dependencies.
- std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
-
// Backing storage.
std::vector<std::string> char_backing_storage_;
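
dex2oat no longer owns a verifier_deps_ field; Verify() installs the deps into the compiler callbacks and dex2oat borrows them back when writing the vdex. A sketch of the resulting single-owner lifecycle (names hypothetical, mirroring the unique_ptr pattern from the quick_compiler_callbacks.h change above):

    #include <memory>

    struct VerifierDeps {};
    struct Callbacks {  // stand-in for the real CompilerCallbacks
      void SetVerifierDeps(VerifierDeps* d) { deps_.reset(d); }
      VerifierDeps* GetVerifierDeps() const { return deps_.get(); }
      std::unique_ptr<VerifierDeps> deps_;
    };

    int main() {
      Callbacks cb;
      cb.SetVerifierDeps(new VerifierDeps());         // installed by Verify() when needed
      VerifierDeps* for_vdex = cb.GetVerifierDeps();  // dex2oat borrows at vdex-write time
      (void)for_vdex;  // would be handed to OatWriter::WriteVerifierDeps()
    }  // the callbacks' unique_ptr frees the deps: exactly one owner
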
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index f197fc1..d1d127d98 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -516,8 +516,8 @@
// Sanity check that we are reading a real object
CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
- if (kUseBakerOrBrooksReadBarrier) {
- obj->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
}
// Iterate every page this object belongs to
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 986f265..7ea5bea 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -747,13 +747,8 @@
void PatchOat::VisitObject(mirror::Object* object) {
mirror::Object* copy = RelocatedCopyOf(object);
CHECK(copy != nullptr);
- if (kUseBakerOrBrooksReadBarrier) {
- object->AssertReadBarrierPointer();
- if (kUseBrooksReadBarrier) {
- mirror::Object* moved_to = RelocatedAddressOfPointer(object);
- copy->SetReadBarrierPointer(moved_to);
- DCHECK_EQ(copy->GetReadBarrierPointer(), moved_to);
- }
+ if (kUseBakerReadBarrier) {
+ object->AssertReadBarrierState();
}
PatchOat::PatchVisitor visitor(this, copy);
object->VisitReferences<kVerifyNone>(visitor, visitor);
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 2856766..e3f39cd 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -2301,8 +2301,15 @@
jz .Lslow_rb_\name
ret
.Lslow_rb_\name:
- // Save all potentially live caller-save core registers.
PUSH rax
+ movl MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg)), %eax
+ addl LITERAL(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW), %eax
+ // Jump on overflow; the only case that overflows should be the forwarding address one.
+ // Taken ~25% of the time.
+ jnae .Lret_overflow\name
+
+ // Save all potentially live caller-save core registers.
+ movq 0(%rsp), %rax
PUSH rcx
PUSH rdx
PUSH rsi
@@ -2367,6 +2374,12 @@
POP_REG_NE rax, RAW_VAR(reg)
.Lret_rb_\name:
ret
+.Lret_overflow\name:
+ // The overflow cleared the top bits.
+ sall LITERAL(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT), %eax
+ movq %rax, REG_VAR(reg)
+ POP_REG_NE rax, RAW_VAR(reg)
+ ret
END_FUNCTION VAR(name)
END_MACRO
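
The new assembly fast path exploits the lock word layout: assuming the top two bits hold the state and 0b11 means "forwarding address", adding LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW carries out of 32 bits exactly for that state (which jnae branches on), and the wrap-around simultaneously clears the state bits, so the remaining field only needs a left shift to become the address. A C++ model of the trick, with both constant values assumed:

    #include <cstdint>

    // Assumed layout: top two bits = state, 0b11 = forwarding address,
    // low 30 bits = address >> kFwdShift. Both constants below stand in for
    // LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW / _SHIFT.
    constexpr uint32_t kOverflowAddend = 0x40000000u;
    constexpr uint32_t kFwdShift = 3;  // object alignment shift

    // What "addl $overflow, %eax; jnae" tests via the carry flag: only state
    // 0b11 (lock_word >= 0xC0000000) produces a carry.
    bool IsForwardingAddress(uint32_t lock_word) {
      return lock_word >= 0xC0000000u;
    }

    // The wrap-around of the same add clears the two state bits; shifting left
    // restores the aligned forwarding address.
    uint32_t DecodeForwardingAddress(uint32_t lock_word) {
      return (lock_word + kOverflowAddend) << kFwdShift;
    }
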
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cab9d30..e7e5be7 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -394,8 +394,8 @@
CHECK(java_lang_Class.Get() != nullptr);
mirror::Class::SetClassClass(java_lang_Class.Get());
java_lang_Class->SetClass(java_lang_Class.Get());
- if (kUseBakerOrBrooksReadBarrier) {
- java_lang_Class->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ java_lang_Class->AssertReadBarrierState();
}
java_lang_Class->SetClassSize(class_class_size);
java_lang_Class->SetPrimitiveType(Primitive::kPrimNot);
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index 00dedef..806653a 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -47,6 +47,7 @@
virtual bool IsRelocationPossible() = 0;
virtual verifier::VerifierDeps* GetVerifierDeps() const = 0;
+ virtual void SetVerifierDeps(verifier::VerifierDeps* deps ATTRIBUTE_UNUSED) {}
bool IsBootImage() {
return mode_ == CallbackMode::kCompileBootImage;
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 7b1fb95..be25803 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -456,22 +456,22 @@
#define DECODE_UNSIGNED_CHECKED_FROM_WITH_ERROR_VALUE(ptr, var, error_value) \
uint32_t var; \
- if (!DecodeUnsignedLeb128Checked(&ptr, begin_ + size_, &var)) { \
+ if (!DecodeUnsignedLeb128Checked(&(ptr), begin_ + size_, &(var))) { \
return error_value; \
}
-#define DECODE_UNSIGNED_CHECKED_FROM(ptr, var) \
- uint32_t var; \
- if (!DecodeUnsignedLeb128Checked(&ptr, begin_ + size_, &var)) { \
- ErrorStringPrintf("Read out of bounds"); \
- return false; \
+#define DECODE_UNSIGNED_CHECKED_FROM(ptr, var) \
+ uint32_t var; \
+ if (!DecodeUnsignedLeb128Checked(&(ptr), begin_ + size_, &(var))) { \
+ ErrorStringPrintf("Read out of bounds"); \
+ return false; \
}
-#define DECODE_SIGNED_CHECKED_FROM(ptr, var) \
- int32_t var; \
- if (!DecodeSignedLeb128Checked(&ptr, begin_ + size_, &var)) { \
- ErrorStringPrintf("Read out of bounds"); \
- return false; \
+#define DECODE_SIGNED_CHECKED_FROM(ptr, var) \
+ int32_t var; \
+ if (!DecodeSignedLeb128Checked(&(ptr), begin_ + size_, &(var))) { \
+ ErrorStringPrintf("Read out of bounds"); \
+ return false; \
}
bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
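
The macro reformatting also hardens the expansion: writing &(ptr) instead of &ptr keeps an expression argument grouped. Without the parentheses, unary & binds only to the first token of the argument, and the mistake can compile silently:

    #define ADDR_OF_BAD(p)  (&p)
    #define ADDR_OF_GOOD(p) (&(p))

    // Given "const uint8_t* ptr; ptrdiff_t off;":
    //   ADDR_OF_BAD(ptr + off)  expands to (&ptr + off)   -- pointer arithmetic
    //                           on the address of 'ptr': compiles, silently wrong.
    //   ADDR_OF_GOOD(ptr + off) expands to (&(ptr + off)) -- address of an
    //                           rvalue: a hard compile error instead of a bug.
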
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index dc5fd07..515fcbf 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -43,11 +43,8 @@
obj = self->AllocTlab(byte_count); \
DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
obj->SetClass(klass); \
- if (kUseBakerOrBrooksReadBarrier) { \
- if (kUseBrooksReadBarrier) { \
- obj->SetReadBarrierPointer(obj); \
- } \
- obj->AssertReadBarrierPointer(); \
+ if (kUseBakerReadBarrier) { \
+ obj->AssertReadBarrierState(); \
} \
QuasiAtomic::ThreadFenceForConstructor(); \
return obj; \
@@ -69,11 +66,8 @@
obj = self->AllocTlab(byte_count); \
DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
obj->SetClass(klass); \
- if (kUseBakerOrBrooksReadBarrier) { \
- if (kUseBrooksReadBarrier) { \
- obj->SetReadBarrierPointer(obj); \
- } \
- obj->AssertReadBarrierPointer(); \
+ if (kUseBakerReadBarrier) { \
+ obj->AssertReadBarrierState(); \
} \
QuasiAtomic::ThreadFenceForConstructor(); \
return obj; \
@@ -94,11 +88,8 @@
obj = self->AllocTlab(byte_count); \
DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
obj->SetClass(klass); \
- if (kUseBakerOrBrooksReadBarrier) { \
- if (kUseBrooksReadBarrier) { \
- obj->SetReadBarrierPointer(obj); \
- } \
- obj->AssertReadBarrierPointer(); \
+ if (kUseBakerReadBarrier) { \
+ obj->AssertReadBarrierState(); \
} \
QuasiAtomic::ThreadFenceForConstructor(); \
return obj; \
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 76f500c..6b6a12d 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -32,7 +32,7 @@
mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
// For the Baker-style RB, in a rare case, we could incorrectly change the object from white
// to gray even though the object has already been marked through. This happens if a mutator
- // thread gets preempted before the AtomicSetReadBarrierPointer below, GC marks through the
+ // thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
// object (changes it from white to gray and back to white), and the thread runs and
// incorrectly changes it from white to gray. If this happens, the object will get added to the
// mark stack again and get changed back to white after it is processed.
@@ -50,14 +50,14 @@
// we can avoid an expensive CAS.
// For the Baker case, an object is marked if either the mark bit or the bitmap bit is
// set.
- success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
+ success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
} else {
success = !bitmap->AtomicTestAndSet(ref);
}
if (success) {
// Newly marked.
if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
+ DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
}
PushOntoMarkStack(ref);
}
@@ -84,8 +84,8 @@
return ref;
}
// This may or may not succeed, which is ok because the object may already be gray.
- bool success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
- ReadBarrier::GrayPtr());
+ bool success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
+ ReadBarrier::GrayState());
if (success) {
MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
immune_gray_stack_.push_back(ref);
@@ -125,10 +125,6 @@
return from_ref;
case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
mirror::Object* to_ref = GetFwdPtr(from_ref);
- if (kUseBakerReadBarrier) {
- DCHECK_NE(to_ref, ReadBarrier::GrayPtr())
- << "from_ref=" << from_ref << " to_ref=" << to_ref;
- }
if (to_ref == nullptr) {
// It isn't marked yet. Mark it by copying it to the to-space.
to_ref = Copy(from_ref);
@@ -155,7 +151,7 @@
mirror::Object* ret;
// TODO: Delete GetMarkBit check when all of the callers properly check the bit. Remaining caller
// is array allocations.
- if (from_ref == nullptr || from_ref->GetMarkBit()) {
+ if (from_ref == nullptr) {
return from_ref;
}
// TODO: Consider removing this check when we are done investigating slow paths. b/30162165
@@ -192,9 +188,9 @@
inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
// Use load acquire on the read barrier pointer to ensure that we never see a white read barrier
- // pointer with an unmarked bit due to reordering.
+ // state with an unmarked bit due to reordering.
DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
- if (kUseBakerReadBarrier && from_ref->GetReadBarrierPointerAcquire() == ReadBarrier::GrayPtr()) {
+ if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
return true;
}
return region_space_bitmap_->Test(from_ref);
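
The hunk above is the heart of the rename: marking is now a single white-to-gray CAS on the read barrier bits rather than a pointer swap. A minimal stand-alone sketch of the idiom, with std::atomic<uint32_t> standing in for ART's lock word (all names here are illustrative, not ART's API):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for ART's two read barrier states.
constexpr uint32_t kWhiteState = 0x0;  // Not marked.
constexpr uint32_t kGrayState = 0x1;   // Marked, on the mark stack.

// Models AtomicSetReadBarrierState(WhiteState(), GrayState()): only the
// thread that wins the white -> gray race returns true, so each object is
// pushed onto the mark stack at most once.
bool TryMarkWhiteToGray(std::atomic<uint32_t>& rb_state) {
  uint32_t expected = kWhiteState;
  return rb_state.compare_exchange_strong(expected, kGrayState);
}

int main() {
  std::atomic<uint32_t> state{kWhiteState};
  assert(TryMarkWhiteToGray(state));   // First marker wins.
  assert(!TryMarkWhiteToGray(state));  // Second attempt loses the race.
  assert(state.load() == kGrayState);
  return 0;
}
```
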
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 6dfab8b..11d6849 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -418,7 +418,7 @@
[&visitor](mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
// If an object is not gray, it should only have references to things in the immune spaces.
- if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
+ if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
obj->VisitReferences</*kVisitNativeRoots*/true,
kDefaultVerifyFlags,
kWithoutReadBarrier>(visitor, visitor);
@@ -463,7 +463,7 @@
if (kIsDebugBuild) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
}
- obj->SetReadBarrierPointer(ReadBarrier::GrayPtr());
+ obj->SetReadBarrierState(ReadBarrier::GrayState());
}
}
@@ -549,11 +549,11 @@
ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
- if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
collector_->ScanImmuneObject(obj);
// Done scanning the object, go back to white.
- bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
- ReadBarrier::WhitePtr());
+ bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
+ ReadBarrier::WhiteState());
CHECK(success);
}
} else {
@@ -620,9 +620,9 @@
LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
}
for (mirror::Object* obj : immune_gray_stack_) {
- DCHECK(obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
- bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
- ReadBarrier::WhitePtr());
+ DCHECK(obj->GetReadBarrierState() == ReadBarrier::GrayState());
+ bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
+ ReadBarrier::WhiteState());
DCHECK(success);
}
immune_gray_stack_.clear();
@@ -821,11 +821,11 @@
for (mirror::Object* obj : false_gray_stack_) {
DCHECK(IsMarked(obj));
// The object could be white here if a thread got preempted after a success at the
- // AtomicSetReadBarrierPointer in Mark(), GC started marking through it (but not finished so
+ // AtomicSetReadBarrierState in Mark(), GC started marking through it (but not finished so
// still gray), and the thread ran to register it onto the false gray stack.
- if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
- bool success = obj->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
- ReadBarrier::WhitePtr());
+ if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
+ bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
+ ReadBarrier::WhiteState());
DCHECK(success);
}
}
@@ -955,9 +955,9 @@
}
collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
if (kUseBakerReadBarrier) {
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
+ CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState())
<< "Ref " << ref << " " << ref->PrettyTypeOf()
- << " has non-white rb_ptr ";
+ << " has non-white rb_state ";
}
}
@@ -1026,8 +1026,8 @@
VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
if (kUseBakerReadBarrier) {
- CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr())
- << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
+ CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
+ << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
}
}
@@ -1333,8 +1333,8 @@
inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
- << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << " " << to_ref << " " << to_ref->GetReadBarrierState()
<< " is_marked=" << IsMarked(to_ref);
}
bool add_to_live_bytes = false;
@@ -1351,8 +1351,8 @@
Scan(to_ref);
}
if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
- << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << " " << to_ref << " " << to_ref->GetReadBarrierState()
<< " is_marked=" << IsMarked(to_ref);
}
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
@@ -1368,9 +1368,9 @@
// above IsInToSpace() evaluates to true and we change the color from gray to white here in this
// else block.
if (kUseBakerReadBarrier) {
- bool success = to_ref->AtomicSetReadBarrierPointer</*kCasRelease*/true>(
- ReadBarrier::GrayPtr(),
- ReadBarrier::WhitePtr());
+ bool success = to_ref->AtomicSetReadBarrierState</*kCasRelease*/true>(
+ ReadBarrier::GrayState(),
+ ReadBarrier::WhiteState());
DCHECK(success) << "Must succeed as we won the race.";
}
}
@@ -1458,9 +1458,9 @@
while (!mark_stack->IsEmpty()) {
mirror::Object* obj = mark_stack->PopBack();
if (kUseBakerReadBarrier) {
- mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
- LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_ptr="
- << rb_ptr << " is_marked=" << IsMarked(obj);
+ uint32_t rb_state = obj->GetReadBarrierState();
+ LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_state="
+ << rb_state << " is_marked=" << IsMarked(obj);
} else {
LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf()
<< " is_marked=" << IsMarked(obj);
@@ -1707,7 +1707,7 @@
void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
if (kUseBakerReadBarrier) {
LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf()
- << " holder rb_ptr=" << obj->GetReadBarrierPointer();
+ << " holder rb_state=" << obj->GetReadBarrierState();
} else {
LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf();
}
@@ -1762,10 +1762,10 @@
return;
}
bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
- CHECK(updated_all_immune_objects || ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
- << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
- << (obj != nullptr ? obj->GetReadBarrierPointer() : nullptr)
- << " ref=" << ref << " ref rb_ptr=" << ref->GetReadBarrierPointer()
+ CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ << "Unmarked immune space ref. obj=" << obj << " rb_state="
+ << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
+ << " ref=" << ref << " ref rb_state=" << ref->GetReadBarrierState()
<< " updated_all_immune_objects=" << updated_all_immune_objects;
}
} else {
@@ -2158,7 +2158,7 @@
to_ref->SetLockWord(old_lock_word, false);
- // Set the gray ptr.
+ // Set the gray state.
if (kUseBakerReadBarrier) {
- to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
+ to_ref->SetReadBarrierState(ReadBarrier::GrayState());
}
LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
@@ -2176,7 +2176,7 @@
DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
}
if (kUseBakerReadBarrier) {
- DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
+ DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
}
DCHECK(GetFwdPtr(from_ref) == to_ref);
CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
@@ -2262,14 +2262,14 @@
if (!is_los && mark_bitmap->Test(ref)) {
// Already marked.
if (kUseBakerReadBarrier) {
- DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
+ ref->GetReadBarrierState() == ReadBarrier::WhiteState());
}
} else if (is_los && los_bitmap->Test(ref)) {
// Already marked in LOS.
if (kUseBakerReadBarrier) {
- DCHECK(ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
- ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
+ DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
+ ref->GetReadBarrierState() == ReadBarrier::WhiteState());
}
} else {
// Not marked.
@@ -2282,7 +2282,7 @@
DCHECK(!los_bitmap->Test(ref));
}
if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
+ DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
}
} else {
// For the baker-style RB, we need to handle 'false-gray' cases. See the
@@ -2298,25 +2298,25 @@
// This may or may not succeed, which is ok.
bool cas_success = false;
if (kUseBakerReadBarrier) {
- cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
- ReadBarrier::GrayPtr());
+ cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
+ ReadBarrier::GrayState());
}
if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
// Already marked.
if (kUseBakerReadBarrier && cas_success &&
- ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
PushOntoFalseGrayStack(ref);
}
} else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
// Already marked in LOS.
if (kUseBakerReadBarrier && cas_success &&
- ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
+ ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
PushOntoFalseGrayStack(ref);
}
} else {
// Newly marked.
if (kUseBakerReadBarrier) {
- DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
+ DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
}
PushOntoMarkStack(ref);
}
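
The kCasRelease path changed above relies on release semantics: the gray-to-white transition must publish every field update made while scanning before the color change becomes visible. A sketch of that ordering contract, again with a plain std::atomic in place of the lock word (hypothetical names):

```cpp
#include <atomic>
#include <cstdint>

constexpr uint32_t kWhiteState = 0x0;
constexpr uint32_t kGrayState = 0x1;

// Models AtomicSetReadBarrierState</*kCasRelease*/true>(GrayState(),
// WhiteState()): the release CAS orders all prior stores (the scanned
// fields) before the color change observed by other threads.
bool FinishScanning(std::atomic<uint32_t>& rb_state) {
  uint32_t expected = kGrayState;
  return rb_state.compare_exchange_strong(expected,
                                          kWhiteState,
                                          std::memory_order_release,
                                          std::memory_order_relaxed);
}

int main() {
  std::atomic<uint32_t> state{kGrayState};
  return FinishScanning(state) ? 0 : 1;
}
```
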
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index e0bf744..ddcb6c0 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -124,9 +124,9 @@
if (obj == nullptr) {
return nullptr;
}
- if (kUseBakerOrBrooksReadBarrier) {
- // Verify all the objects have the correct forward pointer installed.
- obj->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ // Verify all the objects have the correct forward state installed.
+ obj->AssertReadBarrierState();
}
if (!immune_spaces_.IsInImmuneRegion(obj)) {
if (objects_before_forwarding_->HasAddress(obj)) {
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 77d7274..7b73e43 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -462,9 +462,9 @@
mirror::Object* holder,
MemberOffset offset) {
DCHECK(obj != nullptr);
- if (kUseBakerOrBrooksReadBarrier) {
- // Verify all the objects have the correct pointer installed.
- obj->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ // Verify all the objects have the correct state installed.
+ obj->AssertReadBarrierState();
}
if (immune_spaces_.IsInImmuneRegion(obj)) {
if (kCountMarkedObjects) {
@@ -503,9 +503,9 @@
inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
DCHECK(obj != nullptr);
- if (kUseBakerOrBrooksReadBarrier) {
- // Verify all the objects have the correct pointer installed.
- obj->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ // Verify all the objects have the correct state installed.
+ obj->AssertReadBarrierState();
}
if (immune_spaces_.IsInImmuneRegion(obj)) {
DCHECK(IsMarked(obj) != nullptr);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2ff4a3f..a815b83 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -589,13 +589,9 @@
// references.
saved_bytes_ +=
CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
- if (kUseBakerOrBrooksReadBarrier) {
- obj->AssertReadBarrierPointer();
- if (kUseBrooksReadBarrier) {
- DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
- forward_address->SetReadBarrierPointer(forward_address);
- }
- forward_address->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
+ forward_address->AssertReadBarrierState();
}
DCHECK(to_space_->HasAddress(forward_address) ||
fallback_space_->HasAddress(forward_address) ||
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 05ce9c7..97129e8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -86,11 +86,8 @@
obj = self->AllocTlab(byte_count);
DCHECK(obj != nullptr) << "AllocTlab can't fail";
obj->SetClass(klass);
- if (kUseBakerOrBrooksReadBarrier) {
- if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj.Ptr());
- }
- obj->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
}
bytes_allocated = byte_count;
usable_size = bytes_allocated;
@@ -102,11 +99,8 @@
LIKELY(obj != nullptr)) {
DCHECK(!is_running_on_memory_tool_);
obj->SetClass(klass);
- if (kUseBakerOrBrooksReadBarrier) {
- if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj.Ptr());
- }
- obj->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
}
usable_size = bytes_allocated;
pre_fence_visitor(obj, usable_size);
@@ -143,11 +137,8 @@
DCHECK_GT(bytes_allocated, 0u);
DCHECK_GT(usable_size, 0u);
obj->SetClass(klass);
- if (kUseBakerOrBrooksReadBarrier) {
- if (kUseBrooksReadBarrier) {
- obj->SetReadBarrierPointer(obj.Ptr());
- }
- obj->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
}
if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
// (Note this if statement will be constant folded away for the
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5de004b..19760af 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2406,13 +2406,9 @@
}
// Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
- if (kUseBakerOrBrooksReadBarrier) {
- obj->AssertReadBarrierPointer();
- if (kUseBrooksReadBarrier) {
- DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
- forward_address->SetReadBarrierPointer(forward_address);
- }
- forward_address->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
+ forward_address->AssertReadBarrierState();
}
return forward_address;
}
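
The allocation-path hunks above replace the Brooks pointer store with a bare assertion, which only works because the white state is the all-zero encoding that freshly zeroed memory already carries. A one-line check capturing that assumption (kWhiteState mirrors ReadBarrier::WhiteState() and is an assumption of this sketch):

```cpp
#include <cstdint>

// Freshly allocated objects come from zeroed memory, so an all-zero lock
// word must already decode to the white (not marked) read barrier state;
// that is why the allocators can assert instead of storing a pointer.
constexpr uint32_t kWhiteState = 0x0;
static_assert(kWhiteState == 0u, "zeroed lock word must decode as white");

int main() { return 0; }
```
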
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 4e6f7da..a0eb197 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -75,19 +75,19 @@
// collector (SemiSpace) is running.
CHECK(ref != nullptr);
collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
- mirror::Object* rb_ptr = ref->GetReadBarrierPointer();
- if (rb_ptr == ReadBarrier::GrayPtr()) {
- ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr());
- CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
+ uint32_t rb_state = ref->GetReadBarrierState();
+ if (rb_state == ReadBarrier::GrayState()) {
+ ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::WhiteState());
+ CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
} else {
// In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
// find it here, which is OK.
- CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
+ CHECK_EQ(rb_state, ReadBarrier::WhiteState()) << "ref=" << ref << " rb_state=" << rb_state;
ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
// The referent could be null if it's cleared by a mutator (Reference.clear()).
if (referent != nullptr) {
CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
- << "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer()
+ << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
<< " referent=" << referent;
}
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 6035406..0b602e9 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -399,8 +399,8 @@
auto* obj = reinterpret_cast<mirror::Object*>(current);
CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
CHECK(live_bitmap_->Test(obj)) << obj->PrettyTypeOf();
- if (kUseBakerOrBrooksReadBarrier) {
- obj->AssertReadBarrierPointer();
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
}
current += RoundUp(obj->SizeOf(), kObjectAlignment);
}
@@ -1606,7 +1606,7 @@
std::ostringstream oss;
bool first = true;
- for (auto msg : error_msgs) {
+ for (const auto& msg : error_msgs) {
if (!first) {
oss << "\n ";
}
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 7778871..cbb3d73 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -108,13 +108,10 @@
EXPECT_GE(size, SizeOfZeroLengthByteArray());
EXPECT_TRUE(byte_array_class != nullptr);
o->SetClass(byte_array_class);
- if (kUseBakerOrBrooksReadBarrier) {
+ if (kUseBakerReadBarrier) {
// Like the proper heap object allocation, install and verify
- // the correct read barrier pointer.
- if (kUseBrooksReadBarrier) {
- o->SetReadBarrierPointer(o);
- }
- o->AssertReadBarrierPointer();
+ // the correct read barrier state.
+ o->AssertReadBarrierState();
}
mirror::Array* arr = o->AsArray<kVerifyNone>();
size_t header_size = SizeOfZeroLengthByteArray();
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 6c189b0..f938c9f 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -98,6 +98,10 @@
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<int32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW 0x40000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddressOverflow)))
+#define LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT 0x3
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT), (static_cast<uint32_t>(art::LockWord::kForwardingAddressShift)))
#define LOCK_WORD_GC_STATE_MASK_SHIFTED 0x30000000
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
#define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
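
The generated header works by pinning every assembly-visible literal to its C++ definition at compile time. A simplified model of one DEFINE_CHECK_EQ entry (values and names assumed for illustration only):

```cpp
#include <cstdint>

// Simplified model of the generated checks: the literal that the assembly
// sources use must equal the authoritative C++ constant, or the build fails.
namespace art_model {
constexpr uint32_t kForwardingAddressShift = 0x3;
}
#define LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT 0x3
static_assert(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT ==
                  art_model::kForwardingAddressShift,
              "assembly constant out of sync with its C++ definition");

int main() { return 0; }
```
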
diff --git a/runtime/handle.h b/runtime/handle.h
index d33d4a6..3db3be2 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -42,13 +42,9 @@
Handle() : reference_(nullptr) {
}
- ALWAYS_INLINE Handle(const Handle<T>& handle) : reference_(handle.reference_) {
- }
+ ALWAYS_INLINE Handle(const Handle<T>& handle) = default;
- ALWAYS_INLINE Handle<T>& operator=(const Handle<T>& handle) {
- reference_ = handle.reference_;
- return *this;
- }
+ ALWAYS_INLINE Handle<T>& operator=(const Handle<T>& handle) = default;
ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) {
}
@@ -109,15 +105,10 @@
}
ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : Handle<T>(handle.reference_) {
- }
+ REQUIRES_SHARED(Locks::mutator_lock_) = default;
ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- Handle<T>::operator=(handle);
- return *this;
- }
+ REQUIRES_SHARED(Locks::mutator_lock_) = default;
ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference)
REQUIRES_SHARED(Locks::mutator_lock_)
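
Replacing the hand-written copy operations with `= default` is what makes Handle<> trivially copyable, which the new static_asserts in handle_scope_test.cc below depend on. A minimal illustration of the language rule, using hypothetical types:

```cpp
#include <type_traits>

struct Ref { int* ptr; };

// A user-provided copy constructor/assignment, even one identical to the
// default, makes the type non-trivially-copyable.
struct HandWritten {
  Ref r;
  HandWritten(const HandWritten& o) : r(o.r) {}
  HandWritten& operator=(const HandWritten& o) { r = o.r; return *this; }
};

// Defaulted copy operations keep the type trivially copyable.
struct Defaulted {
  Ref r;
  Defaulted(const Defaulted&) = default;
  Defaulted& operator=(const Defaulted&) = default;
};

static_assert(!std::is_trivially_copyable<HandWritten>::value, "hand-written");
static_assert(std::is_trivially_copyable<Defaulted>::value, "defaulted");

int main() { return 0; }
```
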
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 8a0aba6..adb7d8a 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -69,7 +69,7 @@
number_of_references_(num_references) {}
// Variable sized constructor.
- BaseHandleScope(BaseHandleScope* link)
+ explicit BaseHandleScope(BaseHandleScope* link)
: link_(link),
number_of_references_(kNumReferencesVariableSized) {}
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index 92063c4..aab1d9c 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -14,15 +14,27 @@
* limitations under the License.
*/
+#include <type_traits>
+
#include "base/enums.h"
#include "common_runtime_test.h"
#include "gtest/gtest.h"
+#include "handle.h"
#include "handle_scope-inl.h"
+#include "mirror/object.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
namespace art {
+// Handles are value objects and should be trivially copyable.
+static_assert(std::is_trivially_copyable<Handle<mirror::Object>>::value,
+ "Handle should be trivially copyable");
+static_assert(std::is_trivially_copyable<MutableHandle<mirror::Object>>::value,
+ "MutableHandle should be trivially copyable");
+static_assert(std::is_trivially_copyable<ScopedNullHandle<mirror::Object>>::value,
+ "ScopedNullHandle should be trivially copyable");
+
class HandleScopeTest : public CommonRuntimeTest {};
// Test the offsets computed for members of HandleScope. Because of cross-compiling
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 2336759..8cbe491 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -574,9 +574,9 @@
}
void WriteStringTable() {
- for (const std::pair<std::string, HprofStringId>& p : strings_) {
+ for (const auto& p : strings_) {
const std::string& string = p.first;
- const size_t id = p.second;
+ const HprofStringId id = p.second;
output_->StartNewRecord(HPROF_TAG_STRING, kHprofTime);
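
The WriteStringTable fix matters because a map's value_type carries a const key: the old `const std::pair<std::string, HprofStringId>&` could not bind directly and silently copied every entry. A sketch of the two correct spellings (HprofStringId assumed to be an integer id here); the register_line.cc hunk further down applies the same fix with the explicit const-key form:

```cpp
#include <cstdint>
#include <map>
#include <string>

using HprofStringId = uint32_t;  // assumed typedef for illustration

int main() {
  std::map<std::string, HprofStringId> strings{{"a", 1}, {"b", 2}};
  // The map's value_type is std::pair<const std::string, HprofStringId>.
  // Binding a reference to std::pair<std::string, ...> instead forces a
  // conversion, so each iteration materializes (copies) a temporary pair.
  for (const std::pair<const std::string, HprofStringId>& p : strings) {
    (void)p;  // exact value_type: no copy
  }
  for (const auto& p : strings) {  // simplest fix: let auto deduce value_type
    (void)p;
  }
  return 0;
}
```
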
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index e607d4d..981cc1b 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -774,6 +774,24 @@
return is_invoke_exact;
}
+inline static ObjPtr<mirror::Class> GetAndInitializeDeclaringClass(Thread* self, ArtField* field)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Method handle invocations on static fields must ensure the class is
+  // initialized. This usually happens when an instance is constructed or
+  // when class members are referenced, but it is not guaranteed when
+  // looking up method handles.
+ ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
+ if (UNLIKELY(!klass->IsInitialized())) {
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(&klass));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h, true, true)) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+ }
+ return klass;
+}
+
template<bool is_range, bool do_access_check>
inline bool DoInvokePolymorphic(Thread* self,
ShadowFrame& shadow_frame,
@@ -957,12 +975,20 @@
return DoFieldPutForInvokePolymorphic(self, shadow_frame, obj, field, field_type, arg[1]);
}
case kStaticGet: {
- ObjPtr<mirror::Object> obj = field->GetDeclaringClass();
+ ObjPtr<mirror::Object> obj = GetAndInitializeDeclaringClass(self, field);
+ if (obj == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
DoFieldGetForInvokePolymorphic(self, shadow_frame, obj, field, field_type, result);
return true;
}
case kStaticPut: {
- ObjPtr<mirror::Object> obj = field->GetDeclaringClass();
+ ObjPtr<mirror::Object> obj = GetAndInitializeDeclaringClass(self, field);
+ if (obj == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
return DoFieldPutForInvokePolymorphic(self, shadow_frame, obj, field, field_type, arg[0]);
}
default:
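
GetAndInitializeDeclaringClass() closes the gap where a method-handle static accessor could run before the declaring class's <clinit>. A hypothetical model of that guarantee, with std::call_once standing in for ART's EnsureInitialized (the constant mirrors the value the updated test below prints):

```cpp
#include <iostream>
#include <mutex>

// Any static-field accessor reached through a method handle must run the
// class initializer first, exactly once, even if nothing else has touched
// the class yet.
struct LazyClass {
  std::once_flag init;
  int s_fi = 0;
  void EnsureInitialized() {
    std::call_once(init, [this] { s_fi = 1515870810; });  // models <clinit>
  }
  int StaticGet() {
    EnsureInitialized();  // the step the interpreter change guarantees
    return s_fi;
  }
};

int main() {
  LazyClass c;
  std::cout << c.StaticGet() << "\n";  // prints the <clinit>-assigned value
  return 0;
}
```
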
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 85bfd17..fad7d90 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -781,7 +781,7 @@
SendRequestAndPossiblySuspend(pReq, suspend_policy, threadId);
}
-static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list,
+static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*>& match_list,
ObjectId thread_id)
REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0, e = match_list.size(); i < e; ++i) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 538b6eb..dea301c 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -61,7 +61,7 @@
*/
class LockWord {
public:
- enum SizeShiftsAndMasks { // private marker to avoid generate-operator-out.py from processing.
+ enum SizeShiftsAndMasks : uint32_t { // private marker to avoid generate-operator-out.py from processing.
// Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
kStateSize = 2,
kReadBarrierStateSize = 1,
@@ -91,6 +91,8 @@
kStateFat = 1,
kStateHash = 2,
kStateForwardingAddress = 3,
+ kStateForwardingAddressShifted = kStateForwardingAddress << kStateShift,
+ kStateForwardingAddressOverflow = (1 + kStateMask - kStateForwardingAddress) << kStateShift,
// Read barrier bit.
kReadBarrierStateShift = kThinLockCountSize + kThinLockCountShift,
@@ -140,7 +142,7 @@
static LockWord FromForwardingAddress(size_t target) {
DCHECK_ALIGNED(target, (1 << kStateSize));
- return LockWord((target >> kForwardingAddressShift) | (kStateForwardingAddress << kStateShift));
+ return LockWord((target >> kForwardingAddressShift) | kStateForwardingAddressShifted);
}
static LockWord FromHashCode(uint32_t hash_code, uint32_t gc_state) {
@@ -202,6 +204,8 @@
void SetReadBarrierState(uint32_t rb_state) {
DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
+ DCHECK(rb_state == ReadBarrier::WhiteState() ||
+ rb_state == ReadBarrier::GrayState()) << rb_state;
DCHECK_NE(static_cast<uint32_t>(GetState()), static_cast<uint32_t>(kForwardingAddress));
// Clear and or the bits.
value_ &= ~(kReadBarrierStateMask << kReadBarrierStateShift);
@@ -256,6 +260,11 @@
LockWord();
explicit LockWord(uint32_t val) : value_(val) {
+    // Make sure adding the overflow constant to the shifted forwarding-address state overflows 32 bits.
+ constexpr uint64_t overflow = static_cast<uint64_t>(kStateForwardingAddressShifted) +
+ static_cast<uint64_t>(kStateForwardingAddressOverflow);
+ constexpr bool is_larger = overflow > static_cast<uint64_t>(0xFFFFFFFF);
+ static_assert(is_larger, "should have overflowed");
CheckReadBarrierState();
}
@@ -270,9 +279,8 @@
if (!kUseReadBarrier) {
DCHECK_EQ(rb_state, 0U);
} else {
- DCHECK(rb_state == ReadBarrier::white_ptr_ ||
- rb_state == ReadBarrier::gray_ptr_ ||
- rb_state == ReadBarrier::black_ptr_) << rb_state;
+ DCHECK(rb_state == ReadBarrier::WhiteState() ||
+ rb_state == ReadBarrier::GrayState()) << rb_state;
}
}
}
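
The two new constants make the forwarding-address encoding checkable: the state lives in the top bits and the aligned target address is stored shifted. A simplified round-trip sketch under an assumed bit layout (not ART's exact one):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumed layout for illustration: 2-bit state in the top bits, forwarding
// address stored shifted right by kForwardingAddressShift.
constexpr uint32_t kStateShift = 30;
constexpr uint32_t kForwardingAddressShift = 3;
constexpr uint32_t kStateForwardingAddressShifted = 3u << kStateShift;

uint32_t FromForwardingAddress(size_t target) {
  // Target must be aligned so no low bits are lost by the shift.
  assert((target & ((1u << kForwardingAddressShift) - 1)) == 0);
  return static_cast<uint32_t>(target >> kForwardingAddressShift) |
         kStateForwardingAddressShifted;
}

size_t ToForwardingAddress(uint32_t value) {
  return static_cast<size_t>(value & ~kStateForwardingAddressShifted)
         << kForwardingAddressShift;
}

int main() {
  size_t addr = 0x12345678;  // 8-byte aligned
  assert(ToForwardingAddress(FromForwardingAddress(addr)) == addr);
  return 0;
}
```
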
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 03d6487..db46027 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -64,19 +64,45 @@
return GetFieldObject<ClassExt>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_));
}
-void Class::SetExtData(ObjPtr<ClassExt> ext) {
- CHECK(ext != nullptr) << PrettyClass();
- // TODO It might be wise to just create an internal (global?) mutex that we synchronize on instead
- // to prevent any possibility of deadlocks with java code. Alternatively we might want to come up
- // with some other abstraction.
- DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId())
- << "The " << PrettyClass() << " object should be locked when writing to the extData field.";
- DCHECK(GetExtData() == nullptr)
- << "The extData for " << PrettyClass() << " has already been set!";
- if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_), ext);
+ClassExt* Class::EnsureExtDataPresent(Thread* self) {
+ ObjPtr<ClassExt> existing(GetExtData());
+ if (!existing.IsNull()) {
+ return existing.Ptr();
+ }
+ StackHandleScope<3> hs(self);
+ // Handlerize 'this' since we are allocating here.
+ Handle<Class> h_this(hs.NewHandle(this));
+ // Clear exception so we can allocate.
+ Handle<Throwable> throwable(hs.NewHandle(self->GetException()));
+ self->ClearException();
+ // Allocate the ClassExt
+ Handle<ClassExt> new_ext(hs.NewHandle(ClassExt::Alloc(self)));
+ if (new_ext.Get() == nullptr) {
+ // OOM allocating the classExt.
+ // TODO Should we restore the suppressed exception?
+ self->AssertPendingOOMException();
+ return nullptr;
} else {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_), ext);
+ MemberOffset ext_offset(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_));
+ bool set;
+ // Set the ext_data_ field using CAS semantics.
+ if (Runtime::Current()->IsActiveTransaction()) {
+ set = h_this->CasFieldStrongSequentiallyConsistentObject<true>(ext_offset,
+ ObjPtr<ClassExt>(nullptr),
+ new_ext.Get());
+ } else {
+ set = h_this->CasFieldStrongSequentiallyConsistentObject<false>(ext_offset,
+ ObjPtr<ClassExt>(nullptr),
+ new_ext.Get());
+ }
+ ObjPtr<ClassExt> ret(set ? new_ext.Get() : h_this->GetExtData());
+ DCHECK(!set || h_this->GetExtData() == new_ext.Get());
+ CHECK(!ret.IsNull());
+ // Restore the exception if there was one.
+ if (throwable.Get() != nullptr) {
+ self->SetException(throwable.Get());
+ }
+ return ret.Ptr();
}
}
@@ -108,34 +134,16 @@
}
}
- {
- // Ensure we lock around 'this' when we set the ClassExt.
- ObjectLock<mirror::Class> lock(self, h_this);
- StackHandleScope<2> hs(self);
- // Remember the current exception.
- Handle<Throwable> exception(hs.NewHandle(self->GetException()));
- CHECK(exception.Get() != nullptr);
- MutableHandle<ClassExt> ext(hs.NewHandle(h_this->GetExtData()));
- if (ext.Get() == nullptr) {
- // Cannot have exception while allocating.
- self->ClearException();
- ext.Assign(ClassExt::Alloc(self));
- DCHECK(ext.Get() == nullptr || ext->GetVerifyError() == nullptr);
- if (ext.Get() != nullptr) {
- self->AssertNoPendingException();
- h_this->SetExtData(ext.Get());
- self->SetException(exception.Get());
- } else {
- // TODO Should we restore the old exception anyway?
- self->AssertPendingOOMException();
- }
- }
- if (ext.Get() != nullptr) {
- ext->SetVerifyError(self->GetException());
- }
+ ObjPtr<ClassExt> ext(h_this->EnsureExtDataPresent(self));
+ if (!ext.IsNull()) {
+ self->AssertPendingException();
+ ext->SetVerifyError(self->GetException());
+ } else {
+ self->AssertPendingOOMException();
}
self->AssertPendingException();
}
+
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
h_this->SetField32Volatile<true>(StatusOffset(), new_status);
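
EnsureExtDataPresent() is the classic allocate-then-CAS-publish idiom: every racer may allocate, only one CAS from null wins, and the losers adopt the winner's object. A sketch with plain new/delete standing in for GC allocation:

```cpp
#include <atomic>
#include <cassert>

struct ClassExt { int payload = 0; };

// Racing threads each allocate a candidate, but only one CAS from nullptr
// can succeed; losers discard their candidate and return the winner's.
ClassExt* EnsureExtDataPresent(std::atomic<ClassExt*>& ext_data) {
  ClassExt* existing = ext_data.load();
  if (existing != nullptr) {
    return existing;
  }
  ClassExt* candidate = new ClassExt();
  ClassExt* expected = nullptr;
  if (ext_data.compare_exchange_strong(expected, candidate)) {
    return candidate;  // We won the race; our object is published.
  }
  delete candidate;    // Lost the race; `expected` now holds the winner.
  return expected;
}

int main() {
  std::atomic<ClassExt*> ext{nullptr};
  ClassExt* a = EnsureExtDataPresent(ext);
  ClassExt* b = EnsureExtDataPresent(ext);
  assert(a == b && a != nullptr);
  delete a;
  return 0;
}
```
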
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 23c70ff..711914d 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1133,6 +1133,12 @@
ClassExt* GetExtData() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns the ExtData for this class, allocating one if necessary. This should be the only way
+ // to force ext_data_ to be set. No functions are available for changing an already set ext_data_
+ // since doing so is not allowed.
+ ClassExt* EnsureExtDataPresent(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+
uint16_t GetDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
}
@@ -1320,9 +1326,6 @@
ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Set the extData field. This should be done while the 'this' is locked to prevent races.
- void SetExtData(ObjPtr<ClassExt> ext) REQUIRES_SHARED(Locks::mutator_lock_);
-
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(ObjPtr<Class> access_to,
ArtField* field,
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 3bf9d94..6d29ed3 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -135,25 +135,82 @@
Monitor::Wait(self, this, ms, ns, true, kTimedWaiting);
}
-inline Object* Object::GetReadBarrierPointer() {
+inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
+#ifdef USE_BAKER_READ_BARRIER
+ CHECK(kUseBakerReadBarrier);
+#if defined(__arm__)
+ uintptr_t obj = reinterpret_cast<uintptr_t>(this);
+ uintptr_t result;
+ DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
+ // Use inline assembly to prevent the compiler from optimizing away the false dependency.
+ __asm__ __volatile__(
+ "ldr %[result], [%[obj], #4]\n\t"
+ // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
+      // zero, without them being able to assume that fact.
+ "eor %[fad], %[result], %[result]\n\t"
+ : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
+ : [obj] "r" (obj));
+ DCHECK_EQ(*fake_address_dependency, 0U);
+ LockWord lw(static_cast<uint32_t>(result));
+ uint32_t rb_state = lw.ReadBarrierState();
+ return rb_state;
+#elif defined(__aarch64__)
+ uintptr_t obj = reinterpret_cast<uintptr_t>(this);
+ uintptr_t result;
+ DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
+ // Use inline assembly to prevent the compiler from optimizing away the false dependency.
+ __asm__ __volatile__(
+ "ldr %w[result], [%[obj], #4]\n\t"
+ // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
+      // zero, without them being able to assume that fact.
+ "eor %[fad], %[result], %[result]\n\t"
+ : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
+ : [obj] "r" (obj));
+ DCHECK_EQ(*fake_address_dependency, 0U);
+ LockWord lw(static_cast<uint32_t>(result));
+ uint32_t rb_state = lw.ReadBarrierState();
+ return rb_state;
+#elif defined(__i386__) || defined(__x86_64__)
+ LockWord lw = GetLockWord(false);
+ // i386/x86_64 don't need fake address dependency. Use a compiler fence to avoid compiler
+ // reordering.
+ *fake_address_dependency = 0;
+ std::atomic_signal_fence(std::memory_order_acquire);
+ uint32_t rb_state = lw.ReadBarrierState();
+ return rb_state;
+#else
+ // mips/mips64
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ UNUSED(fake_address_dependency);
+#endif
+#else // !USE_BAKER_READ_BARRIER
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ UNUSED(fake_address_dependency);
+#endif
+}
+
+inline uint32_t Object::GetReadBarrierState() {
#ifdef USE_BAKER_READ_BARRIER
DCHECK(kUseBakerReadBarrier);
- return reinterpret_cast<Object*>(GetLockWord(false).ReadBarrierState());
-#elif USE_BROOKS_READ_BARRIER
- DCHECK(kUseBrooksReadBarrier);
- return GetFieldObject<Object, kVerifyNone, kWithoutReadBarrier>(
- OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_));
+ LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ uint32_t rb_state = lw.ReadBarrierState();
+ DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
+ return rb_state;
#else
LOG(FATAL) << "Unreachable";
UNREACHABLE();
#endif
}
-inline Object* Object::GetReadBarrierPointerAcquire() {
+inline uint32_t Object::GetReadBarrierStateAcquire() {
#ifdef USE_BAKER_READ_BARRIER
DCHECK(kUseBakerReadBarrier);
LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
- return reinterpret_cast<Object*>(lw.ReadBarrierState());
+ uint32_t rb_state = lw.ReadBarrierState();
+ DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
+ return rb_state;
#else
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -169,48 +226,38 @@
#endif
}
-inline void Object::SetReadBarrierPointer(Object* rb_ptr) {
+inline void Object::SetReadBarrierState(uint32_t rb_state) {
#ifdef USE_BAKER_READ_BARRIER
DCHECK(kUseBakerReadBarrier);
- DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
- DCHECK_NE(rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
+ DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord lw = GetLockWord(false);
- lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
+ lw.SetReadBarrierState(rb_state);
SetLockWord(lw, false);
-#elif USE_BROOKS_READ_BARRIER
- DCHECK(kUseBrooksReadBarrier);
- // We don't mark the card as this occurs as part of object allocation. Not all objects have
- // backing cards, such as large objects.
- SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
- OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_), rb_ptr);
#else
LOG(FATAL) << "Unreachable";
UNREACHABLE();
- UNUSED(rb_ptr);
+ UNUSED(rb_state);
#endif
}
template<bool kCasRelease>
-inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr) {
+inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
#ifdef USE_BAKER_READ_BARRIER
DCHECK(kUseBakerReadBarrier);
- DCHECK_EQ(reinterpret_cast<uint64_t>(expected_rb_ptr) >> 32, 0U);
- DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
- DCHECK_NE(expected_rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
- DCHECK_NE(rb_ptr, ReadBarrier::BlackPtr()) << "Setting to black is not supported";
+ DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
+ DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord expected_lw;
LockWord new_lw;
do {
LockWord lw = GetLockWord(false);
- if (UNLIKELY(reinterpret_cast<Object*>(lw.ReadBarrierState()) != expected_rb_ptr)) {
+ if (UNLIKELY(lw.ReadBarrierState() != expected_rb_state)) {
// Lost the race.
return false;
}
expected_lw = lw;
- expected_lw.SetReadBarrierState(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(expected_rb_ptr)));
+ expected_lw.SetReadBarrierState(expected_rb_state);
new_lw = lw;
- new_lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
+ new_lw.SetReadBarrierState(rb_state);
// ConcurrentCopying::ProcessMarkStackRef uses this with kCasRelease == true.
// If kCasRelease == true, use a CAS release so that when GC updates all the fields of
// an object and then changes the object from gray to black, the field updates (stores) will be
@@ -219,23 +266,8 @@
CasLockWordWeakRelease(expected_lw, new_lw) :
CasLockWordWeakRelaxed(expected_lw, new_lw)));
return true;
-#elif USE_BROOKS_READ_BARRIER
- DCHECK(kUseBrooksReadBarrier);
- MemberOffset offset = OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_);
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + offset.SizeValue();
- Atomic<uint32_t>* atomic_rb_ptr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
- HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_rb_ptr));
- HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(rb_ptr));
- do {
- if (UNLIKELY(atomic_rb_ptr->LoadRelaxed() != expected_ref.reference_)) {
- // Lost the race.
- return false;
- }
- } while (!atomic_rb_ptr->CompareExchangeWeakSequentiallyConsistent(expected_ref.reference_,
- new_ref.reference_));
- return true;
#else
- UNUSED(expected_rb_ptr, rb_ptr);
+ UNUSED(expected_rb_state, rb_state);
LOG(FATAL) << "Unreachable";
UNREACHABLE();
#endif
@@ -259,19 +291,12 @@
}
-inline void Object::AssertReadBarrierPointer() const {
- if (kUseBakerReadBarrier) {
- Object* obj = const_cast<Object*>(this);
- DCHECK(obj->GetReadBarrierPointer() == nullptr)
- << "Bad Baker pointer: obj=" << reinterpret_cast<void*>(obj)
- << " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
- } else {
- CHECK(kUseBrooksReadBarrier);
- Object* obj = const_cast<Object*>(this);
- DCHECK_EQ(obj, obj->GetReadBarrierPointer())
- << "Bad Brooks pointer: obj=" << reinterpret_cast<void*>(obj)
- << " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
- }
+inline void Object::AssertReadBarrierState() const {
+ CHECK(kUseBakerReadBarrier);
+ Object* obj = const_cast<Object*>(this);
+ DCHECK(obj->GetReadBarrierState() == ReadBarrier::WhiteState())
+      << "Bad Baker read barrier state: obj=" << reinterpret_cast<void*>(obj)
+      << " rb_state=" << obj->GetReadBarrierState();
}
template<VerifyObjectFlags kVerifyFlags>
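
AtomicSetReadBarrierState() must update only the read barrier bits while concurrent lock and hash-code transitions keep mutating the rest of the word, hence the re-read-and-retry loop. A sketch of that loop under an assumed bit layout:

```cpp
#include <atomic>
#include <cstdint>

// Assumed layout for illustration: a 1-bit read barrier field inside the
// 32-bit lock word.
constexpr uint32_t kRbStateMask = 0x1;
constexpr uint32_t kRbStateShift = 28;

// Re-read the whole lock word on every iteration so concurrent thin-lock or
// hash-code updates are preserved, and bail out once the rb bits no longer
// match `expected_rb_state`.
bool AtomicSetRbState(std::atomic<uint32_t>& lock_word,
                      uint32_t expected_rb_state,
                      uint32_t rb_state) {
  uint32_t lw;
  uint32_t new_lw;
  do {
    lw = lock_word.load(std::memory_order_relaxed);
    if (((lw >> kRbStateShift) & kRbStateMask) != expected_rb_state) {
      return false;  // Lost the race.
    }
    new_lw = (lw & ~(kRbStateMask << kRbStateShift)) |
             (rb_state << kRbStateShift);
  } while (!lock_word.compare_exchange_weak(lw, new_lw));
  return true;
}

int main() {
  std::atomic<uint32_t> lock_word{0};                 // white, unlocked
  return AtomicSetRbState(lock_word, 0, 1) ? 0 : 1;   // white -> gray
}
```
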
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 8cfb60e..f5b9ab3 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -235,8 +235,6 @@
}
for (ObjPtr<Class> cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
for (ArtField& field : cur->GetIFields()) {
- StackHandleScope<1> hs(Thread::Current());
- Handle<Object> h_object(hs.NewHandle(new_value));
if (field.GetOffset().Int32Value() == field_offset.Int32Value()) {
CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot);
// TODO: resolve the field type for moving GC.
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 886637b..67b5ddb 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -94,19 +94,22 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetClass(ObjPtr<Class> new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
- // TODO: Clean these up and change to return int32_t
- Object* GetReadBarrierPointer() REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Get the read barrier pointer with release semantics, only supported for baker.
- Object* GetReadBarrierPointerAcquire() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Get the read barrier state with a fake address dependency.
+ // '*fake_address_dependency' will be set to 0.
+ ALWAYS_INLINE uint32_t GetReadBarrierState(uintptr_t* fake_address_dependency)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // This version does not offer any special mechanism to prevent load-load reordering.
+ ALWAYS_INLINE uint32_t GetReadBarrierState() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Get the read barrier state with a load-acquire.
+ ALWAYS_INLINE uint32_t GetReadBarrierStateAcquire() REQUIRES_SHARED(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
- void SetReadBarrierPointer(Object* rb_ptr) REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetReadBarrierState(uint32_t rb_state) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kCasRelease = false>
- ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
+ ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state)
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE uint32_t GetMarkBit() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -114,7 +117,8 @@
ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
REQUIRES_SHARED(Locks::mutator_lock_);
- void AssertReadBarrierPointer() const REQUIRES_SHARED(Locks::mutator_lock_);
+ // Assert that the read barrier state is in the default (white) state.
+ ALWAYS_INLINE void AssertReadBarrierState() const REQUIRES_SHARED(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
// invoke-interface to detect incompatible interface types.
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index eb74fcf..f583167 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -771,7 +771,7 @@
return false;
}
// Can't deflate if our lock count is too high.
- if (monitor->lock_count_ > LockWord::kThinLockMaxCount) {
+ if (static_cast<uint32_t>(monitor->lock_count_) > LockWord::kThinLockMaxCount) {
return false;
}
// Deflate to a thin lock.
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 4ee46dc..4fbfe47 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -401,14 +401,11 @@
Thread* const self = Thread::Current();
ThreadPool thread_pool("the pool", 2);
ScopedObjectAccess soa(self);
- StackHandleScope<3> hs(self);
+ StackHandleScope<1> hs(self);
Handle<mirror::Object> obj1(
hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!")));
- Handle<mirror::Object> obj2(
- hs.NewHandle<mirror::Object>(mirror::String::AllocFromModifiedUtf8(self, "hello, world!")));
{
ObjectLock<mirror::Object> lock1(self, obj1);
- ObjectLock<mirror::Object> lock2(self, obj1);
{
ObjectTryLock<mirror::Object> trylock(self, obj1);
EXPECT_TRUE(trylock.Acquired());
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index fde0a2c..cf9efe0 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -377,7 +377,7 @@
}
}
-static bool AreSharedLibrariesOk(const std::string shared_libraries,
+static bool AreSharedLibrariesOk(const std::string& shared_libraries,
std::priority_queue<DexFileAndClassPair>& queue) {
if (shared_libraries.empty()) {
if (queue.empty()) {
@@ -398,7 +398,7 @@
while (!temp.empty() && index < shared_libraries_split.size() - 1) {
DexFileAndClassPair pair(temp.top());
const DexFile* dex_file = pair.GetDexFile();
- std::string dex_filename(dex_file->GetLocation());
+ const std::string& dex_filename = dex_file->GetLocation();
uint32_t dex_checksum = dex_file->GetLocationChecksum();
if (dex_filename != shared_libraries_split[index] ||
dex_checksum != std::stoul(shared_libraries_split[index + 1])) {
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 32a5582..fd7e56d 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -199,8 +199,6 @@
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Interfaces");
StackHandleScope<7> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
Handle<mirror::Class> proxyClass0;
Handle<mirror::Class> proxyClass1;
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index be95600..37cf257 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -40,14 +40,16 @@
}
}
if (kUseBakerReadBarrier) {
- // The higher bits of the rb_ptr, rb_ptr_high_bits (must be zero)
- // is used to create artificial data dependency from the is_gray
- // load to the ref field (ptr) load to avoid needing a load-load
- // barrier between the two.
- uintptr_t rb_ptr_high_bits;
- bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
+ // fake_address_dependency (must be zero) is used to create artificial data dependency from
+ // the is_gray load to the ref field (ptr) load to avoid needing a load-load barrier between
+ // the two.
+ uintptr_t fake_address_dependency;
+ bool is_gray = IsGray(obj, &fake_address_dependency);
+ if (kEnableReadBarrierInvariantChecks) {
+ CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
+ }
ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
- rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
+ fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
MirrorType* ref = ref_addr->AsMirrorPtr();
MirrorType* old_ref = ref;
if (is_gray) {
@@ -60,9 +62,6 @@
offset, old_ref, ref);
}
}
- if (kEnableReadBarrierInvariantChecks) {
- CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
- }
AssertToSpaceInvariant(obj, offset, ref);
return ref;
} else if (kUseBrooksReadBarrier) {
@@ -223,20 +222,14 @@
return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}
-inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
- uintptr_t* out_rb_ptr_high_bits) {
- mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
- uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
- uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
- if (kEnableReadBarrierInvariantChecks) {
- CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
- rb_ptr_low_bits == black_ptr_)
- << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << obj->PrettyTypeOf();
- }
- bool is_gray = rb_ptr_low_bits == gray_ptr_;
- // The high bits are supposed to be zero. We check this on the caller side.
- *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
- return is_gray;
+inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
+ return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
+}
+
+inline bool ReadBarrier::IsGray(mirror::Object* obj) {
+ // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
+ // GetReadBarrierStateAcquire() has load-acquire semantics.
+ return obj->GetReadBarrierStateAcquire() == gray_state_;
}
} // namespace art
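
The fake address dependency consumed here is what lets the fast path skip a load-load barrier: the zero produced by the state load is OR'ed into the field address, making the reference load depend on the state load. The shape of that fast path, with trivial stand-ins so the sketch compiles:

```cpp
#include <cstdint>

struct Obj { Obj* field = nullptr; };

// Stand-ins so the sketch is self-contained; the real versions live in the
// collector and in Object::GetReadBarrierState(uintptr_t*).
Obj* Mark(Obj* ref) { return ref; }
bool IsGray(Obj* /*obj*/, uintptr_t* fad) { *fad = 0; return false; }

// `fad` is always zero, but because it is produced by the state load,
// OR-ing it into the field address makes the reference load depend on the
// state load, ordering the two loads on weakly ordered CPUs.
Obj* ReadField(Obj* obj) {
  uintptr_t fad;
  bool is_gray = IsGray(obj, &fad);
  Obj** addr = reinterpret_cast<Obj**>(
      fad | reinterpret_cast<uintptr_t>(&obj->field));
  Obj* ref = *addr;
  return is_gray ? Mark(ref) : ref;
}

int main() {
  Obj o;
  return ReadField(&o) == nullptr ? 0 : 1;
}
```
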
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index a861861..cbc2697 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -82,26 +82,32 @@
// ALWAYS_INLINE on this caused a performance regression b/26744236.
static mirror::Object* Mark(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
- static mirror::Object* WhitePtr() {
- return reinterpret_cast<mirror::Object*>(white_ptr_);
+ static constexpr uint32_t WhiteState() {
+ return white_state_;
}
- static mirror::Object* GrayPtr() {
- return reinterpret_cast<mirror::Object*>(gray_ptr_);
- }
- static mirror::Object* BlackPtr() {
- return reinterpret_cast<mirror::Object*>(black_ptr_);
+ static constexpr uint32_t GrayState() {
+ return gray_state_;
}
- ALWAYS_INLINE static bool HasGrayReadBarrierPointer(mirror::Object* obj,
- uintptr_t* out_rb_ptr_high_bits)
+ // *fake_address_dependency will be set to 0; it should be bitwise-or'ed with the address of
+ // the subsequent load to prevent the reordering of the read barrier state load and the
+ // subsequent object reference load (from one of `obj`'s fields).
+ ALWAYS_INLINE static bool IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them.
- static constexpr uintptr_t white_ptr_ = 0x0; // Not marked.
- static constexpr uintptr_t gray_ptr_ = 0x1; // Marked, but not marked through. On mark stack.
- // TODO: black_ptr_ is unused, we should remove it.
- static constexpr uintptr_t black_ptr_ = 0x2; // Marked through. Used for non-moving objects.
- static constexpr uintptr_t rb_ptr_mask_ = 0x1; // The low bits for white|gray.
+ // This uses a load-acquire to load the read barrier bit internally to prevent the reordering of
+ // the read barrier bit load and the subsequent load.
+ ALWAYS_INLINE static bool IsGray(mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static bool IsValidReadBarrierState(uint32_t rb_state) {
+ return rb_state == white_state_ || rb_state == gray_state_;
+ }
+
+ static constexpr uint32_t white_state_ = 0x0; // Not marked.
+ static constexpr uint32_t gray_state_ = 0x1; // Marked, but not marked through. On mark stack.
+ static constexpr uint32_t rb_state_mask_ = 0x1; // The low bits for white|gray.
};
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 77ec238..6f68960 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -182,7 +182,7 @@
return compiler_options_;
}
- void AddCompilerOption(std::string option) {
+ void AddCompilerOption(const std::string& option) {
compiler_options_.push_back(option);
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 75b5b12..4f26803 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1567,7 +1567,8 @@
class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension {
public:
- ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
+ ALWAYS_INLINE explicit ScopedAssertNoThreadSuspension(const char* cause)
+ ACQUIRE(Roles::uninterruptible_) {
if (kIsDebugBuild) {
self_ = Thread::Current();
old_cause_ = self_->StartAssertNoThreadSuspension(cause);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 97bc79c..d9e3ea7 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -956,7 +956,7 @@
delete last_fail_message;
}
-void MethodVerifier::AppendToLastFailMessage(std::string append) {
+void MethodVerifier::AppendToLastFailMessage(const std::string& append) {
size_t failure_num = failure_messages_.size();
DCHECK_NE(failure_num, 0U);
std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index eb8b7a6..c6ce583 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -318,7 +318,7 @@
void PrependToLastFailMessage(std::string);
// Adds the given string to the end of the last failure message.
- void AppendToLastFailMessage(std::string);
+ void AppendToLastFailMessage(const std::string& append);
// Verification result for method(s). Includes a (maximum) failure kind, and (the union of)
// all failure types.
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 4ec2da6..da3d946 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -411,7 +411,7 @@
}
// Scan the map for the same value.
- for (const std::pair<uint32_t, uint32_t>& pair : search_map) {
+ for (const std::pair<const uint32_t, uint32_t>& pair : search_map) {
if (pair.first != src && pair.second == src_lock_levels) {
return true;
}
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index bdf63cb..c395612d7 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -607,7 +607,8 @@
}
}
-bool VerifierDeps::Verify(Handle<mirror::ClassLoader> class_loader, Thread* self) const {
+bool VerifierDeps::ValidateDependencies(Handle<mirror::ClassLoader> class_loader,
+ Thread* self) const {
for (const auto& entry : dex_deps_) {
if (!VerifyDexFile(class_loader, *entry.first, *entry.second, self)) {
return false;
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index eea0299..7b419d4 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -109,11 +109,18 @@
void Dump(VariableIndentationOutputStream* vios) const
NO_THREAD_SAFETY_ANALYSIS;
- // Verify the encoded dependencies of this `VerifierDeps`.
+ // Verify the encoded dependencies of this `VerifierDeps` are still valid.
// NO_THREAD_SAFETY_ANALYSIS, as this must be called on a read-only `VerifierDeps`.
- bool Verify(Handle<mirror::ClassLoader> class_loader, Thread* self) const
+ bool ValidateDependencies(Handle<mirror::ClassLoader> class_loader, Thread* self) const
NO_THREAD_SAFETY_ANALYSIS;
+ // NO_THREAD_SAFETY_ANALYSIS, as this is only queried once the VerifierDeps
+ // have been fully created.
+ const std::vector<uint16_t>& GetUnverifiedClasses(const DexFile& dex_file) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ return GetDexFileDeps(dex_file)->unverified_classes_;
+ }
+
private:
static constexpr uint16_t kUnresolvedMarker = static_cast<uint16_t>(-1);
@@ -317,6 +324,7 @@
ART_FRIEND_TEST(VerifierDepsTest, EncodeDecode);
ART_FRIEND_TEST(VerifierDepsTest, EncodeDecodeMulti);
ART_FRIEND_TEST(VerifierDepsTest, VerifyDeps);
+ ART_FRIEND_TEST(VerifierDepsTest, CompilerDriver);
};
} // namespace verifier
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index 6b0dedf..a61b9a0 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -744,6 +744,20 @@
return 1.0f;
}
+ /// CHECK-START: double Main.getCircleArea(double, boolean) load_store_elimination (before)
+ /// CHECK: NewInstance
+
+ /// CHECK-START: double Main.getCircleArea(double, boolean) load_store_elimination (after)
+ /// CHECK-NOT: NewInstance
+
+ private static double getCircleArea(double radius, boolean b) {
+ double area = 0d;
+ if (b) {
+ area = new Circle(radius).getArea();
+ }
+ return area;
+ }
+
static void assertIntEquals(int result, int expected) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -808,6 +822,8 @@
assertIntEquals(sumWithinRange(array, 1, 5), 11);
assertFloatEquals(testAllocationEliminationWithLoops(), 1.0f);
assertFloatEquals(mF, 0f);
+ assertDoubleEquals(Math.PI * Math.PI * Math.PI, getCircleArea(Math.PI, true));
+ assertDoubleEquals(0d, getCircleArea(Math.PI, false));
}
static boolean sFlag;
diff --git a/test/618-checker-induction/src/Main.java b/test/618-checker-induction/src/Main.java
index d8bc611..f85479a 100644
--- a/test/618-checker-induction/src/Main.java
+++ b/test/618-checker-induction/src/Main.java
@@ -92,6 +92,43 @@
}
}
+ /// CHECK-START: void Main.deadConditional(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:{{B\d+}} outer_loop:none
+ //
+ /// CHECK-START: void Main.deadConditional(int) loop_optimization (after)
+ /// CHECK-NOT: Phi loop:{{B\d+}}
+ public static void deadConditional(int n) {
+ int k = 0;
+ int m = 0;
+ for (int i = 0; i < n; i++) {
+ if (i == 3)
+ k = i;
+ else
+ m = i;
+ }
+ }
+
+ /// CHECK-START: void Main.deadConditionalCycle(int) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.deadConditionalCycle(int) loop_optimization (after)
+ /// CHECK-NOT: Phi loop:{{B\d+}}
+ public static void deadConditionalCycle(int n) {
+ int k = 0;
+ int m = 0;
+ for (int i = 0; i < n; i++) {
+ if (i == 3)
+ k--;
+ else
+ m++;
+ }
+ }
+
/// CHECK-START: void Main.deadInduction() loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
@@ -668,6 +705,8 @@
potentialInfiniteLoop(4);
deadNestedLoops();
deadNestedAndFollowingLoops();
+ deadConditional(4);
+ deadConditionalCycle(4);
deadInduction();
for (int i = 0; i < a.length; i++) {
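Both new methods exist purely to feed the loop phis: k and m are never read after the loop, so once loop optimization proves the induction cycles dead, no Phi survives (the CHECK-NOT lines above). A sketch of what each method reduces to, under the assumption that the side-effect-free loop is removed together with its dead values:

    // Hypothetical post-optimization equivalent of deadConditional and
    // deadConditionalCycle: nothing computed in the loop is observable.
    class DeadLoopSketch {
      static void deadConditional(int n) {
        // Empty: i, k and m were all dead.
      }
    }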
diff --git a/test/979-invoke-polymorphic-accessors/expected.txt b/test/979-invoke-polymorphic-accessors/expected.txt
index 2987b6c..22f9f2d 100644
--- a/test/979-invoke-polymorphic-accessors/expected.txt
+++ b/test/979-invoke-polymorphic-accessors/expected.txt
@@ -1 +1,2 @@
+1515870810
Passed InvokeExact tests for accessors.
diff --git a/test/979-invoke-polymorphic-accessors/src/Main.java b/test/979-invoke-polymorphic-accessors/src/Main.java
index 6cdcd10..8f1e361 100644
--- a/test/979-invoke-polymorphic-accessors/src/Main.java
+++ b/test/979-invoke-polymorphic-accessors/src/Main.java
@@ -683,10 +683,20 @@
public static class FindAccessorTester {
public static void main() throws Throwable {
- ValueHolder valueHolder = new ValueHolder();
+ // NB: exercising static-field accessors here is essential to this
+ // test. MethodHandles must ensure the declaring class (ValueHolder)
+ // is initialized, and that initialization happens during the
+ // invoke-polymorphic dispatch.
MethodHandles.Lookup lookup = MethodHandles.lookup();
-
- lookup.findStaticGetter(ValueHolder.class, "s_fi", int.class);
+ try {
+ MethodHandle mh = lookup.findStaticGetter(ValueHolder.class, "s_fi", int.class);
+ int initialValue = (int)mh.invokeExact();
+ System.out.println(initialValue);
+ } catch (NoSuchFieldException e) { unreachable(); }
+ try {
+ MethodHandle mh = lookup.findStaticSetter(ValueHolder.class, "s_i", int.class);
+ mh.invokeExact(0);
+ } catch (NoSuchFieldException e) { unreachable(); }
try {
lookup.findStaticGetter(ValueHolder.class, "s_fi", byte.class);
unreachable();
@@ -721,6 +731,8 @@
}
public static void main(String[] args) throws Throwable {
+ // FindAccessorTester must run first: the class-initialization check
+ // relies on ValueHolder not having been initialized by another test.
FindAccessorTester.main();
InvokeExactTester.main();
}
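The re-ordering requirement and the new invokeExact calls both follow from lazy class initialization: a static-field MethodHandle must initialize the declaring class no later than its first invocation, exactly as a getstatic would. A self-contained sketch of that behavior with a hypothetical Holder class (not part of this test; the 1515870810 in expected.txt is simply the value ValueHolder's initializer gives s_fi):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;

    class Holder {
      static int value;
      static {
        System.out.println("Holder initialized");
        value = 42;
      }
    }

    public class InitSketch {
      public static void main(String[] args) throws Throwable {
        MethodHandle mh =
            MethodHandles.lookup().findStaticGetter(Holder.class, "value", int.class);
        // First invocation triggers Holder's <clinit>, then reads the field:
        // prints "Holder initialized" followed by 42.
        System.out.println((int) mh.invokeExact());
      }
    }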
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index cf8db15..8ee4ca2 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -397,11 +397,13 @@
fi
if [ "$HOST" = "n" ]; then
- ISA=$(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
+ adb_invocation=$(adb shell ls -F /data/dalvik-cache)
outcome=$?
+ ISA=$(echo "$adb_invocation" | grep -Ewo "${ARCHITECTURES_PATTERN}")
if [ x"$ISA" = "x" ]; then
echo "Unable to determine architecture"
# Print a few things for helping diagnosing the problem.
+ echo "adb invocation output: $adb_invocation"
echo "adb invocation outcome: $outcome"
echo $(adb shell ls -F /data/dalvik-cache)
echo $(adb shell ls /data/dalvik-cache)
diff --git a/tools/cpp-define-generator/constant_lockword.def b/tools/cpp-define-generator/constant_lockword.def
index 67ed5b5..f9b6b19 100644
--- a/tools/cpp-define-generator/constant_lockword.def
+++ b/tools/cpp-define-generator/constant_lockword.def
@@ -30,6 +30,9 @@
DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK_TOGGLED, uint32_t, kReadBarrierStateMaskShiftedToggled)
DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE, int32_t, kThinLockCountOne)
+DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_OVERFLOW, uint32_t, kStateForwardingAddressOverflow)
+DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_SHIFT, uint32_t, kForwardingAddressShift)
+
DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED, uint32_t, kGCStateMaskShifted)
DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED_TOGGLED, uint32_t, kGCStateMaskShiftedToggled)
DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT, int32_t, kGCStateShift)