Track which intern tables are from boot images
Goal: Use this to speed up collision checks between app image intern
tables and the non boot image intern tables.
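
For reviewers, a minimal sketch of the intended follow-up use, modeled
on the new dex2oat test below: collect only the non boot image interns
so an app image collision check can skip the boot image tables. The
intern_table reference and the surrounding collision logic are
illustrative, not part of this change:

  // Sketch only; assumes the caller holds a live InternTable reference.
  std::set<std::string> non_boot_interns;
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  intern_table.VisitInterns(
      [&](const GcRoot<mirror::String>& root)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        // Boot image tables are skipped via the flags below; only these
        // non boot image interns need comparing against an app image.
        non_boot_interns.insert(root.Read()->ToModifiedUtf8());
      },
      /*visit_boot_images=*/false,
      /*visit_non_boot_images=*/true);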
Bug: 116059983
Test: test-art-host
Change-Id: I7a2305167335da5b6685822894f7985970e99053
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 10d2b6f..d22b301 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -2178,6 +2178,22 @@
EXPECT_TRUE(preresolved_seen.find("Other class init") == preresolved_seen.end());
// Expect the sets match.
EXPECT_GE(seen.size(), preresolved_seen.size());
+
+ // Verify which strings are marked as boot image strings.
+ std::set<std::string> boot_image_strings;
+ std::set<std::string> app_image_strings;
+
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ intern_table.VisitInterns([&](const GcRoot<mirror::String>& root)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ boot_image_strings.insert(root.Read()->ToModifiedUtf8());
+ }, /*visit_boot_images=*/true, /*visit_non_boot_images=*/false);
+ intern_table.VisitInterns([&](const GcRoot<mirror::String>& root)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ app_image_strings.insert(root.Read()->ToModifiedUtf8());
+ }, /*visit_boot_images=*/false, /*visit_non_boot_images=*/true);
+ EXPECT_EQ(boot_image_strings.size(), 0u);
+ EXPECT_TRUE(app_image_strings == seen);
}
}
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index fd10b6b..5ca7f07 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -2614,17 +2614,19 @@
CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
// Fixup the pointers in the newly written intern table to contain image addresses.
InternTable temp_intern_table;
- // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
- // the VisitRoots() will update the memory directly rather than the copies.
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements so
+ // that the VisitRoots() will update the memory directly rather than the copies.
// This also relies on visit roots not doing any verification which could fail after we update
// the roots to be the image addresses.
- temp_intern_table.AddTableFromMemory(intern_table_memory_ptr, VoidFunctor());
+ temp_intern_table.AddTableFromMemory(intern_table_memory_ptr,
+ VoidFunctor(),
+ /*is_boot_image=*/ false);
CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
// Record relocations. (The root visitor does not get to see the slot addresses.)
MutexLock lock(Thread::Current(), *Locks::intern_table_lock_);
DCHECK(!temp_intern_table.strong_interns_.tables_.empty());
- DCHECK(!temp_intern_table.strong_interns_.tables_[0].empty()); // Inserted at the beginning.
+ DCHECK(!temp_intern_table.strong_interns_.tables_[0].Empty()); // Inserted at the beginning.
}
// Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
// class loaders. Writing multiple class tables into the image is currently unsupported.
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 96a2cea..b46abfb 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1241,7 +1241,7 @@
for (GcRoot<mirror::String>& root : strings) {
root = GcRoot<mirror::String>(fixup_adapter(root.Read<kWithoutReadBarrier>()));
}
- });
+ }, /*is_boot_image=*/ false);
}
}
if (VLOG_IS_ON(image)) {
diff --git a/runtime/intern_table-inl.h b/runtime/intern_table-inl.h
index 84754fa..3c7da09 100644
--- a/runtime/intern_table-inl.h
+++ b/runtime/intern_table-inl.h
@@ -29,14 +29,17 @@
const Visitor& visitor) {
DCHECK(image_space != nullptr);
// Only add if we have the interned strings section.
- const ImageSection& section = image_space->GetImageHeader().GetInternedStringsSection();
+ const ImageHeader& header = image_space->GetImageHeader();
+ const ImageSection& section = header.GetInternedStringsSection();
if (section.Size() > 0) {
- AddTableFromMemory(image_space->Begin() + section.Offset(), visitor);
+ AddTableFromMemory(image_space->Begin() + section.Offset(), visitor, !header.IsAppImage());
}
}
template <typename Visitor>
-inline size_t InternTable::AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor) {
+inline size_t InternTable::AddTableFromMemory(const uint8_t* ptr,
+ const Visitor& visitor,
+ bool is_boot_image) {
size_t read_count = 0;
UnorderedSet set(ptr, /*make copy*/false, &read_count);
{
@@ -46,13 +49,14 @@
// Visit the unordered set, may remove elements.
visitor(set);
if (!set.empty()) {
- strong_interns_.AddInternStrings(std::move(set));
+ strong_interns_.AddInternStrings(std::move(set), is_boot_image);
}
}
return read_count;
}
-inline void InternTable::Table::AddInternStrings(UnorderedSet&& intern_strings) {
+inline void InternTable::Table::AddInternStrings(UnorderedSet&& intern_strings,
+ bool is_boot_image) {
static constexpr bool kCheckDuplicates = kIsDebugBuild;
if (kCheckDuplicates) {
// Avoid doing read barriers since the space might not yet be added to the heap.
@@ -64,7 +68,30 @@
}
}
// Insert at the front since we add new interns into the back.
- tables_.insert(tables_.begin(), std::move(intern_strings));
+ tables_.insert(tables_.begin(),
+ InternalTable(std::move(intern_strings), is_boot_image));
+}
+
+template <typename Visitor>
+inline void InternTable::VisitInterns(const Visitor& visitor,
+ bool visit_boot_images,
+ bool visit_non_boot_images) {
+ auto visit_tables = [&](std::vector<Table::InternalTable>& tables)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (Table::InternalTable& table : tables) {
+ // Determine whether to visit the table based on the flags.
+ const bool visit =
+ (visit_boot_images && table.IsBootImage()) ||
+ (visit_non_boot_images && !table.IsBootImage());
+ if (visit) {
+ for (auto& intern : table.set_) {
+ visitor(intern);
+ }
+ }
+ }
+ };
+ visit_tables(strong_interns_.tables_);
+ visit_tables(weak_interns_.tables_);
}
} // namespace art
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 2449121..9ac9927 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -351,22 +351,22 @@
UnorderedSet combined;
if (tables_.size() > 1) {
table_to_write = &combined;
- for (UnorderedSet& table : tables_) {
- for (GcRoot<mirror::String>& string : table) {
+ for (InternalTable& table : tables_) {
+ for (GcRoot<mirror::String>& string : table.set_) {
combined.insert(string);
}
}
} else {
- table_to_write = &tables_.back();
+ table_to_write = &tables_.back().set_;
}
return table_to_write->WriteToMemory(ptr);
}
void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
- for (UnorderedSet& table : tables_) {
- auto it = table.find(GcRoot<mirror::String>(s));
- if (it != table.end()) {
- table.erase(it);
+ for (InternalTable& table : tables_) {
+ auto it = table.set_.find(GcRoot<mirror::String>(s));
+ if (it != table.set_.end()) {
+ table.set_.erase(it);
return;
}
}
@@ -375,9 +375,9 @@
ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
- for (UnorderedSet& table : tables_) {
- auto it = table.find(GcRoot<mirror::String>(s));
- if (it != table.end()) {
+ for (InternalTable& table : tables_) {
+ auto it = table.set_.find(GcRoot<mirror::String>(s));
+ if (it != table.set_.end()) {
return it->Read();
}
}
@@ -386,9 +386,9 @@
ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
- for (UnorderedSet& table : tables_) {
- auto it = table.find(string);
- if (it != table.end()) {
+ for (InternalTable& table : tables_) {
+ auto it = table.set_.find(string);
+ if (it != table.set_.end()) {
return it->Read();
}
}
@@ -396,29 +396,29 @@
}
void InternTable::Table::AddNewTable() {
- tables_.push_back(UnorderedSet());
+ tables_.push_back(InternalTable());
}
void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
// Always insert the last table, the image tables are before and we avoid inserting into these
// to prevent dirty pages.
DCHECK(!tables_.empty());
- tables_.back().insert(GcRoot<mirror::String>(s));
+ tables_.back().set_.insert(GcRoot<mirror::String>(s));
}
void InternTable::Table::VisitRoots(RootVisitor* visitor) {
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
visitor, RootInfo(kRootInternedString));
- for (UnorderedSet& table : tables_) {
- for (auto& intern : table) {
+ for (InternalTable& table : tables_) {
+ for (auto& intern : table.set_) {
buffered_visitor.VisitRoot(intern);
}
}
}
void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
- for (UnorderedSet& table : tables_) {
- SweepWeaks(&table, visitor);
+ for (InternalTable& table : tables_) {
+ SweepWeaks(&table.set_, visitor);
}
}
@@ -440,8 +440,8 @@
return std::accumulate(tables_.begin(),
tables_.end(),
0U,
- [](size_t sum, const UnorderedSet& set) {
- return sum + set.size();
+ [](size_t sum, const InternalTable& table) {
+ return sum + table.Size();
});
}
@@ -460,10 +460,10 @@
InternTable::Table::Table() {
Runtime* const runtime = Runtime::Current();
- // Initial table.
- tables_.push_back(UnorderedSet());
- tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
- runtime->GetHashTableMaxLoadFactor());
+ InternalTable initial_table;
+ initial_table.set_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
+ tables_.push_back(std::move(initial_table));
}
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index e918a45..baeb1a9 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -167,6 +167,13 @@
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+ // Visit the interns of tables selected by the flags (boot image and/or non boot image).
+ template <typename Visitor>
+ void VisitInterns(const Visitor& visitor,
+ bool visit_boot_images,
+ bool visit_non_boot_images)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+
void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
void BroadcastForNewInterns();
@@ -198,6 +205,33 @@
// weak interns and strong interns.
class Table {
public:
+ class InternalTable {
+ public:
+ InternalTable() = default;
+ InternalTable(UnorderedSet&& set, bool is_boot_image)
+ : set_(std::move(set)), is_boot_image_(is_boot_image) {}
+
+ bool Empty() const {
+ return set_.empty();
+ }
+
+ size_t Size() const {
+ return set_.size();
+ }
+
+ bool IsBootImage() const {
+ return is_boot_image_;
+ }
+
+ private:
+ UnorderedSet set_;
+ bool is_boot_image_ = false;
+
+ friend class InternTable;
+ friend class Table;
+ ART_FRIEND_TEST(InternTableTest, CrossHash);
+ };
+
Table();
ObjPtr<mirror::String> Find(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
@@ -219,7 +253,7 @@
// debug builds. Returns how many bytes were read.
// NO_THREAD_SAFETY_ANALYSIS for the visitor that may require locks.
template <typename Visitor>
- size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor)
+ size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor, bool is_boot_image)
REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Write the intern tables to ptr, if there are multiple tables they are combined into a single
// one. Returns how many bytes were written.
@@ -231,12 +265,12 @@
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Add a table to the front of the tables vector.
- void AddInternStrings(UnorderedSet&& intern_strings)
+ void AddInternStrings(UnorderedSet&& intern_strings, bool is_boot_image)
REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// We call AddNewTable when we create the zygote to reduce private dirty pages caused by
// modifying the zygote intern table. The back of table is modified when strings are interned.
- std::vector<UnorderedSet> tables_;
+ std::vector<InternalTable> tables_;
friend class InternTable;
friend class linker::ImageWriter;
@@ -251,7 +285,7 @@
// Add a table from memory to the strong interns.
template <typename Visitor>
- size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor)
+ size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor, bool is_boot_image)
REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
ObjPtr<mirror::String> InsertStrong(ObjPtr<mirror::String> s)
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index b3bf1ba..b64ca7d 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -78,9 +78,9 @@
GcRoot<mirror::String> str(mirror::String::AllocFromModifiedUtf8(soa.Self(), "00000000"));
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- for (InternTable::UnorderedSet& table : t.strong_interns_.tables_) {
+ for (InternTable::Table::InternalTable& table : t.strong_interns_.tables_) {
// The negative hash value shall be 32-bit wide on every host.
- ASSERT_TRUE(IsUint<32>(table.hashfn_(str)));
+ ASSERT_TRUE(IsUint<32>(table.set_.hashfn_(str)));
}
}