Set Mark-bit for large Zygote objects
We set the mark-bit of zygote objects before forking to ensure that the
objects are not dirtied by the GC modifying the GC-related bits in the
objects' headers. The same was mistakenly not being done for objects in
the large object space that belong to the zygote.
Test: art/test/testrunner/testrunner.py --target --64
Bug: 37254935
Change-Id: I173d6090a6ec0fd7e511e1d684341ecf0968e97f
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index eab292c..d2e1520 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2352,7 +2352,9 @@
AddSpace(zygote_space_);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
AddSpace(non_moving_space_);
- if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
+ constexpr bool set_mark_bit = kUseBakerReadBarrier
+ && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects;
+ if (set_mark_bit) {
// Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
// safe since we mark all of the objects that may reference non immune objects as gray.
zygote_space_->GetLiveBitmap()->VisitMarkedRange(
@@ -2392,7 +2394,7 @@
}
}
AddModUnionTable(mod_union_table);
- large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
+ large_object_space_->SetAllLargeObjectsAsZygoteObjects(self, set_mark_bit);
if (collector::SemiSpace::kUseRememberedSet) {
// Add a new remembered set for the post-zygote non-moving space.
accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2c18888..54d6c77 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -31,6 +31,7 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "image.h"
+#include "mirror/object-readbarrier-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
#include "thread-current-inl.h"
@@ -176,10 +177,14 @@
return it->second.is_zygote;
}
-void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
+void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self, const bool set_mark_bit) {
MutexLock mu(self, lock_);
for (auto& pair : large_objects_) {
pair.second.is_zygote = true;
+ if (set_mark_bit) {
+ bool success = pair.first->AtomicSetMarkBit(0, 1);
+ CHECK(success);
+ }
}
}
@@ -579,7 +584,7 @@
return info->IsZygoteObject();
}
-void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
+void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self, const bool set_mark_bit) {
MutexLock mu(self, lock_);
uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
@@ -587,6 +592,12 @@
cur_info = cur_info->GetNextInfo()) {
if (!cur_info->IsFree()) {
cur_info->SetZygoteObject();
+ if (set_mark_bit) {
+ mirror::Object* obj =
+ reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(cur_info));
+ bool success = obj->AtomicSetMarkBit(0, 1);
+ CHECK(success);
+ }
}
}
}
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 4d1cbc0..85f84b2 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -107,8 +107,9 @@
// Return true if the large object is a zygote large object. Potentially slow.
virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
// Called when we create the zygote space, mark all existing large objects as zygote large
- // objects.
- virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;
+ // objects. Set mark-bit if called from PreZygoteFork() for ConcurrentCopying
+ // GC to avoid dirtying the first page.
+ virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self, const bool set_mark_bit = false) = 0;
virtual void ForEachMemMap(std::function<void(const MemMap&)> func) const = 0;
// GetRangeAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
@@ -173,7 +174,9 @@
virtual ~LargeObjectMapSpace() {}
bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self, const bool set_mark_bit = false) override
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
GUARDED_BY(lock_);
@@ -215,7 +218,9 @@
// Removes header from the free blocks set by finding the corresponding iterator and erasing it.
void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
- void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
+ void SetAllLargeObjectsAsZygoteObjects(Thread* self, const bool set_mark_bit = false) override
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
class SortByPrevFree {
public: