Rework the newly-allocated region logic in RegionSpace.
- Mark large (and large tail) regions as "newly allocated" in
RegionSpace::AllocLarge when they are allocated for non-evac
purposes, mirroring the way RegionSpace::AllocateRegion already
marks non-large regions allocated for non-evac purposes.
- Clear the "newly allocated" status of regions only when
setting them as (evac) from-space or unevac from-space.
This addresses an issue with Sticky-Bit (Generational) CC collection,
where an assertion in ConcurrentCopying::VerifyNoMissingCardMarks
could fail when a recently allocated large region contained a dead
large object with a (presumably stale) reference pointing into a
newly allocated (non-large) region.
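
For illustration, a minimal standalone sketch of the flag's lifecycle
after this change (ToyRegion and its members are simplified stand-ins,
not the actual ART classes):

#include <cassert>
#include <cstddef>

enum class RegionType { kToSpace, kFromSpace, kUnevacFromSpace };

struct ToyRegion {
  RegionType type = RegionType::kToSpace;
  bool is_newly_allocated = false;
  size_t live_bytes = static_cast<size_t>(-1);  // -1 means "unknown"

  // Non-evac allocations mark the region; with this change that now
  // includes large and large tail regions (RegionSpace::AllocLarge).
  void AllocateNonEvac() { is_newly_allocated = true; }
  // Evac allocations do not count as newly allocated.
  void AllocateForEvac() { is_newly_allocated = false; }

  // The flag is cleared only on the two from-space transitions.
  void SetAsFromSpace() {
    type = RegionType::kFromSpace;
    is_newly_allocated = false;
    live_bytes = static_cast<size_t>(-1);
  }
  void SetAsUnevacFromSpace() {
    type = RegionType::kUnevacFromSpace;
    if (is_newly_allocated) {
      is_newly_allocated = false;
      live_bytes = 0;  // forced clear for newly allocated large regions
    }
  }
};

int main() {
  ToyRegion large;
  large.AllocateNonEvac();            // e.g. AllocLarge for a mutator
  assert(large.is_newly_allocated);
  large.SetAsUnevacFromSpace();       // large regions are kept in place
  assert(!large.is_newly_allocated);  // no newly-allocated from-space region
  assert(large.live_bytes == 0);
  return 0;
}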
Test: ART run-tests & gtests, libcore tests, JDWP tests (host & device)
Test: Device/emulator boot test
Bug: 67628039
Bug: 12687968
Change-Id: Idd20c159db9359ee783a421ec699d3e3aa450552
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d0c10be..c9300b5 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -627,8 +627,6 @@
void CheckReference(mirror::Object* ref, int32_t offset = -1) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- // FIXME: This assertion is failing in 004-ThreadStress most of
- // the time with Sticky-Bit (Generational) CC.
CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
<< holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
<< ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
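
The now-unconditional check boils down to a simple predicate over each
reference held by a scanned object; a sketch with hypothetical Obj and
NewlyAllocatedSet types standing in for mirror::Object and RegionSpace:

#include <cassert>
#include <unordered_set>

struct Obj { int id; };

struct NewlyAllocatedSet {
  std::unordered_set<const Obj*> members;
  bool IsInNewlyAllocatedRegion(const Obj* ref) const {
    return ref != nullptr && members.count(ref) != 0;
  }
};

// Mirrors the CHECK: a scanned holder must not reference an object
// that lives in a newly allocated region.
bool ReferenceIsValid(const NewlyAllocatedSet& space, const Obj* ref) {
  return ref == nullptr || !space.IsInNewlyAllocatedRegion(ref);
}

int main() {
  Obj young{1};
  NewlyAllocatedSet space;
  space.members.insert(&young);
  assert(!ReferenceIsValid(space, &young));  // would trip the CHECK
  assert(ReferenceIsValid(space, nullptr));  // null references are fine
  return 0;
}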
@@ -1618,7 +1616,9 @@
}
space::RegionSpace::RegionType rtype = region_space_->GetRegionType(to_ref);
bool add_to_live_bytes = false;
- DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref));
+ // Invariant: There should be no object from a newly-allocated
+ // region (either large or non-large) on the mark stack.
+ DCHECK(!region_space_->IsInNewlyAllocatedRegion(to_ref)) << to_ref;
if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
// Mark the bitmap only in the GC thread here so that we don't need a CAS.
if (!kUseBakerReadBarrier ||
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index e30b63a..e048515 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -355,6 +355,10 @@
// We make 'top' all usable bytes, as the caller of this
// allocation may use all of 'usable_size' (see mirror::Array::Alloc).
first_reg->SetTop(first_reg->Begin() + allocated);
+ if (!kForEvac) {
+ // Evac doesn't count as newly allocated.
+ first_reg->SetNewlyAllocated();
+ }
for (size_t p = left + 1; p < right; ++p) {
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
@@ -364,6 +368,10 @@
} else {
++num_non_free_regions_;
}
+ if (!kForEvac) {
+ // Evac doesn't count as newly allocated.
+ regions_[p].SetNewlyAllocated();
+ }
}
*bytes_allocated = allocated;
if (usable_size != nullptr) {
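
A condensed model of the marking added above: the flag is set on the
first region and on every tail region of a large allocation, and
skipped for evac allocations (Region and MarkLargeAllocation are
illustrative helpers, not the real types):

#include <cassert>
#include <vector>

struct Region {
  bool is_free = true;
  bool is_newly_allocated = false;
};

template <bool kForEvac>
void MarkLargeAllocation(std::vector<Region>& regions, size_t left, size_t right) {
  for (size_t p = left; p < right; ++p) {  // [left, right) spans the object
    regions[p].is_free = false;
    if (!kForEvac) {
      // Evac doesn't count as newly allocated (same comment as the patch).
      regions[p].is_newly_allocated = true;
    }
  }
}

int main() {
  std::vector<Region> regions(8);
  MarkLargeAllocation</*kForEvac=*/false>(regions, 2, 5);  // 1 large + 2 tails
  for (size_t p = 2; p < 5; ++p) {
    assert(regions[p].is_newly_allocated);
  }
  MarkLargeAllocation</*kForEvac=*/true>(regions, 5, 7);
  assert(!regions[5].is_newly_allocated);
  return 0;
}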
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index b2a0a97..88e683e 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -183,7 +183,40 @@
}
bool result = false;
if (is_newly_allocated_) {
- result = true;
+ // Invariant: newly allocated regions have an undefined live bytes count.
+ DCHECK_EQ(live_bytes_, static_cast<size_t>(-1));
+ if (IsAllocated()) {
+ // We always evacuate newly-allocated non-large regions as we
+ // believe they contain many dead objects (a very simple form of
+ // the generational hypothesis, even before the Sticky-Bit CC
+ // approach).
+ //
+ // TODO: Verify that assertion by collecting statistics on the
+ // number/proportion of live objects in newly allocated regions
+ // in RegionSpace::ClearFromSpace.
+ //
+ // Note that a side effect of evacuating a newly-allocated
+ // non-large region is that the "newly allocated" status will
+ // later be removed, as its live objects will be copied to an
+ // evacuation region, which won't be marked as "newly
+ // allocated" (see RegionSpace::AllocateRegion).
+ result = true;
+ } else {
+ DCHECK(IsLarge());
+ // We never want to evacuate a large region (and the associated
+ // tail regions), except if:
+ // - we are forced to do so (see the `kEvacModeForceAll` case
+ // above); or
+ // - we know that the (sole) object contained in this region is
+ // dead (see the corresponding logic below, in the
+ // `kEvacModeLivePercentNewlyAllocated` case).
+ // For a newly allocated region (i.e. allocated since the
+ // previous GC started), we don't have any liveness information
+ // (the live bytes count is -1 -- also note this region has been
+ // a to-space one between the time of its allocation and now),
+ // so we prefer not to evacuate it.
+ result = false;
+ }
} else if (evac_mode == kEvacModeLivePercentNewlyAllocated) {
bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
if (is_live_percent_valid) {
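
The decision encoded by this comment block can be condensed as follows
(a sketch with simplified names and threshold handling, not the real
RegionSpace logic):

#include <cassert>
#include <cstddef>

constexpr size_t kUnknownLiveBytes = static_cast<size_t>(-1);

bool ShouldBeEvacuatedSketch(bool is_newly_allocated,
                             bool is_large,
                             size_t live_bytes,
                             size_t region_size,
                             size_t live_percent_threshold) {
  if (is_newly_allocated) {
    // Newly allocated regions carry no liveness info (mirrors the
    // DCHECK_EQ above).
    assert(live_bytes == kUnknownLiveBytes);
    // Non-large: evacuate (generational hypothesis -- mostly dead).
    // Large: keep in place; with no liveness info, do not copy.
    return !is_large;
  }
  if (live_bytes == kUnknownLiveBytes) {
    return false;  // no valid live percent; do not evacuate
  }
  // Otherwise evacuate sparsely populated regions.
  return live_bytes * 100U < live_percent_threshold * region_size;
}

int main() {
  assert(ShouldBeEvacuatedSketch(true, false, kUnknownLiveBytes, 256 * 1024, 75));
  assert(!ShouldBeEvacuatedSketch(true, true, kUnknownLiveBytes, 256 * 1024, 75));
  assert(ShouldBeEvacuatedSketch(false, false, 10 * 1024, 256 * 1024, 75));
  return 0;
}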
@@ -309,6 +342,8 @@
rb_table->Clear(r->Begin(), r->End());
}
}
+ // Invariant: There should be no newly-allocated region in the from-space.
+ DCHECK(!r->is_newly_allocated_);
}
DCHECK_EQ(num_expected_large_tails, 0U);
current_region_ = &full_region_;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 3f9644d..2c8129b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -449,6 +449,18 @@
void SetAsFromSpace() {
DCHECK(!IsFree() && IsInToSpace());
type_ = RegionType::kRegionTypeFromSpace;
+ if (IsNewlyAllocated()) {
+ // Clear the "newly allocated" status here, as we do not want the
+ // GC to see it when encountering references in the from-space.
+ //
+ // Invariant: There should be no newly-allocated region in the
+ // from-space (when the from-space exists, which is between the calls
+ // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+ is_newly_allocated_ = false;
+ }
+ // Set live bytes to an invalid value, as we have made an
+ // evacuation decision (possibly based on the percentage of live
+ // bytes).
live_bytes_ = static_cast<size_t>(-1);
}
@@ -461,7 +473,25 @@
DCHECK(kEnableGenerationalConcurrentCopyingCollection || clear_live_bytes);
DCHECK(!IsFree() && IsInToSpace());
type_ = RegionType::kRegionTypeUnevacFromSpace;
+ if (IsNewlyAllocated()) {
+ // A newly allocated region set as unevac from-space must be
+ // a large or large tail region.
+ DCHECK(IsLarge() || IsLargeTail()) << static_cast<uint>(state_);
+ // Always clear the live bytes of a newly allocated (large or
+ // large tail) region.
+ clear_live_bytes = true;
+ // Clear the "newly allocated" status here, as we do not want the
+ // GC to see it when encountering (and processing) references in the
+ // from-space.
+ //
+ // Invariant: There should be no newly-allocated region in the
+ // from-space (when the from-space exists, which is between the calls
+ // to RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace).
+ is_newly_allocated_ = false;
+ }
if (clear_live_bytes) {
+ // Reset the live bytes, as we have made a non-evacuation
+ // decision (possibly based on the percentage of live bytes).
live_bytes_ = 0;
}
}
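
The DCHECK on the region state above holds because ShouldBeEvacuated
(region_space.cc) returns true for every newly allocated non-large
region, so only large and large tail regions can reach
SetAsUnevacFromSpace with the flag still set. A compact illustration of
that reasoning, simplified to the relevant case:

#include <cassert>
#include <initializer_list>

bool ShouldBeEvacuated(bool is_newly_allocated, bool is_large) {
  return is_newly_allocated && !is_large;  // simplified to this case
}

int main() {
  // A newly allocated region becomes unevac from-space only when the
  // decision above returned false, i.e. only when it is large.
  for (bool is_large : {false, true}) {
    bool evacuated = ShouldBeEvacuated(/*is_newly_allocated=*/true, is_large);
    if (!evacuated) {
      assert(is_large);  // mirrors DCHECK(IsLarge() || IsLargeTail())
    }
  }
  return 0;
}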
@@ -471,9 +501,6 @@
void SetUnevacFromSpaceAsToSpace() {
DCHECK(!IsFree() && IsInUnevacFromSpace());
type_ = RegionType::kRegionTypeToSpace;
- if (kEnableGenerationalConcurrentCopyingCollection) {
- is_newly_allocated_ = false;
- }
}
// Return whether this region should be evacuated. Used by RegionSpace::SetFromSpace.