Revert "Revert "Disable adding main and non moving spaces to immune region in GSS""
This reverts commit f85c2fb317399ab540854cd7551ac47690366543.
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index fe5a2ef..fae9271 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -176,6 +176,7 @@
#endif
#define PURE __attribute__ ((__pure__))
+#define WARN_UNUSED __attribute__((warn_unused_result))
template<typename T> void UNUSED(const T&) {}
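
The new WARN_UNUSED macro is shorthand for GCC/Clang's warn_unused_result attribute and is substituted for the long spelling throughout the rest of this patch. A minimal standalone sketch of its effect (not ART code):

    #include <cstdio>

    #define WARN_UNUSED __attribute__((warn_unused_result))

    // With -Wunused-result (default-on in GCC and Clang), dropping this
    // function's return value draws a compiler warning.
    WARN_UNUSED bool TryReserve(unsigned bytes) { return bytes < 1024; }

    int main() {
      TryReserve(64);        // warning: ignoring return value
      if (TryReserve(64)) {  // fine: the result is consumed
        std::puts("reserved");
      }
      return 0;
    }
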
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index fefb907..b70041c 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -209,7 +209,7 @@
// obj will be NULL. Otherwise, obj should always be non-NULL
// and valid.
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
return;
@@ -248,7 +248,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
if (o == nullptr || !Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
return;
@@ -628,7 +628,7 @@
mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj);
return false;
@@ -682,7 +682,7 @@
mirror::Array* a = soa_.Decode<mirror::Array*>(java_array);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(a)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a);
} else if (!a->IsArrayInstance()) {
@@ -703,7 +703,7 @@
}
mirror::ArtField* f = soa_.DecodeField(fid);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "invalid jfieldID: %p", fid);
return nullptr;
}
@@ -717,7 +717,7 @@
}
mirror::ArtMethod* m = soa_.DecodeMethod(mid);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
JniAbortF(function_name_, "invalid jmethodID: %p", mid);
return nullptr;
}
@@ -738,7 +738,7 @@
mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
// TODO: when we remove work_around_app_jni_bugs, this should be impossible.
JniAbortF(function_name_, "native code passing in reference to invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
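
Every call site above now names its stream explicitly instead of leaning on a defaulted LOG(INFO) argument; in ART, LOG(ERROR) evaluates to a std::ostream, so the dump lands at whatever severity the caller picks. A hedged sketch of the same call shape using plain iostreams (this DumpSpaces is a stand-in, not the real implementation):

    #include <iostream>
    #include <sstream>

    // Stand-in dumper: writes to whichever stream the caller chooses.
    void DumpSpaces(std::ostream& stream) {
      stream << "main space [0x12c00000-0x22c00000]\n";
    }

    int main() {
      DumpSpaces(std::cerr);   // abort path: dump directly at error severity
      std::ostringstream oss;  // capture path: build a string for a message
      DumpSpaces(oss);
      std::cerr << "invalid object\n" << oss.str();
      return 0;
    }
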
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 64bffc9..c17f88d 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -635,7 +635,7 @@
// retire a class, the version of the class in the table is returned and this may differ from
// the class passed in.
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
- __attribute__((warn_unused_result)) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 228d1dc..2686af0 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -185,7 +185,7 @@
<< from_space->GetGcRetentionPolicy();
LOG(INFO) << "ToSpace " << to_space->GetName() << " type "
<< to_space->GetGcRetentionPolicy();
- heap->DumpSpaces();
+ heap->DumpSpaces(LOG(INFO));
LOG(FATAL) << "FATAL ERROR";
}
}
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 974952d..104ed36 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -32,10 +32,7 @@
template<typename MarkVisitor, typename ReferenceVisitor>
inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
const ReferenceVisitor& ref_visitor) {
- if (kIsDebugBuild && !IsMarked(obj)) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Scanning unmarked object " << obj;
- }
+ DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces();
obj->VisitReferences<false>(visitor, ref_visitor);
if (kCountScannedTypes) {
mirror::Class* klass = obj->GetClass<kVerifyNone>();
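
Folding the dump into the DCHECK message costs nothing on the fast path because in glog-style macros the whole streamed message sits on the failure branch, so heap_->DumpSpaces() never runs when the check passes. A minimal sketch of that mechanism, assuming nothing about ART's real LogMessage:

    #include <cstdlib>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Collects a message and aborts when destroyed at the end of the
    // streaming expression.
    class FatalMessage {
     public:
      std::ostringstream& stream() { return oss_; }
      ~FatalMessage() {
        std::cerr << oss_.str() << std::endl;
        std::abort();
      }
     private:
      std::ostringstream oss_;
    };

    // Everything streamed after CHECK(...) sits in the else branch, so the
    // operands (including function calls) are evaluated only on failure.
    #define CHECK(x) \
      if (x) {} else FatalMessage().stream() << "Check failed: " #x " "

    std::string DumpSpaces() { return "<expensive heap dump>"; }  // stand-in

    int main() {
      bool marked = true;
      CHECK(marked) << "Scanning unmarked object\n" << DumpSpaces();  // no dump
      return 0;
    }
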
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7e97b3b..95530be 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -313,10 +313,8 @@
}
}
}
- if (current_space_bitmap_ == nullptr) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Could not find a default mark bitmap";
- }
+ CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
+ << heap_->DumpSpaces();
}
void MarkSweep::ExpandMarkStack() {
@@ -943,12 +941,9 @@
void MarkSweep::VerifyIsLive(const Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
- if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
- heap_->allocation_stack_->End()) {
- // Object not found!
- heap_->DumpSpaces();
- LOG(FATAL) << "Found dead object " << obj;
- }
+ accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
+ CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
+ allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
}
}
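
VerifyIsLive() above counts an object as live if it is either in the live bitmap or still sitting on the allocation stack, which is just a [Begin(), End()) range searched linearly. A toy model of that membership test (the pointer values are made up):

    #include <algorithm>
    #include <cassert>

    int main() {
      const void* obj = reinterpret_cast<void*>(0x7000);
      // Stand-in for the allocation stack's Begin()/End() range.
      const void* stack[] = {reinterpret_cast<void*>(0x5000), obj};
      const void** end = stack + 2;
      assert(std::find(stack, end, obj) != end);  // on the stack: still live
      return 0;
    }
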
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 47682cc..922a71c 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -64,34 +64,25 @@
// Verify all the objects have the correct forward pointer installed.
obj->AssertReadBarrierPointer();
}
- if (!immune_region_.ContainsObject(obj)) {
- if (from_space_->HasAddress(obj)) {
- mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
- // If the object has already been moved, return the new forward address.
- if (UNLIKELY(forward_address == nullptr)) {
- forward_address = MarkNonForwardedObject(obj);
- DCHECK(forward_address != nullptr);
- // Make sure to only update the forwarding address AFTER you copy the object so that the
- // monitor word doesn't Get stomped over.
- obj->SetLockWord(
- LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
- // Push the object onto the mark stack for later processing.
- MarkStackPush(forward_address);
- }
- obj_ptr->Assign(forward_address);
- } else {
- BitmapSetSlowPathVisitor visitor(this);
- if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) {
- // If a bump pointer space only collection, we should not
- // reach here as we don't/won't mark the objects in the
- // non-moving space (except for the promoted objects.) Note
- // the non-moving space is added to the immune space.
- DCHECK(!generational_ || whole_heap_collection_);
- }
- if (!mark_bitmap_->Set(obj, visitor)) {
- // This object was not previously marked.
- MarkStackPush(obj);
- }
+ if (from_space_->HasAddress(obj)) {
+ mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+ // If the object has already been moved, return the new forward address.
+ if (UNLIKELY(forward_address == nullptr)) {
+ forward_address = MarkNonForwardedObject(obj);
+ DCHECK(forward_address != nullptr);
+ // Make sure to only update the forwarding address AFTER you copy the object so that the
+ // monitor word doesn't get stomped over.
+ obj->SetLockWord(
+ LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
+ // Push the object onto the mark stack for later processing.
+ MarkStackPush(forward_address);
+ }
+ obj_ptr->Assign(forward_address);
+ } else if (!collect_from_space_only_ && !immune_region_.ContainsObject(obj)) {
+ BitmapSetSlowPathVisitor visitor(this);
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
}
}
}
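
For context on the from-space branch above: once an object has been copied, its new address is published in the old object's lock word, redirecting every later visitor instead of copying again. A toy model with a single header word, ignoring ART's real LockWord state encoding:

    #include <cassert>
    #include <cstdint>

    struct Object {
      uintptr_t lock_word = 0;  // 0 plays the role of "not yet forwarded"
    };

    // Analogous in spirit to SetLockWord(LockWord::FromForwardingAddress(...)).
    void SetForwardingAddress(Object* from, Object* to) {
      from->lock_word = reinterpret_cast<uintptr_t>(to);
    }

    Object* GetForwardingAddress(Object* from) {
      return from->lock_word == 0
                 ? nullptr
                 : reinterpret_cast<Object*>(from->lock_word);
    }

    int main() {
      Object old_obj, new_obj;
      assert(GetForwardingAddress(&old_obj) == nullptr);   // first visit: copy...
      SetForwardingAddress(&old_obj, &new_obj);            // ...then publish
      assert(GetForwardingAddress(&old_obj) == &new_obj);  // later visits redirect
      return 0;
    }
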
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index cabfe21..c7c567f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -63,23 +63,23 @@
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->GetLiveBitmap() != nullptr) {
- if (space == to_space_) {
- CHECK(to_space_->IsContinuousMemMapAllocSpace());
- to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
- } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
- || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
- // Add the main free list space and the non-moving
- // space to the immune space if a bump pointer space
- // only collection.
- || (generational_ && !whole_heap_collection_ &&
- (space == GetHeap()->GetNonMovingSpace() ||
- space == GetHeap()->GetPrimaryFreeListSpace()))) {
- CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ } else if (space->GetLiveBitmap() != nullptr) {
+ if (space == to_space_ || collect_from_space_only_) {
+ if (collect_from_space_only_) {
+ // Bind the bitmaps of the main free list space and the non-moving space if this is a bump
+ // pointer space only collection.
+ CHECK(space == to_space_ || space == GetHeap()->GetPrimaryFreeListSpace() ||
+ space == GetHeap()->GetNonMovingSpace());
+ }
+ CHECK(space->IsContinuousMemMapAllocSpace());
+ space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
}
}
- if (generational_ && !whole_heap_collection_) {
+ if (collect_from_space_only_) {
// We won't collect the large object space if a bump pointer space only collection.
is_large_object_space_immune_ = true;
}
@@ -95,7 +95,7 @@
bytes_promoted_(0),
bytes_promoted_since_last_whole_heap_collection_(0),
large_object_bytes_allocated_at_last_whole_heap_collection_(0),
- whole_heap_collection_(true),
+ collect_from_space_only_(generational),
collector_name_(name_),
swap_semi_spaces_(true) {
}
@@ -147,6 +147,10 @@
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
+ if (generational_) {
+ promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
+ }
+ fallback_space_ = GetHeap()->GetNonMovingSpace();
}
void SemiSpace::ProcessReferences(Thread* self) {
@@ -180,9 +184,9 @@
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
- whole_heap_collection_ = true;
+ collect_from_space_only_ = false;
}
- if (whole_heap_collection_) {
+ if (!collect_from_space_only_) {
VLOG(heap) << "Whole heap collection";
name_ = collector_name_ + " whole";
} else {
@@ -191,7 +195,7 @@
}
}
- if (!generational_ || whole_heap_collection_) {
+ if (!collect_from_space_only_) {
// If non-generational, always clear soft references.
// If generational, clear soft references if a whole heap collection.
GetCurrentIteration()->SetClearSoftReferences(true);
@@ -227,8 +231,6 @@
{
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
MarkRoots();
- // Mark roots of immune spaces.
- UpdateAndMarkModUnion();
// Recursively mark remaining objects.
MarkReachableObjects();
}
@@ -259,46 +261,6 @@
}
}
-void SemiSpace::UpdateAndMarkModUnion() {
- for (auto& space : heap_->GetContinuousSpaces()) {
- // If the space is immune then we need to mark the references to other spaces.
- if (immune_region_.ContainsSpace(space)) {
- accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
- if (table != nullptr) {
- // TODO: Improve naming.
- TimingLogger::ScopedTiming t(
- space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
- "UpdateAndMarkImageModUnionTable",
- GetTimings());
- table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
- } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
- DCHECK(kUseRememberedSet);
- // If a bump pointer space only collection, the non-moving
- // space is added to the immune space. The non-moving space
- // doesn't have a mod union table, but has a remembered
- // set. Its dirty cards will be scanned later in
- // MarkReachableObjects().
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "whole_heap_collection_=" << whole_heap_collection_ << " ";
- } else {
- DCHECK(!kUseRememberedSet);
- // If a bump pointer space only collection, the non-moving
- // space is added to the immune space. But the non-moving
- // space doesn't have a mod union table. Instead, its live
- // bitmap will be scanned later in MarkReachableObjects().
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "whole_heap_collection_=" << whole_heap_collection_ << " ";
- }
- }
- }
-}
-
class SemiSpaceScanObjectVisitor {
public:
explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
@@ -355,20 +317,30 @@
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
}
- t.NewTiming("UpdateAndMarkRememberedSets");
for (auto& space : heap_->GetContinuousSpaces()) {
- // If the space is immune and has no mod union table (the
- // non-moving space when the bump pointer space only collection is
- // enabled,) then we need to scan its live bitmap or dirty cards as roots
- // (including the objects on the live stack which have just marked
- // in the live bitmap above in MarkAllocStackAsLive().)
- if (immune_region_.ContainsSpace(space) &&
- heap_->FindModUnionTableFromSpace(space) == nullptr) {
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
- accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
- if (kUseRememberedSet) {
- DCHECK(rem_set != nullptr);
+ // If the space is immune then we need to mark the references to other spaces.
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+ // TODO: Improve naming.
+ TimingLogger::ScopedTiming t2(
+ space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+ "UpdateAndMarkImageModUnionTable",
+ GetTimings());
+ table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
+ } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
+ // If the space has no mod union table (the non-moving space and main spaces when the bump
+ // pointer space only collection is enabled,) then we need to scan its live bitmap or dirty
+ // cards as roots (including the objects on the live stack which have just been marked in the live
+ // bitmap above in MarkAllocStackAsLive().)
+ DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
+ << "Space " << space->GetName() << " "
+ << "generational_=" << generational_ << " "
+ << "collect_from_space_only_=" << collect_from_space_only_;
+ accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
+ CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
+ if (rem_set != nullptr) {
+ TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
from_space_, this);
if (kIsDebugBuild) {
@@ -383,7 +355,7 @@
visitor);
}
} else {
- DCHECK(rem_set == nullptr);
+ TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
@@ -393,9 +365,10 @@
}
}
+ CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
if (is_large_object_space_immune_) {
TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
- DCHECK(generational_ && !whole_heap_collection_);
+ DCHECK(collect_from_space_only_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
// be newly added to the live set above in MarkAllocStackAsLive().
@@ -506,19 +479,20 @@
}
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
- size_t object_size = obj->SizeOf();
+ const size_t object_size = obj->SizeOf();
size_t bytes_allocated;
mirror::Object* forward_address = nullptr;
if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
- space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr);
+ forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr);
if (UNLIKELY(forward_address == nullptr)) {
// If out of space, fall back to the to-space.
forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ // No logic for marking the bitmap, so it must be null.
+ DCHECK(to_space_->GetLiveBitmap() == nullptr);
} else {
bytes_promoted_ += bytes_allocated;
// Dirty the card at the destination as it may contain
@@ -526,12 +500,12 @@
// space.
GetHeap()->WriteBarrierEveryFieldOf(forward_address);
// Handle the bitmaps marking.
- accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK(!live_bitmap->Test(forward_address));
- if (!whole_heap_collection_) {
+ if (collect_from_space_only_) {
// If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
DCHECK_EQ(live_bitmap, mark_bitmap);
@@ -559,12 +533,23 @@
mark_bitmap->Set(forward_address);
}
}
- DCHECK(forward_address != nullptr);
} else {
// If it's allocated after the last GC (younger), copy it to the to-space.
forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
+ to_space_live_bitmap_->Set(forward_address);
+ }
}
- CHECK(forward_address != nullptr) << "Out of memory in the to-space.";
+ // If it's still null, attempt to use the fallback space.
+ if (UNLIKELY(forward_address == nullptr)) {
+ forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr);
+ CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
+ accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
+ if (bitmap != nullptr) {
+ bitmap->Set(forward_address);
+ }
+ }
++objects_moved_;
bytes_moved_ += bytes_allocated;
// Copy over the object and add it to the mark stack since we still need to update its
@@ -579,11 +564,10 @@
}
forward_address->AssertReadBarrierPointer();
}
- if (to_space_live_bitmap_ != nullptr) {
- to_space_live_bitmap_->Set(forward_address);
- }
DCHECK(to_space_->HasAddress(forward_address) ||
- (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
+ fallback_space_->HasAddress(forward_address) ||
+ (generational_ && promo_dest_space_->HasAddress(forward_address)))
+ << forward_address << "\n" << GetHeap()->DumpSpaces();
return forward_address;
}
@@ -648,7 +632,7 @@
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
- return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
+ return space != from_space_ && space != to_space_;
}
void SemiSpace::Sweep(bool swap_bitmaps) {
@@ -714,22 +698,20 @@
// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- space::MallocSpace* promo_dest_space = nullptr;
accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
- if (generational_ && !whole_heap_collection_) {
+ if (collect_from_space_only_) {
// If a bump pointer space only collection (and the promotion is
// enabled,) we delay the live-bitmap marking of promoted objects
// from MarkObject() until this function.
- promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- live_bitmap = promo_dest_space->GetLiveBitmap();
+ live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK_EQ(live_bitmap, mark_bitmap);
}
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
- if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
+ if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
// obj has just been promoted. Mark the live bitmap for it,
// which is delayed from MarkObject().
DCHECK(!live_bitmap->Test(obj));
@@ -742,16 +724,12 @@
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
- if (immune_region_.ContainsObject(obj)) {
- return obj;
- }
if (from_space_->HasAddress(obj)) {
// Returns either the forwarding address or nullptr.
return GetForwardingAddressInFromSpace(obj);
- } else if (to_space_->HasAddress(obj)) {
- // Should be unlikely.
- // Already forwarded, must be marked.
- return obj;
+ } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
+ to_space_->HasAddress(obj)) {
+ return obj; // Already forwarded, must be marked.
}
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
@@ -777,9 +755,9 @@
if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
- // whole_heap_collection.
- if (!whole_heap_collection_) {
- // Enable whole_heap_collection if the bytes promoted since the
+ // collect_from_space_only_.
+ if (collect_from_space_only_) {
+ // Disable collect_from_space_only_ if the bytes promoted since the
// last whole heap collection or the large object bytes
// allocated exceeds a threshold.
bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
@@ -792,14 +770,14 @@
current_los_bytes_allocated >=
last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
- whole_heap_collection_ = true;
+ collect_from_space_only_ = false;
}
} else {
// Reset the counters.
bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
large_object_bytes_allocated_at_last_whole_heap_collection_ =
GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
- whole_heap_collection_ = false;
+ collect_from_space_only_ = true;
}
}
// Clear all of the spaces' mark bitmaps.
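
With the flag renamed, the end-of-GC policy above reads directly: keep doing from-space-only (young) collections until promoted bytes or large-object allocation since the last whole-heap collection crosses a threshold. A sketch of that decision; the threshold values are assumptions, not ART's actual constants:

    #include <cstdint>
    #include <iostream>

    constexpr uint64_t kBytesPromotedThreshold = 4 * 1024 * 1024;               // assumed
    constexpr uint64_t kLargeObjectBytesAllocatedThreshold = 16 * 1024 * 1024;  // assumed

    // Returns the value of collect_from_space_only_ for the next GC cycle.
    bool CollectFromSpaceOnlyNextGc(uint64_t bytes_promoted_since_whole_heap,
                                    uint64_t los_bytes_since_whole_heap) {
      const bool promoted_exceeded =
          bytes_promoted_since_whole_heap >= kBytesPromotedThreshold;
      const bool los_exceeded =
          los_bytes_since_whole_heap >= kLargeObjectBytesAllocatedThreshold;
      return !(promoted_exceeded || los_exceeded);
    }

    int main() {
      std::cout << CollectFromSpaceOnlyNextGc(1 << 20, 1 << 20) << "\n";  // 1: stay young
      std::cout << CollectFromSpaceOnlyNextGc(8 << 20, 0) << "\n";        // 0: whole heap
      return 0;
    }
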
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index bff0847..c5d25f3 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -243,9 +243,14 @@
// large objects were allocated at the last whole heap collection.
uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
- // Used for the generational mode. When true, collect the whole
- // heap. When false, collect only the bump pointer spaces.
- bool whole_heap_collection_;
+ // Used for generational mode. When true, we only collect the from_space_.
+ bool collect_from_space_only_;
+
+ // The space which we are promoting into, only used for GSS.
+ space::ContinuousMemMapAllocSpace* promo_dest_space_;
+
+ // The space which we copy to if the to_space_ is full.
+ space::ContinuousMemMapAllocSpace* fallback_space_;
// How many objects and bytes we moved, used so that we don't need to get the size of the
// to_space_ when calculating how many objects and bytes we freed.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4ec9bc2..696df32 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1126,7 +1126,13 @@
return false;
}
-void Heap::DumpSpaces(std::ostream& stream) {
+std::string Heap::DumpSpaces() const {
+ std::ostringstream oss;
+ DumpSpaces(oss);
+ return oss.str();
+}
+
+void Heap::DumpSpaces(std::ostream& stream) const {
for (const auto& space : continuous_spaces_) {
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
@@ -1159,10 +1165,7 @@
if (verify_object_mode_ > kVerifyObjectModeFast) {
// Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
- if (!IsLiveObjectLocked(obj)) {
- DumpSpaces();
- LOG(FATAL) << "Object is dead: " << obj;
- }
+ CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
}
}
@@ -2354,7 +2357,7 @@
accounting::RememberedSet* remembered_set = table_pair.second;
remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
}
- DumpSpaces();
+ DumpSpaces(LOG(ERROR));
}
return visitor.GetFailureCount();
}
@@ -2471,12 +2474,7 @@
visitor(*it);
}
}
-
- if (visitor.Failed()) {
- DumpSpaces();
- return false;
- }
- return true;
+ return !visitor.Failed();
}
void Heap::SwapStacks(Thread* self) {
@@ -2574,9 +2572,8 @@
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SwapStacks(self);
// Sort the live stack so that we can quickly binary search it later.
- if (!VerifyMissingCardMarks()) {
- LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
- }
+ CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
+ << " missing card mark verification failed\n" << DumpSpaces();
SwapStacks(self);
}
if (verify_mod_union_table_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b207953..0da113f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -539,7 +539,8 @@
}
}
- void DumpSpaces(std::ostream& stream = LOG(INFO));
+ std::string DumpSpaces() const WARN_UNUSED;
+ void DumpSpaces(std::ostream& stream) const;
// Dump object should only be used by the signal handler.
void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
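
Splitting the defaulted-argument declaration into two overloads supports both shapes used in this patch: stream into a chosen logger on abort paths, or capture a string to embed in a CHECK message, with WARN_UNUSED stopping callers from silently discarding the latter. A minimal sketch of the pair:

    #include <iostream>
    #include <sstream>
    #include <string>

    #define WARN_UNUSED __attribute__((warn_unused_result))

    class Heap {
     public:
      void DumpSpaces(std::ostream& stream) const {
        stream << "space dump placeholder\n";
      }
      // String form for embedding in CHECK messages; built on the stream form.
      WARN_UNUSED std::string DumpSpaces() const {
        std::ostringstream oss;
        DumpSpaces(oss);
        return oss.str();
      }
    };

    int main() {
      Heap heap;
      heap.DumpSpaces(std::cerr);      // abort paths: dump straight to a logger
      std::cerr << heap.DumpSpaces();  // assertion paths: embed in a message
      return 0;
    }
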
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index fff4df1..71c8eb5 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -407,11 +407,11 @@
// Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
return mark_bitmap_.get();
}
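
Annotating the getters with OVERRIDE (ART's macro for C++11 override) makes the compiler verify that each one really overrides a base-class virtual. A standalone sketch of what the annotation buys:

    #define OVERRIDE override  // as in ART's base/macros.h

    struct Space {
      virtual int* GetLiveBitmap() const = 0;
      virtual ~Space() {}
    };

    struct ContinuousSpace : public Space {
      // The compiler now rejects this method if it stops matching a base
      // class virtual (e.g. if the constness ever drifted).
      int* GetLiveBitmap() const OVERRIDE { return nullptr; }
    };

    int main() {
      ContinuousSpace space;
      return space.GetLiveBitmap() == nullptr ? 0 : 1;
    }
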
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 0e6f4d8..592deed 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -24,6 +24,8 @@
// For size_t.
#include <stdlib.h>
+#include "base/macros.h"
+
namespace art {
namespace mirror {
class Class;
@@ -57,8 +59,7 @@
// A callback for visiting an object in the heap.
typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
// A callback used for marking an object; returns the new address of the object if the object moved.
-typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg)
- __attribute__((warn_unused_result));
+typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
// A callback for verifying roots.
typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor* visitor, RootType root_type);
@@ -68,13 +69,12 @@
// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
-typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg)
- __attribute__((warn_unused_result));
+typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
// Returns true if the object in the heap reference is marked; if it is marked and has moved, the
// callback updates the heap reference to contain the new value.
typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object,
- void* arg) __attribute__((warn_unused_result));
+ void* arg) WARN_UNUSED;
typedef void (ProcessMarkStackCallback)(void* arg);
} // namespace art
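
Note that the attribute here hangs off function typedefs, so every callback declared through these types inherits the unused-result warning; this matters for marking callbacks, where dropping the returned (possibly moved) object would lose the update. A small sketch of the typedef form (GNU attribute syntax):

    #define WARN_UNUSED __attribute__((warn_unused_result))

    // Every function declared with this type carries the attribute.
    typedef int (Callback)(int) WARN_UNUSED;

    Callback Twice;  // declares: WARN_UNUSED int Twice(int)
    int Twice(int x) { return 2 * x; }

    int main() {
      Twice(21);                      // warning: ignoring return value
      return Twice(21) == 42 ? 0 : 1;
    }
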
diff --git a/runtime/utils.h b/runtime/utils.h
index b47de81..2ea4953 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -167,8 +167,7 @@
// For rounding integers.
template<typename T>
-static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n)
- __attribute__((warn_unused_result));
+static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
template<typename T>
static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
@@ -178,8 +177,7 @@
}
template<typename T>
-static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n)
- __attribute__((warn_unused_result));
+static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
template<typename T>
static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) {
@@ -188,7 +186,7 @@
// For aligning pointers.
template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) __attribute__((warn_unused_result));
+static inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
static inline T* AlignDown(T* x, uintptr_t n) {
@@ -196,7 +194,7 @@
}
template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) __attribute__((warn_unused_result));
+static inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
static inline T* AlignUp(T* x, uintptr_t n) {
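
These helpers are standard power-of-two rounding, and WARN_UNUSED guards against computing an aligned value and then forgetting to use it. A hedged re-derivation of the arithmetic, assuming n is a power of two as ART's callers guarantee:

    #include <cassert>
    #include <cstdint>

    // Round x down/up to a multiple of n, where n is a power of two.
    constexpr uintptr_t RoundDown(uintptr_t x, uintptr_t n) {
      return x & ~(n - 1);  // clear the low log2(n) bits
    }
    constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t n) {
      return RoundDown(x + n - 1, n);  // bump past the boundary, then truncate
    }

    int main() {
      assert(RoundDown(13, 8) == 8);
      assert(RoundUp(13, 8) == 16);
      assert(RoundUp(16, 8) == 16);  // already aligned: unchanged
      return 0;
    }
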