Do not wait in ClearReferent

Make IsNullOrMarkedHeapReference use a CAS when the GC requires an update,
so that it is safe to run concurrently with clearing the referent. This
means we no longer have to wait in ClearReferent.

Bug: 399794032
Test: Treehugger
Change-Id: I599980bb093f674d83591b0c5dd9066d09a589af
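For illustration, here is a minimal standalone sketch of the update pattern this change relies on: a weak relaxed CAS that is retried only while the field still holds the expected reference, and that gives up as soon as a mutator has overwritten it. ForwardReference and the std::atomic<T*> field are assumptions made for this sketch, not ART's HeapReference/CasWeakRelaxed API.

  #include <atomic>

  // Sketch only: forward `field` from `from_ref` to `to_ref` with a weak
  // relaxed CAS. Returns false if a mutator concurrently overwrote the field
  // (e.g. Reference.clear() storing null); the mutator's value must win.
  template <typename T>
  bool ForwardReference(std::atomic<T*>& field, T* from_ref, T* to_ref) {
    T* expected = from_ref;
    while (!field.compare_exchange_weak(expected, to_ref,
                                        std::memory_order_relaxed)) {
      if (expected != from_ref) {
        return false;  // Concurrently overwritten by a mutator.
      }
      // Spurious failure: `expected` still equals `from_ref`, so just retry.
    }
    return true;
  }

The key property is that the CAS installs to_ref only over from_ref and can never clobber a value the mutator stored in the meantime, which is what previously forced ClearReferent to wait for reference processing to finish.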
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index f666a8f..59caaed 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -3736,8 +3736,7 @@
   }
 }
 
-bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
-                                                    bool do_atomic_update) {
+bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
   mirror::Object* from_ref = field->AsMirrorPtr();
   if (from_ref == nullptr) {
     return true;
@@ -3747,17 +3746,15 @@
     return false;
   }
   if (from_ref != to_ref) {
-    if (do_atomic_update) {
-      do {
-        if (field->AsMirrorPtr() != from_ref) {
-          // Concurrently overwritten by a mutator.
-          break;
-        }
-      } while (!field->CasWeakRelaxed(from_ref, to_ref));
-      // See comment in MarkHeapReference() for memory ordering.
-    } else {
-      field->Assign(to_ref);
+    // We have to update it while it may be concurrently overwritten by the mutator.
+    // If the mutator overwrites it, we're done.
+    while (UNLIKELY(!field->CasWeakRelaxed(from_ref, to_ref))) {
+      if (field->AsMirrorPtr() != from_ref) {
+        // Concurrently overwritten by a mutator.
+        break;
+      }
     }
+    // See comment in MarkHeapReference() for memory ordering.
   }
   return true;
 }
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 97d120e..dad26b2 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -249,8 +249,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
-                                   bool do_atomic_update) override
+  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) override
       REQUIRES_SHARED(Locks::mutator_lock_);
   void SweepSystemWeaks(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 1f697fd..386ef6b 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -132,9 +132,8 @@
   virtual mirror::Object* IsMarked(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   // Returns true if the given heap reference is null or is already marked. If it's already marked,
-  // update the reference (uses a CAS if do_atomic_update is true). Otherwise, returns false.
-  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
-                                           bool do_atomic_update)
+  // update the reference, using a CAS if the GC requires it. Otherwise, returns false.
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   // Used by reference processor.
   virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 483b875..021d8ba 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -4950,8 +4950,7 @@
   return marking_done_ && IsOnAllocStack(obj) ? obj : nullptr;
 }
 
-bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
-                                              [[maybe_unused]] bool do_atomic_update) {
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) {
   mirror::Object* ref = obj->AsMirrorPtr();
   if (ref == nullptr) {
     return true;
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index f16dcfa..24f2b8f 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -89,8 +89,8 @@
                   [[maybe_unused]] const RootInfo& info) override {
     UNIMPLEMENTED(FATAL);
   }
-  bool IsNullOrMarkedHeapReference([[maybe_unused]] mirror::HeapReference<mirror::Object>* obj,
-                                   [[maybe_unused]] bool do_atomic_update) override {
+  bool IsNullOrMarkedHeapReference(
+      [[maybe_unused]] mirror::HeapReference<mirror::Object>* obj) override {
     UNIMPLEMENTED(FATAL);
     UNREACHABLE();
   }
@@ -172,10 +172,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_);
 
-  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
-                                   bool do_atomic_update) override
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_);
+  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) override
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
   void RevokeAllThreadLocalBuffers() override;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 918e97f..9fcd460 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -403,8 +403,7 @@
   }
 }
 
-bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                            [[maybe_unused]] bool do_atomic_update) {
+bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
   mirror::Object* obj = ref->AsMirrorPtr();
   if (obj == nullptr) {
     return true;
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index b022c7b..367a1ff 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -178,10 +178,8 @@
   void VerifyIsLive(const mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
-  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                   bool do_atomic_update) override
-      REQUIRES(Locks::heap_bitmap_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) override
+      REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
 
   void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
       REQUIRES(Locks::heap_bitmap_lock_)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 9ec44fa..e6bd38d 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -609,9 +609,7 @@
   return mark_bitmap_->Test(obj) ? obj : nullptr;
 }
 
-bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
-                                            // SemiSpace does the GC in a pause. No CAS needed.
-                                            [[maybe_unused]] bool do_atomic_update) {
+bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
   mirror::Object* obj = object->AsMirrorPtr();
   if (obj == nullptr) {
     return true;
@@ -621,8 +619,8 @@
     return false;
   }
   if (new_obj != obj) {
-    // Write barrier is not necessary since it still points to the same object, just at a different
-    // address.
+    // SemiSpace does the GC in a pause. No CAS needed. Write barrier is not necessary since it
+    // still points to the same object, just at a different address.
     object->Assign(new_obj);
   }
   return true;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index e6d1266..c72ce98 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -160,10 +160,8 @@
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
-  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
-                                   bool do_atomic_update) override
-      REQUIRES(Locks::mutator_lock_)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_);
+  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) override
+      REQUIRES(Locks::mutator_lock_) REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
   // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
   // mark, otherwise we unmark.
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 6420fbb..26aa4d7 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -325,7 +325,5 @@
   mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
-  // do_atomic_update needs to be true because this happens outside of the reference processing
-  // phase.
-  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
+  if (!collector->IsNullOrMarkedHeapReference(referent)) {
     if (UNLIKELY(collector->IsTransactionActive())) {
       // In transaction mode, keep the referent alive and avoid any reference processing to avoid the
       // issue of rolling back reference processing. do_atomic_update needs to be true because this
@@ -405,11 +405,10 @@
 void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::reference_processor_lock_);
-  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
-  // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
-  // This also handles the race where the referent gets cleared after a null check but before
-  // IsMarkedHeapReference is called.
-  WaitUntilDoneProcessingReferences(self);
+  // If the collector requires the mutator to update references, IsNullOrMarkedHeapReference
+  // now uses a CAS to perform the update, as with other reference forwarding. Since this
+  // also cannot introduce new strong references, we go ahead and do this even while
+  // processing references.
   if (Runtime::Current()->IsActiveTransaction()) {
     ref->ClearReferent<true>();
   } else {
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 82fd89e..f1ac031 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -139,7 +139,5 @@
   mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
-  // do_atomic_update is false because this happens during the reference processing phase where
-  // Reference.clear() would block.
-  if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
+  if (!collector->IsNullOrMarkedHeapReference(referent_addr)) {
     // Referent is white, clear it.
     if (Runtime::Current()->IsActiveTransaction()) {
       ref->ClearReferent<true>();
@@ -172,7 +172,5 @@
   mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
-  // do_atomic_update is false because this happens during the reference processing phase where
-  // Reference.clear() would block.
-  if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
+  if (!collector->IsNullOrMarkedHeapReference(referent_addr)) {
     ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
     // Move the updated referent to the zombie field.
     if (Runtime::Current()->IsActiveTransaction()) {
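To see why the wait in ClearReferent is no longer needed, consider the race the old comment warned about: the GC un-clearing a reference. The following hypothetical standalone demo (plain std::atomic and std::thread, not ART code) races CAS-based forwarding against an unconditional clear; because the CAS succeeds only while the field still holds the old value, the clear always wins and a cleared referent is never resurrected.

  #include <atomic>
  #include <cassert>
  #include <thread>

  int main() {
    int old_obj = 1;                       // Stand-in for the from-space referent.
    int new_obj = 2;                       // Stand-in for its forwarded copy.
    std::atomic<int*> referent{&old_obj};

    // GC thread: forward the referent, giving up if it was overwritten.
    std::thread gc([&] {
      int* expected = &old_obj;
      while (!referent.compare_exchange_weak(expected, &new_obj,
                                             std::memory_order_relaxed)) {
        if (expected != &old_obj) {
          break;  // Concurrently overwritten by the mutator; done.
        }
      }
    });

    // Mutator thread: Reference.clear() is an unconditional store.
    std::thread mutator([&] { referent.store(nullptr, std::memory_order_relaxed); });

    gc.join();
    mutator.join();
    // The clear always wins: either it overwrote the forwarded value, or the
    // GC's CAS failed against it. A plain Assign() would lose this guarantee.
    assert(referent.load(std::memory_order_relaxed) == nullptr);
    return 0;
  }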