Fix a race condition between DelayReferenceReferent and Reference.clear().

Rename IsMarkedHeapReference to IsNullOrMarkedHeapReference.

Move the null check from the caller of IsMarkedHeapReference into
IsNullOrMarkedHeapReference.

Make sure that the referent is loaded only once between the null
check and the IsMarked call.

Use a CAS in ConcurrentCopying::IsNullOrMarkedHeapReference when
called from DelayReferenceReferent.
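
The problem with the old plain store: the GC reads the referent,
computes the forwarded pointer, and writes it back; if a mutator runs
Reference.clear() in between, the write can overwrite the null and
effectively un-clear the referent. Below is a minimal, self-contained
sketch of the CAS-guarded update pattern (plain std::atomic<void*> and
a hypothetical UpdateIfUnchanged helper, not the actual ART
HeapReference code):

  #include <atomic>

  // Install to_ref only if the slot still holds the value we originally
  // read (from_ref). A concurrent Reference.clear() stores null into the
  // slot, which makes the CAS fail and leaves the null in place.
  bool UpdateIfUnchanged(std::atomic<void*>* slot, void* from_ref, void* to_ref) {
    void* expected = from_ref;
    while (!slot->compare_exchange_weak(expected, to_ref,
                                        std::memory_order_relaxed)) {
      if (expected != from_ref) {
        // Concurrently overwritten by a mutator; give up.
        return false;
      }
      // Spurious CAS failure; the slot still holds from_ref, so retry.
      expected = from_ref;
    }
    return true;
  }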

Bug: 33389022
Test: test-art-host without and with CC.

Change-Id: I20edab4dac2a4bb02dbb72af0f09de77b55ac08e
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b889913..741d1da 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2385,16 +2385,29 @@
   }
 }
 
-bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
+bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+                                                    bool do_atomic_update) {
   mirror::Object* from_ref = field->AsMirrorPtr();
+  if (from_ref == nullptr) {
+    return true;
+  }
   mirror::Object* to_ref = IsMarked(from_ref);
   if (to_ref == nullptr) {
     return false;
   }
   if (from_ref != to_ref) {
-    QuasiAtomic::ThreadFenceRelease();
-    field->Assign(to_ref);
-    QuasiAtomic::ThreadFenceSequentiallyConsistent();
+    if (do_atomic_update) {
+      do {
+        if (field->AsMirrorPtr() != from_ref) {
+          // Concurrently overwritten by a mutator.
+          break;
+        }
+      } while (!field->CasWeakRelaxed(from_ref, to_ref));
+    } else {
+      QuasiAtomic::ThreadFenceRelease();
+      field->Assign(to_ref);
+      QuasiAtomic::ThreadFenceSequentiallyConsistent();
+    }
   }
   return true;
 }
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 5b8a557..844bb45 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -183,7 +183,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+                                           bool do_atomic_update) OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_);
   void SweepSystemWeaks(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 5b51399..0177e2a 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -187,7 +187,10 @@
   // and will be used for reading system weaks while the GC is running.
   virtual mirror::Object* IsMarked(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+  // Returns true if the given heap reference is null or is already marked. If it's already marked,
+  // update the reference (using a CAS if do_atomic_update is true). Otherwise, returns false.
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+                                           bool do_atomic_update)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   // Used by reference processor.
   virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index ddcb6c0..85e67835 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -472,9 +472,15 @@
   return mark_bitmap_->Test(object) ? object : nullptr;
 }
 
-bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr,
+                                              // MarkCompact does the GC in a pause. No CAS needed.
+                                              bool do_atomic_update ATTRIBUTE_UNUSED) {
   // Side effect free since we call this before ever moving objects.
-  return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
+  mirror::Object* obj = ref_ptr->AsMirrorPtr();
+  if (obj == nullptr) {
+    return true;
+  }
+  return IsMarked(obj) != nullptr;
 }
 
 void MarkCompact::SweepSystemWeaks() {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 564f85b..6d52d5d 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -175,7 +175,8 @@
   virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
       REQUIRES_SHARED(Locks::heap_bitmap_lock_)
       REQUIRES(Locks::mutator_lock_);
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+                                           bool do_atomic_update) OVERRIDE
       REQUIRES_SHARED(Locks::heap_bitmap_lock_)
       REQUIRES(Locks::mutator_lock_);
   void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 06ed029..f00da73 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -390,8 +390,13 @@
   }
 }
 
-bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
-  return IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
+  mirror::Object* obj = ref->AsMirrorPtr();
+  if (obj == nullptr) {
+    return true;
+  }
+  return IsMarked(obj);
 }
 
 class MarkSweep::MarkObjectSlowPath {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 02cf462..a6e2d61 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -188,7 +188,8 @@
   void VerifyIsLive(const mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                                           bool do_atomic_update) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a815b83..82505ce 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -765,8 +765,13 @@
   return mark_bitmap_->Test(obj) ? obj : nullptr;
 }
 
-bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+                                            // SemiSpace does the GC in a pause. No CAS needed.
+                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
   mirror::Object* obj = object->AsMirrorPtr();
+  if (obj == nullptr) {
+    return true;
+  }
   mirror::Object* new_obj = IsMarked(obj);
   if (new_obj == nullptr) {
     return false;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4cebcc3..52b5e5f 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -166,7 +166,8 @@
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
-  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
+  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+                                           bool do_atomic_update) OVERRIDE
       REQUIRES(Locks::mutator_lock_)
       REQUIRES_SHARED(Locks::heap_bitmap_lock_);
 
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 081be96..c154836 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -203,7 +203,9 @@
   DCHECK(klass != nullptr);
   DCHECK(klass->IsTypeOfReferenceClass());
   mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
-  if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
+  // do_atomic_update needs to be true because this happens outside of the reference processing
+  // phase.
+  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
     Thread* self = Thread::Current();
     // TODO: Remove these locks, and use atomic stacks for storing references?
     // We need to check that the references haven't already been enqueued since we can end up
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index a0eb197..734caea 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -129,8 +129,9 @@
   while (!IsEmpty()) {
     ObjPtr<mirror::Reference> ref = DequeuePendingReference();
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
-    if (referent_addr->AsMirrorPtr() != nullptr &&
-        !collector->IsMarkedHeapReference(referent_addr)) {
+    // do_atomic_update is false because this happens during the reference processing phase where
+    // Reference.clear() would block.
+    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
       // Referent is white, clear it.
       if (Runtime::Current()->IsActiveTransaction()) {
         ref->ClearReferent<true>();
@@ -147,8 +148,9 @@
   while (!IsEmpty()) {
     ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
-    if (referent_addr->AsMirrorPtr() != nullptr &&
-        !collector->IsMarkedHeapReference(referent_addr)) {
+    // do_atomic_update is false because this happens during the reference processing phase where
+    // Reference.clear() would block.
+    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
       ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
       // Move the updated referent to the zombie field.
       if (Runtime::Current()->IsActiveTransaction()) {
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index e70b936..22fb83c 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -34,6 +34,15 @@
   return HeapReference<MirrorType>(ptr.Ptr());
 }
 
+template<class MirrorType>
+bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
+  HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_ptr));
+  HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_ptr));
+  Atomic<uint32_t>* atomic_reference = reinterpret_cast<Atomic<uint32_t>*>(&this->reference_);
+  return atomic_reference->CompareExchangeWeakRelaxed(expected_ref.reference_,
+                                                      new_ref.reference_);
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 71f34c6..a96a120 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -94,6 +94,9 @@
   static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  bool CasWeakRelaxed(MirrorType* old_ptr, MirrorType* new_ptr)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
       : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}