Merge "Always mark reference referents in transaction mode."
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 0325535..a5bb91a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -327,7 +327,7 @@
 class EmptyMarkObjectVisitor : public MarkObjectVisitor {
  public:
   mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {return obj;}
-  void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {}
+  void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {}
 };
 
 void ModUnionTable::FilterCards() {
@@ -459,7 +459,7 @@
     for (mirror::HeapReference<mirror::Object>* obj_ptr : references) {
       if (obj_ptr->AsMirrorPtr() != nullptr) {
         all_null = false;
-        visitor->MarkHeapReference(obj_ptr);
+        visitor->MarkHeapReference(obj_ptr, /*do_atomic_update*/ false);
       }
     }
     count += references.size();
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index cf63b30..48a8742 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -97,7 +97,8 @@
 class CollectVisitedVisitor : public MarkObjectVisitor {
  public:
   explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                                 bool do_atomic_update ATTRIBUTE_UNUSED) OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(ref != nullptr);
     MarkObject(ref->AsMirrorPtr());
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 29bab01..7b1e2b8 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -74,7 +74,7 @@
     mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
       *contains_reference_to_target_space_ = true;
-      collector_->MarkHeapReference(ref_ptr);
+      collector_->MarkHeapReference(ref_ptr, /*do_atomic_update*/ false);
       DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
     }
   }
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b939cbe..65f85f6 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -111,12 +111,29 @@
   }
 }
 
-void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) {
-  // Used for preserving soft references, should be OK to not have a CAS here since there should be
-  // no other threads which can trigger read barriers on the same referent during reference
-  // processing.
-  from_ref->Assign(Mark(from_ref->AsMirrorPtr()));
-  DCHECK(!from_ref->IsNull());
+void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field,
+                                          bool do_atomic_update) {
+  if (UNLIKELY(do_atomic_update)) {
+    // Used to mark the referent in DelayReferenceReferent in transaction mode.
+    mirror::Object* from_ref = field->AsMirrorPtr();
+    if (from_ref == nullptr) {
+      return;
+    }
+    mirror::Object* to_ref = Mark(from_ref);
+    if (from_ref != to_ref) {
+      do {
+        if (field->AsMirrorPtr() != from_ref) {
+          // Concurrently overwritten by a mutator.
+          break;
+        }
+      } while (!field->CasWeakRelaxed(from_ref, to_ref));
+    }
+  } else {
+    // Used for preserving soft references. It should be OK not to use a CAS here, since no other
+    // thread can trigger a read barrier on the same referent while reference processing is in
+    // progress.
+    field->Assign(Mark(field->AsMirrorPtr()));
+  }
 }
 
 ConcurrentCopying::~ConcurrentCopying() {
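
The CAS retry loop above is the heart of the change. Below is a minimal sketch of the same pattern using std::atomic in place of ART's HeapReference::CasWeakRelaxed; every name in it is illustrative rather than an ART API, and Mark() is stubbed out:

    #include <atomic>

    struct Object {};

    // Stub: the real Mark() returns the possibly-forwarded (to-space) copy.
    Object* Mark(Object* ref) { return ref; }

    void MarkFieldAtomically(std::atomic<Object*>* field) {
      Object* from_ref = field->load(std::memory_order_relaxed);
      if (from_ref == nullptr) {
        return;
      }
      Object* to_ref = Mark(from_ref);
      if (from_ref == to_ref) {
        return;  // Nothing to forward; leave the field alone.
      }
      Object* expected = from_ref;
      // A weak CAS can fail spuriously, hence the loop. On failure, `expected`
      // is reloaded with the field's current value: if it no longer equals
      // from_ref, a mutator overwrote the field and must not be stomped on.
      while (!field->compare_exchange_weak(expected, to_ref, std::memory_order_relaxed)) {
        if (expected != from_ref) {
          break;  // Concurrently overwritten by a mutator.
        }
      }
    }

The non-atomic else branch stays correct only because mutators' weak-ref access is blocked during the reference processing phase, so no other thread can race on the same referent there.
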
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 11060a8..a0da9fc 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -182,7 +182,8 @@
   virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
+                                 bool do_atomic_update) OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
   virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 14fd332..1e4196b 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -65,7 +65,8 @@
       name_(name),
       pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
       cumulative_timings_(name),
-      pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true) {
+      pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true),
+      is_transaction_active_(false) {
   ResetCumulativeStatistics();
 }
 
@@ -88,6 +89,9 @@
   uint64_t start_time = NanoTime();
   Iteration* current_iteration = GetCurrentIteration();
   current_iteration->Reset(gc_cause, clear_soft_references);
+  // Note: transaction mode is single-threaded, there is no asynchronous GC, and this flag
+  // doesn't change in the middle of a GC.
+  is_transaction_active_ = Runtime::Current()->IsActiveTransaction();
   RunPhases();  // Run all the GC phases.
   // Add the current timings to the cumulative timings.
   cumulative_timings_.AddLogger(*GetTimings());
@@ -109,6 +113,7 @@
     MutexLock mu(self, pause_histogram_lock_);
     pause_histogram_.AdjustAndAddValue(pause_time);
   }
+  is_transaction_active_ = false;
 }
 
 void GarbageCollector::SwapBitmaps() {
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 95601d7..14d0499 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -199,12 +199,17 @@
   // Force mark an object.
   virtual mirror::Object* MarkObject(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
+                                 bool do_atomic_update)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                       ObjPtr<mirror::Reference> reference)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 
+  bool IsTransactionActive() const {
+    return is_transaction_active_;
+  }
+
  protected:
   // Run all of the GC phases.
   virtual void RunPhases() = 0;
@@ -223,6 +228,7 @@
   int64_t total_freed_bytes_;
   CumulativeLogger cumulative_timings_;
   mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  bool is_transaction_active_;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(GarbageCollector);
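
Because transaction mode is single-threaded and its GCs run synchronously, the new flag can be a plain bool latched once per collection. A condensed sketch of that lifecycle, assuming a stand-in for Runtime::Current()->IsActiveTransaction() (the class and helper below are illustrative):

    class CollectorSketch {
     public:
      void Run() {
        // Latched once per collection: no asynchronous GC exists in
        // transaction mode, so the flag cannot change while phases run.
        is_transaction_active_ = IsActiveTransaction();
        RunPhases();
        is_transaction_active_ = false;
      }
      bool IsTransactionActive() const { return is_transaction_active_; }

     protected:
      virtual void RunPhases() = 0;

     private:
      // Stand-in for Runtime::Current()->IsActiveTransaction().
      static bool IsActiveTransaction() { return false; }
      bool is_transaction_active_ = false;  // Plain bool: no concurrent readers.
    };
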
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 85e67835..0039388 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -260,7 +260,8 @@
   mark_stack_->PushBack(obj);
 }
 
-void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
+void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
+                                    bool do_atomic_update ATTRIBUTE_UNUSED) {
   if (updating_references_) {
     UpdateHeapReference(obj_ptr);
   } else {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 6d52d5d..85727c2 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -170,7 +170,8 @@
   // Mark a single object.
   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
+                                 bool do_atomic_update) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
   virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
       REQUIRES_SHARED(Locks::heap_bitmap_lock_)
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index f00da73..f591cf0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -532,7 +532,8 @@
   return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
 }
 
-void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
+void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
   MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
 }
 
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index a6e2d61..5a9b9f8 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -216,7 +216,8 @@
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
+                                 bool do_atomic_update) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES(!mark_stack_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index cb9e7e2..4c0f317 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -606,7 +606,8 @@
   return ref.AsMirrorPtr();
 }
 
-void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
+void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
+                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
   MarkObject(obj_ptr);
 }
 
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 52b5e5f..9d6e74d 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -110,7 +110,8 @@
   virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
+                                 bool do_atomic_update) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   void ScanObject(mirror::Object* obj)
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 34afa2a..53be30e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3332,7 +3332,7 @@
   virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
     return obj;
   }
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*, bool) OVERRIDE {
   }
 };
 
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 86b1522..65a550e 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -139,6 +139,14 @@
       CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
     }
   }
+  if (kIsDebugBuild && collector->IsTransactionActive()) {
+    // In transaction mode, we don't enqueue any References onto the queues.
+    // See DelayReferenceReferent().
+    DCHECK(soft_reference_queue_.IsEmpty());
+    DCHECK(weak_reference_queue_.IsEmpty());
+    DCHECK(finalizer_reference_queue_.IsEmpty());
+    DCHECK(phantom_reference_queue_.IsEmpty());
+  }
   // Unless required to clear soft references with white references, preserve some white referents.
   if (!clear_soft_references) {
     TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
@@ -206,6 +214,15 @@
   // do_atomic_update needs to be true because this happens outside of the reference processing
   // phase.
   if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
+    if (UNLIKELY(collector->IsTransactionActive())) {
+      // In transaction mode, keep the referent alive and skip reference processing entirely,
+      // since reference processing cannot be rolled back. do_atomic_update needs to be true
+      // because this call happens outside of the reference processing phase.
+      if (!referent->IsNull()) {
+        collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
+      }
+      return;
+    }
     Thread* self = Thread::Current();
     // TODO: Remove these locks, and use atomic stacks for storing references?
     // We need to check that the references haven't already been enqueued since we can end up
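
Condensed, the control flow added to DelayReferenceReferent looks as follows (the free function is illustrative; the collector calls come from the diff). The point is that the transaction path marks the referent and returns instead of enqueuing the Reference, because enqueuing and clearing are side effects a transaction rollback could not undo:

    void DelayReferenceReferentSketch(collector::GarbageCollector* collector,
                                      mirror::HeapReference<mirror::Object>* referent) {
      // do_atomic_update is true: this runs outside the reference processing
      // phase, so mutators may race with the update.
      if (collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/ true)) {
        return;  // Null or already live: nothing to do.
      }
      if (UNLIKELY(collector->IsTransactionActive())) {
        // Keep the referent alive rather than enqueuing the Reference, so no
        // reference-queue state would need to be rolled back on abort.
        if (!referent->IsNull()) {
          collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
        }
        return;
      }
      // Non-transaction path: enqueue onto the matching reference queue (elided).
    }

This is also why the DCHECKs added to ProcessReferences can insist that all four queues are empty whenever a transaction is active.
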
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 734caea..fd5dcf9 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -67,6 +67,11 @@
     list_->SetPendingNext(next);
   }
   ref->SetPendingNext(nullptr);
+  return ref;
+}
+
+// This must be called whenever DequeuePendingReference is called.
+void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
   Heap* heap = Runtime::Current()->GetHeap();
   if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
       heap->ConcurrentCopyingCollector()->IsActive()) {
@@ -92,7 +97,6 @@
       }
     }
   }
-  return ref;
 }
 
 void ReferenceQueue::Dump(std::ostream& os) const {
@@ -140,6 +144,9 @@
       }
       cleared_references->EnqueueReference(ref);
     }
+    // Delay disabling the read barrier until here so that, in transaction mode, the ClearReferent
+    // call above will trigger the read barrier.
+    DisableReadBarrierForReference(ref);
   }
 }
 
@@ -162,6 +169,9 @@
       }
       cleared_references->EnqueueReference(ref);
     }
+    // Delay disabling the read barrier until here so that, in transaction mode, the ClearReferent
+    // call above will trigger the read barrier.
+    DisableReadBarrierForReference(ref->AsReference());
   }
 }
 
@@ -174,7 +184,9 @@
   do {
     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
     if (referent_addr->AsMirrorPtr() != nullptr) {
-      visitor->MarkHeapReference(referent_addr);
+      // do_atomic_update is false because mutators can't access the referent while weak ref
+      // access is blocked.
+      visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
     }
     ref = ref->GetPendingNext();
   } while (LIKELY(ref != head));
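
DequeuePendingReference previously reset the Baker read-barrier state itself; that work now lives in DisableReadBarrierForReference, so the referent can still be read through the barrier in between. A sketch of the resulting caller contract (the loop is illustrative, mirroring ClearWhiteReferences above):

    void DrainQueueSketch(ReferenceQueue* queue) {
      while (!queue->IsEmpty()) {
        ObjPtr<mirror::Reference> ref = queue->DequeuePendingReference();
        // ... inspect or clear the referent here; in transaction mode these
        // accesses must still go through the read barrier ...
        // Required pairing: reset the read-barrier state only after the
        // referent has been handled.
        queue->DisableReadBarrierForReference(ref);
      }
    }
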
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index b5ec1e5..b73a880 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -63,8 +63,15 @@
   void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Dequeue a reference from the queue and return that dequeued reference.
+  // DisableReadBarrierForReference must be called for the reference returned by this function.
   ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // If applicable, disable the read barrier for the reference after its referent is handled (see
+  // ConcurrentCopying::ProcessMarkStackRef). This must be called for every reference dequeued
+  // from the pending queue (DequeuePendingReference).
+  void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Enqueues finalizer references with white referents.  White referents are blackened, moved to
   // the zombie field, and the referent field is cleared.
   void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 4d726ec..ea5e698 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -43,7 +43,8 @@
   // May return the same address as the input if the object did not move.
   virtual mirror::Object* MarkObject(mirror::Object* obj) = 0;
   // Mark an object and update the value stored in the heap reference if the object moved.
-  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj) = 0;
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
+                                 bool do_atomic_update) = 0;
 };
 
 }  // namespace art
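
For out-of-tree implementations of MarkObjectVisitor, migration follows the same pattern as the in-tree updates above: add the bool parameter, and mark it ATTRIBUTE_UNUSED when the visitor never races with mutators. A hedged sketch (MyMarkVisitor is illustrative):

    class MyMarkVisitor : public MarkObjectVisitor {
     public:
      mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
        return obj;  // Non-moving: objects keep their addresses.
      }
      void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
                             bool do_atomic_update ATTRIBUTE_UNUSED) OVERRIDE {
        // Runs only while mutators cannot touch `ref`, so the atomic-update
        // hint can be ignored, as MarkSweep and SemiSpace do above.
        ref->Assign(MarkObject(ref->AsMirrorPtr()));
      }
    };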