Improvements for table lookup read barriers.

- Implement fast paths for the GC root read barrier routines: skip the
  read barrier table lookup entirely unless the current thread observes
  that the GC is marking.
- Avoid unnecessary CAS operations: only attempt the field/root update
  when Mark() actually returns a different reference (see the sketch
  below).
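
A minimal C++ sketch of the pattern, using hypothetical stand-ins
(CurrentThreadIsGcMarking(), ReadBarrierTableIsSet(), MarkRef()) for the
ART-internal Thread, ReadBarrierTable, and Mark() calls; it illustrates
the fast path and the skipped CAS, not the actual runtime code:

  #include <atomic>

  struct Object {};

  // Hypothetical stubs standing in for the ART internals.
  static bool gc_is_marking = false;
  bool CurrentThreadIsGcMarking() { return gc_is_marking; }
  bool ReadBarrierTableIsSet(Object*) { return true; }
  Object* MarkRef(Object* ref) { return ref; }  // would return the to-space copy

  Object* BarrierForRoot(std::atomic<Object*>* root) {
    Object* ref = root->load(std::memory_order_relaxed);
    if (!CurrentThreadIsGcMarking()) {
      return ref;  // fast path: no table lookup, no CAS
    }
    if (ReadBarrierTableIsSet(ref)) {
      Object* old_ref = ref;
      ref = MarkRef(old_ref);
      if (ref != old_ref) {
        // Only CAS when the reference actually changed; losing the race
        // to a concurrent mutator update is fine.
        root->compare_exchange_strong(old_ref, ref);
      }
    }
    return ref;
  }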

Bug: 12687968
Change-Id: Iceef44e253062af5bf2295a521a9c64403deafe1
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 468179c..0a7a69f 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -620,7 +620,10 @@
     gc_mark_stack_->PushBack(to_ref);
   } else {
     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
-             static_cast<uint32_t>(kMarkStackModeGcExclusive));
+             static_cast<uint32_t>(kMarkStackModeGcExclusive))
+        << "ref=" << to_ref
+        << " self->gc_marking=" << self->GetIsGcMarking()
+        << " cc->is_marking=" << is_marking_;
     CHECK(self == thread_running_gc_)
         << "Only GC-running thread should access the mark stack "
         << "in the GC exclusive mark stack mode";
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index daae401..85ac4aa 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -62,8 +62,10 @@
     if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
       ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
-      obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
-          offset, old_ref, ref);
+      if (ref != old_ref) {
+        obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
+            offset, old_ref, ref);
+      }
     }
     AssertToSpaceInvariant(obj, offset, ref);
     return ref;
@@ -90,17 +92,17 @@
     // To be implemented.
     return ref;
   } else if (with_read_barrier && kUseTableLookupReadBarrier) {
-    if (kMaybeDuringStartup && IsDuringStartup()) {
-      // During startup, the heap may not be initialized yet. Just
-      // return the given ref.
-      return ref;
-    }
-    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+    Thread* self = Thread::Current();
+    if (self != nullptr &&
+        self->GetIsGcMarking() &&
+        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
       MirrorType* old_ref = ref;
       ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
-      Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
-      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+      if (ref != old_ref) {
+        Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
+        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+      }
     }
     AssertToSpaceInvariant(gc_root_source, ref);
     return ref;
@@ -127,19 +129,19 @@
     // To be implemented.
     return ref;
   } else if (with_read_barrier && kUseTableLookupReadBarrier) {
-    if (kMaybeDuringStartup && IsDuringStartup()) {
-      // During startup, the heap may not be initialized yet. Just
-      // return the given ref.
-      return ref;
-    }
-    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+    Thread* self = Thread::Current();
+    if (self != nullptr &&
+        self->GetIsGcMarking() &&
+        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
       auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
       ref = reinterpret_cast<MirrorType*>(Mark(ref));
       auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
-      auto* atomic_root =
-          reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
-      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
+        auto* atomic_root =
+            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
+        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+      }
     }
     AssertToSpaceInvariant(gc_root_source, ref);
     return ref;