Rename art::ReadBarrier::WhiteState to art::ReadBarrier::NonGrayState.

The read barrier state recorded in an object's lock word used to be a
three-state value (white/gray/black), which was turned into a
two-state value (white/gray), where the "black" state was conceptually
merged into the "white" state. This change renames the "white" state
to "non-gray" and adjusts corresponding comments.

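For reference, the renamed states keep the same numeric encoding
(non-gray == 0, gray == 1), which is what lets the code generators
below test a single bit of the lock word. A minimal illustrative
sketch of that fast-path check (the IsGrayLockWord helper is
hypothetical and not part of this change):

    // Hypothetical helper mirroring the static_asserts repeated in the
    // code generators; the real checks are emitted as per-architecture
    // instructions (Tbnz/Tst/Lsrs/Sll) rather than C++.
    static bool IsGrayLockWord(uint32_t lock_word) {
      static_assert(ReadBarrier::NonGrayState() == 0u, "non-gray must be 0");
      static_assert(ReadBarrier::GrayState() == 1u, "gray must be 1");
      // The rb_state occupies a single bit at kReadBarrierStateShift.
      return ((lock_word >> LockWord::kReadBarrierStateShift) & 1u) != 0u;
    }
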
Test: art/test.py
Change-Id: I2a17ed15651bdbbe99270c1b81b4d78a1c2c132b
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 15e3d27..462225a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -6162,7 +6162,7 @@
   // Load the lock word containing the rb_state.
   __ Ldr(ip0.W(), lock_word);
   // Given the numeric representation, it's enough to check the low bit of the rb_state.
-  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path);
   static_assert(
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f624216..d811e07 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -899,7 +899,7 @@
     // Given the numeric representation, it's enough to check the low bit of the
     // rb_state. We do that by shifting the bit out of the lock word with LSRS
     // which can be a 16-bit instruction unlike the TST immediate.
-    static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+    static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
     static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
     __ Lsrs(temp1_, temp1_, LockWord::kReadBarrierStateShift + 1);
     __ B(cc, GetExitLabel());  // Carry flag is the last bit shifted out by LSRS.
@@ -9661,7 +9661,7 @@
   // Load the lock word containing the rb_state.
   __ Ldr(ip, lock_word);
   // Given the numeric representation, it's enough to check the low bit of the rb_state.
-  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
   __ B(ne, slow_path, /* is_far_target */ false);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 476e8ab..aed334b 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -7454,7 +7454,7 @@
   // Given the numeric representation, it's enough to check the low bit of the
   // rb_state. We do that by shifting the bit into the sign bit (31) and
   // performing a branch on less than zero.
-  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
   __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index c05f627..72318e9 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -5570,7 +5570,7 @@
   // Given the numeric representation, it's enough to check the low bit of the
   // rb_state. We do that by shifting the bit into the sign bit (31) and
   // performing a branch on less than zero.
-  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
   __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 63bd841..df00ec7 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -7757,7 +7757,7 @@
   uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
 
   // Given the numeric representation, it's enough to check the low bit of the rb_state.
-  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
   constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0bd7319..ae2a000 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -7050,7 +7050,7 @@
   uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
 
   // Given the numeric representation, it's enough to check the low bit of the rb_state.
-  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
   constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 74d4a8f..a657b58 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2739,7 +2739,7 @@
         codegen_->AddSlowPath(read_barrier_slow_path);
 
         // Given the numeric representation, it's enough to check the low bit of the rb_state.
-        static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+        static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
         static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
         __ Tbnz(tmp, LockWord::kReadBarrierStateShift, read_barrier_slow_path->GetEntryLabel());
 
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index b920750..53b0aa2 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2205,7 +2205,7 @@
       // Given the numeric representation, it's enough to check the low bit of the
       // rb_state. We do that by shifting the bit out of the lock word with LSRS
       // which can be a 16-bit instruction unlike the TST immediate.
-      static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+      static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
       static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
       __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
       // Carry flag is the last bit shifted out by LSRS.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 98cea35..5c7be54 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2781,7 +2781,7 @@
     __ j(kEqual, &done);
 
     // Given the numeric representation, it's enough to check the low bit of the rb_state.
-    static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+    static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
     static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
     constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
     constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ac6eab0..b5afe93 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1143,7 +1143,7 @@
     __ j(kEqual, &done);
 
     // Given the numeric representation, it's enough to check the low bit of the rb_state.
-    static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+    static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
     static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
     constexpr uint32_t gray_byte_position = LockWord::kReadBarrierStateShift / kBitsPerByte;
     constexpr uint32_t gray_bit_position = LockWord::kReadBarrierStateShift % kBitsPerByte;
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 36fefbd..cde5dc7 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -35,12 +35,13 @@
     Thread* const self,
     mirror::Object* ref,
     accounting::ContinuousSpaceBitmap* bitmap) {
-  // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
-  // to gray even though the object has already been marked through. This happens if a mutator
-  // thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
-  // object (changes it from white to gray and back to white), and the thread runs and
-  // incorrectly changes it from white to gray. If this happens, the object will get added to the
-  // mark stack again and get changed back to white after it is processed.
+  // For the Baker-style RB, in a rare case, we could incorrectly change the object from non-gray
+  // (black) to gray even though the object has already been marked through. This happens if a
+  // mutator thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
+  // object (changes it from non-gray (white) to gray and back to non-gray (black)), and the thread
+  // runs and incorrectly changes it from non-gray (black) to gray. If this happens, the object
+  // will get added to the mark stack again and get changed back to non-gray (black) after it is
+  // processed.
   if (kUseBakerReadBarrier) {
     // Test the bitmap first to avoid graying an object that has already been marked through most
     // of the time.
@@ -55,7 +56,7 @@
     // we can avoid an expensive CAS.
     // For the baker case, an object is marked if either the mark bit marked or the bitmap bit is
     // set.
-    success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
+    success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
                                              /* rb_state */ ReadBarrier::GrayState());
   } else {
     success = !bitmap->AtomicTestAndSet(ref);
@@ -91,8 +92,9 @@
       return ref;
     }
     // This may or may not succeed, which is ok because the object may already be gray.
-    bool success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
-                                                  /* rb_state */ ReadBarrier::GrayState());
+    bool success =
+        ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(),
+                                       /* rb_state */ ReadBarrier::GrayState());
     if (success) {
       MutexLock mu(self, immune_gray_stack_lock_);
       immune_gray_stack_.push_back(ref);
@@ -204,8 +206,8 @@
 }
 
 inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
-  // Use load acquire on the read barrier pointer to ensure that we never see a white read barrier
-  // state with an unmarked bit due to reordering.
+  // Use load-acquire on the read barrier pointer to ensure that we never see a black (non-gray)
+  // read barrier state with an unmarked bit due to reordering.
   DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
   if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
     return true;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index b03f671..07abbfc 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -646,10 +646,10 @@
   explicit GrayImmuneObjectVisitor(Thread* self) : self_(self) {}
 
   ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (kUseBakerReadBarrier && obj->GetReadBarrierState() == ReadBarrier::WhiteState()) {
+    if (kUseBakerReadBarrier && obj->GetReadBarrierState() == ReadBarrier::NonGrayState()) {
       if (kConcurrent) {
         Locks::mutator_lock_->AssertSharedHeld(self_);
-        obj->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
+        obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
         // Mod union table VisitObjects may visit the same object multiple times so we can't check
         // the result of the atomic set.
       } else {
@@ -765,9 +765,9 @@
       // Only need to scan gray objects.
       if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
         collector_->ScanImmuneObject(obj);
-        // Done scanning the object, go back to white.
+        // Done scanning the object, go back to black (non-gray).
         bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
-                                                      ReadBarrier::WhiteState());
+                                                      ReadBarrier::NonGrayState());
         CHECK(success)
             << Runtime::Current()->GetHeap()->GetVerification()->DumpObjectInfo(obj, "failed CAS");
       }
@@ -824,21 +824,23 @@
     // This release fence makes the field updates in the above loop visible before allowing mutator
     // getting access to immune objects without graying it first.
     updated_all_immune_objects_.store(true, std::memory_order_release);
-    // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
-    // the above loop because we would incorrectly disable the read barrier by whitening an object
-    // which may point to an unscanned, white object, breaking the to-space invariant.
+    // Now "un-gray" (conceptually blacken) immune objects concurrently accessed and grayed by
+    // mutators. We can't do this in the above loop because we would incorrectly disable the read
+    // barrier by un-graying (conceptually blackening) an object which may point to an unscanned,
+    // white object, breaking the to-space invariant (a mutator shall never observe a from-space
+    // (white) object).
     //
-    // Make sure no mutators are in the middle of marking an immune object before whitening immune
-    // objects.
+    // Make sure no mutators are in the middle of marking an immune object before un-graying
+    // (blackening) immune objects.
     IssueEmptyCheckpoint();
     MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
     if (kVerboseMode) {
       LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
     }
     for (mirror::Object* obj : immune_gray_stack_) {
-      DCHECK(obj->GetReadBarrierState() == ReadBarrier::GrayState());
+      DCHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::GrayState());
       bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
-                                                    ReadBarrier::WhiteState());
+                                                    ReadBarrier::NonGrayState());
       DCHECK(success);
     }
     immune_gray_stack_.clear();
@@ -1038,16 +1040,17 @@
 
 void ConcurrentCopying::ProcessFalseGrayStack() {
   CHECK(kUseBakerReadBarrier);
-  // Change the objects on the false gray stack from gray to white.
+  // Change the objects on the false gray stack from gray to non-gray (conceptually black).
   MutexLock mu(Thread::Current(), mark_stack_lock_);
   for (mirror::Object* obj : false_gray_stack_) {
     DCHECK(IsMarked(obj));
-    // The object could be white here if a thread got preempted after a success at the
-    // AtomicSetReadBarrierState in Mark(), GC started marking through it (but not finished so
-    // still gray), and the thread ran to register it onto the false gray stack.
+    // The object could be non-gray (conceptually black) here if a thread got preempted after a
+    // success at the AtomicSetReadBarrierState in MarkNonMoving(), GC started marking through it
+    // (but not finished so still gray), the thread ran to register it onto the false gray stack,
+    // and then GC eventually marked it black (non-gray) after it finished scanning it.
     if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
       bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
-                                                    ReadBarrier::WhiteState());
+                                                    ReadBarrier::NonGrayState());
       DCHECK(success);
     }
   }
@@ -1166,9 +1169,8 @@
     }
     collector_->AssertToSpaceInvariant(holder, offset, ref);
     if (kUseBakerReadBarrier) {
-      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState())
-          << "Ref " << ref << " " << ref->PrettyTypeOf()
-          << " has non-white rb_state ";
+      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState())
+          << "Ref " << ref << " " << ref->PrettyTypeOf() << " has gray rb_state";
     }
   }
 
@@ -1243,8 +1245,8 @@
         visitor,
         visitor);
     if (kUseBakerReadBarrier) {
-      CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
-          << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
+      CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::NonGrayState())
+          << "obj=" << obj << " has gray rb_state " << obj->GetReadBarrierState();
     }
   };
   // Roots.
@@ -1542,18 +1544,18 @@
                 (referent = to_ref->AsReference()->GetReferent<kWithoutReadBarrier>()) != nullptr &&
                 !IsInToSpace(referent)))) {
     // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
-    // will change it to white later in ReferenceQueue::DequeuePendingReference().
+    // will change it to non-gray later in ReferenceQueue::DisableReadBarrierForReference().
     DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr)
         << "Left unenqueued ref gray " << to_ref;
   } else {
-    // We may occasionally leave a reference white in the queue if its referent happens to be
+    // We may occasionally leave a reference non-gray in the queue if its referent happens to be
     // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
-    // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
-    // else block.
+    // above IsInToSpace() evaluates to true and we change the color from gray to non-gray here in
+    // this else block.
     if (kUseBakerReadBarrier) {
       bool success = to_ref->AtomicSetReadBarrierState<std::memory_order_release>(
           ReadBarrier::GrayState(),
-          ReadBarrier::WhiteState());
+          ReadBarrier::NonGrayState());
       DCHECK(success) << "Must succeed as we won the race.";
     }
   }
@@ -2617,7 +2619,7 @@
   } else {
     // Not marked.
     if (IsOnAllocStack(ref)) {
-      // If it's on the allocation stack, it's considered marked. Keep it white.
+      // If it's on the allocation stack, it's considered marked. Keep it white (non-gray).
       // Objects on the allocation stack need not be marked.
       if (!is_los) {
         DCHECK(!mark_bitmap->Test(ref));
@@ -2625,7 +2627,7 @@
         DCHECK(!los_bitmap->Test(ref));
       }
       if (kUseBakerReadBarrier) {
-        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
+        DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
       }
     } else {
       // For the baker-style RB, we need to handle 'false-gray' cases. See the
@@ -2642,22 +2644,24 @@
         // AtomicSetReadBarrierState since it will fault if the address is not valid.
         heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true);
       }
-      // Not marked or on the allocation stack. Try to mark it.
+      // Neither marked nor on the allocation stack. Try to mark it.
       // This may or may not succeed, which is ok.
       bool cas_success = false;
       if (kUseBakerReadBarrier) {
-        cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
+        cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(),
                                                      ReadBarrier::GrayState());
       }
       if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
         // Already marked.
-        if (kUseBakerReadBarrier && cas_success &&
+        if (kUseBakerReadBarrier &&
+            cas_success &&
             ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
           PushOntoFalseGrayStack(self, ref);
         }
       } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
         // Already marked in LOS.
-        if (kUseBakerReadBarrier && cas_success &&
+        if (kUseBakerReadBarrier &&
+            cas_success &&
             ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
           PushOntoFalseGrayStack(self, ref);
         }
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 321d22a..e25e279 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -76,19 +76,19 @@
   Heap* heap = Runtime::Current()->GetHeap();
   if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
       heap->ConcurrentCopyingCollector()->IsActive()) {
-    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to white.
+    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to non-gray.
     // We check IsActive() above because we don't want to do this when the zygote compaction
     // collector (SemiSpace) is running.
     CHECK(ref != nullptr);
     collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
     uint32_t rb_state = ref->GetReadBarrierState();
     if (rb_state == ReadBarrier::GrayState()) {
-      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::WhiteState());
-      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
+      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState());
+      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
     } else {
-      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
-      // find it here, which is OK.
-      CHECK_EQ(rb_state, ReadBarrier::WhiteState()) << "ref=" << ref << " rb_state=" << rb_state;
+      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a non-gray reference in the queue
+      // and find it here, which is OK.
+      CHECK_EQ(rb_state, ReadBarrier::NonGrayState()) << "ref=" << ref << " rb_state=" << rb_state;
       ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
       // The referent could be null if it's cleared by a mutator (Reference.clear()).
       if (referent != nullptr) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index ce7fe34..ac7890c 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -210,7 +210,7 @@
 
   void SetReadBarrierState(uint32_t rb_state) {
     DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
-    DCHECK(rb_state == ReadBarrier::WhiteState() ||
+    DCHECK(rb_state == ReadBarrier::NonGrayState() ||
            rb_state == ReadBarrier::GrayState()) << rb_state;
     DCHECK_NE(static_cast<uint32_t>(GetState()), static_cast<uint32_t>(kForwardingAddress));
     // Clear and or the bits.
@@ -288,7 +288,7 @@
       if (!kUseReadBarrier) {
         DCHECK_EQ(rb_state, 0U);
       } else {
-        DCHECK(rb_state == ReadBarrier::WhiteState() ||
+        DCHECK(rb_state == ReadBarrier::NonGrayState() ||
                rb_state == ReadBarrier::GrayState()) << rb_state;
       }
     }
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 47f0a29..bd89907 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -119,7 +119,7 @@
 inline void Object::AssertReadBarrierState() const {
   CHECK(kUseBakerReadBarrier);
   Object* obj = const_cast<Object*>(this);
-  DCHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
+  DCHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::NonGrayState())
       << "Bad Baker pointer: obj=" << obj << " rb_state" << obj->GetReadBarrierState();
 }
 
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index df50d06..cc375bd 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -168,10 +168,11 @@
     expected_lw.SetReadBarrierState(expected_rb_state);
     new_lw = lw;
     new_lw.SetReadBarrierState(rb_state);
-    // ConcurrentCopying::ProcessMarkStackRef uses this with kCasRelease == true.
-    // If kCasRelease == true, use a CAS release so that when GC updates all the fields of
-    // an object and then changes the object from gray to black, the field updates (stores) will be
-    // visible (won't be reordered after this CAS.)
+    // ConcurrentCopying::ProcessMarkStackRef uses this with
+    // `kMemoryOrder` == `std::memory_order_release`.
+    // If `kMemoryOrder` == `std::memory_order_release`, use a CAS release so that when GC updates
+    // all the fields of an object and then changes the object from gray to black (non-gray), the
+    // field updates (stores) will be visible (won't be reordered after this CAS).
   } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, kMemoryOrder));
   return true;
 }
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 2801928..47aded3 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -117,7 +117,7 @@
   ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Assert that the read barrier state is in the default (white) state.
+  // Assert that the read barrier state is in the default (white, i.e. non-gray) state.
   ALWAYS_INLINE void AssertReadBarrierState() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 640fa7e..672303a 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -255,13 +255,13 @@
 }
 
 inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
-  return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
+  return obj->GetReadBarrierState(fake_address_dependency) == kGrayState;
 }
 
 inline bool ReadBarrier::IsGray(mirror::Object* obj) {
   // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
   // GetReadBarrierStateAcquire() has load-acquire semantics.
-  return obj->GetReadBarrierStateAcquire() == gray_state_;
+  return obj->GetReadBarrierStateAcquire() == kGrayState;
 }
 
 }  // namespace art
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index e8df2ad..0741da6 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -102,11 +102,11 @@
   // ALWAYS_INLINE on this caused a performance regression b/26744236.
   static mirror::Object* Mark(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static constexpr uint32_t WhiteState() {
-    return white_state_;
+  static constexpr uint32_t NonGrayState() {
+    return kNonGrayState;
   }
   static constexpr uint32_t GrayState() {
-    return gray_state_;
+    return kGrayState;
   }
 
   // fake_address_dependency will be zero which should be bitwise-or'ed with the address of the
@@ -122,12 +122,13 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   static bool IsValidReadBarrierState(uint32_t rb_state) {
-    return rb_state == white_state_ || rb_state == gray_state_;
+    return rb_state == kNonGrayState || rb_state == kGrayState;
   }
 
-  static constexpr uint32_t white_state_ = 0x0;    // Not marked.
-  static constexpr uint32_t gray_state_ = 0x1;     // Marked, but not marked through. On mark stack.
-  static constexpr uint32_t rb_state_mask_ = 0x1;  // The low bits for white|gray.
+ private:
+  static constexpr uint32_t kNonGrayState = 0x0;  // White (not marked) or black (marked through).
+  static constexpr uint32_t kGrayState = 0x1;     // Marked, but not marked through. On mark stack.
+  static constexpr uint32_t kRBStateMask = 0x1;   // The low bits for non-gray|gray.
 };
 
 }  // namespace art