Change intern table to not use WaitHoldingLocks

Waiting with WaitHoldingLocks kept the blocked thread runnable, so a
thread that tried to intern a string while weak root access was
disabled could not be suspended by the GC. Insert() now releases the
intern table lock and transitions to the new kWaitingWeakRootRead state
while it waits. Weak root access is tracked by a gc::WeakRootState:
DisallowNewSystemWeaks() sets kWeakRootStateNoReadsOrWrites in the GC
pause and SweepInternTableWeaks() restores kWeakRootStateNormal. Also
added InternImageString so the image writer can intern while holding
the heap bitmap lock, which is safe because no GC occurs during image
writing.

Bug: 22423014
Change-Id: I9e16b8cb4def72fff73f1783a182877105feb7aa
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index fdfeb48..2b65aa9 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -715,8 +715,10 @@
       DCHECK_EQ(obj, obj->AsString()->Intern());
       return;
     }
-    mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrong(
-        obj->AsString()->Intern());
+    // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
+    // we are guaranteed to not have GC during image writing.
+    mirror::String* const interned = Runtime::Current()->GetInternTable()->InternImageString(
+        obj->AsString());
     if (obj != interned) {
       if (!IsImageBinSlotAssigned(interned)) {
         // interned obj is after us, allocate its location early
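diff end note: the call above can be made while the image writer holds the heap
bitmap lock. A hedged sketch of the contract Insert() appears to enforce for
such callers (see the intern_table.cc hunk below); the bullet comments are a
reading of the patch, not text from it:

// Sketch of the holding_locks contract as this patch appears to define it:
//  - holding_locks == false: the caller holds only the mutator lock, and
//    Insert() may suspend the thread to wait for weak root access.
//  - holding_locks == true: the caller may hold extra locks (the heap bitmap
//    lock here) but must guarantee GC cannot run, because Insert() will
//    CHECK(!holding_locks) instead of blocking if weak roots are disabled.
mirror::String* const interned =
    Runtime::Current()->GetInternTable()->InternImageString(obj->AsString());
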
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 7f103a4..dd1613e 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -311,13 +311,14 @@
   dex_instruction.h \
   dex_instruction_utils.h \
   gc_root.h \
-  gc/allocator/rosalloc.h \
-  gc/collector/gc_type.h \
   gc/allocator_type.h \
+  gc/allocator/rosalloc.h \
   gc/collector_type.h \
+  gc/collector/gc_type.h \
+  gc/heap.h \
   gc/space/region_space.h \
   gc/space/space.h \
-  gc/heap.h \
+  gc/weak_root_state.h \
   image.h \
   instrumentation.h \
   indirect_reference_table.h \
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 97d170e..eccebf1 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2100,6 +2100,7 @@
     case kWaitingInMainDebuggerLoop:
     case kWaitingInMainSignalCatcherLoop:
     case kWaitingPerformingGc:
+    case kWaitingWeakRootRead:
     case kWaiting:
       return JDWP::TS_WAIT;
       // Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 4035143..abb1d3d 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1006,7 +1006,7 @@
 
 void MarkSweep::SweepSystemWeaks(Thread* self) {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
   Runtime::Current()->SweepSystemWeaks(this);
 }
 
diff --git a/runtime/gc/weak_root_state.h b/runtime/gc/weak_root_state.h
new file mode 100644
index 0000000..b66f19d
--- /dev/null
+++ b/runtime/gc/weak_root_state.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
+#define ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
+
+#include <iosfwd>
+
+namespace art {
+namespace gc {
+
+enum WeakRootState {
+  // Can read or add weak roots.
+  kWeakRootStateNormal,
+  // Need to wait until we can read weak roots.
+  kWeakRootStateNoReadsOrWrites,
+  // Need to mark new weak roots to make sure they don't get swept.
+  kWeakRootStateMarkNewRoots,
+};
+
+std::ostream& operator<<(std::ostream& os, const WeakRootState&);
+
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
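
new file note: the header only declares the stream operator used by the
CHECK_NE/DCHECK_NE messages in intern_table.cc; its definition is not shown in
this patch (ART normally generates such operators from the enum declaration),
so the following hand-written equivalent is illustrative only:

// Illustrative only; would live in a .cc inside namespace art::gc and
// assumes <ostream> is included.
std::ostream& operator<<(std::ostream& os, const WeakRootState& rhs) {
  switch (rhs) {
    case kWeakRootStateNormal:          return os << "WeakRootStateNormal";
    case kWeakRootStateNoReadsOrWrites: return os << "WeakRootStateNoReadsOrWrites";
    case kWeakRootStateMarkNewRoots:    return os << "WeakRootStateMarkNewRoots";
  }
  return os << "WeakRootState[" << static_cast<int>(rhs) << "]";  // Fallback.
}
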
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 6ea047f..ae521b1 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -21,6 +21,7 @@
 #include "gc_root-inl.h"
 #include "gc/collector/garbage_collector.h"
 #include "gc/space/image_space.h"
+#include "gc/weak_root_state.h"
 #include "mirror/dex_cache.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
@@ -32,8 +33,8 @@
 
 InternTable::InternTable()
     : image_added_to_intern_table_(false), log_new_roots_(false),
-      allow_new_interns_(true),
-      new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
+      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
+      weak_root_state_(gc::kWeakRootStateNormal) {
 }
 
 size_t InternTable::Size() const {
@@ -89,6 +90,7 @@
 }
 
 mirror::String* InternTable::LookupWeak(mirror::String* s) {
+  // TODO: Return only if marked.
   return weak_interns_.Find(s);
 }
 
@@ -183,8 +185,7 @@
   }
 }
 
-mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
   if (image_added_to_intern_table_) {
     return nullptr;
   }
@@ -212,48 +213,61 @@
   return nullptr;
 }
 
-void InternTable::AllowNewInterns() {
-  Thread* self = Thread::Current();
-  MutexLock mu(self, *Locks::intern_table_lock_);
-  allow_new_interns_ = true;
-  new_intern_condition_.Broadcast(self);
-}
-
-void InternTable::DisallowNewInterns() {
-  Thread* self = Thread::Current();
-  MutexLock mu(self, *Locks::intern_table_lock_);
-  allow_new_interns_ = false;
-}
-
-void InternTable::EnsureNewInternsDisallowed() {
+void InternTable::EnsureNewWeakInternsDisallowed() {
   // Lock and unlock once to ensure that no threads are still in the
   // middle of adding new interns.
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  CHECK(!allow_new_interns_);
+  CHECK_EQ(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
 }
 
 void InternTable::BroadcastForNewInterns() {
   CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::intern_table_lock_);
-  new_intern_condition_.Broadcast(self);
+  weak_intern_condition_.Broadcast(self);
 }
 
-mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
+void InternTable::WaitUntilAccessible(Thread* self) {
+  Locks::intern_table_lock_->ExclusiveUnlock(self);
+  self->TransitionFromRunnableToSuspended(kWaitingWeakRootRead);
+  Locks::intern_table_lock_->ExclusiveLock(self);
+  while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
+    weak_intern_condition_.Wait(self);
+  }
+  Locks::intern_table_lock_->ExclusiveUnlock(self);
+  self->TransitionFromSuspendedToRunnable();
+  Locks::intern_table_lock_->ExclusiveLock(self);
+}
+
+mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
   if (s == nullptr) {
     return nullptr;
   }
-  Thread* self = Thread::Current();
+  Thread* const self = Thread::Current();
   MutexLock mu(self, *Locks::intern_table_lock_);
-  while (UNLIKELY((!kUseReadBarrier && !allow_new_interns_) ||
-                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
-    new_intern_condition_.WaitHoldingLocks(self);
+  if (kDebugLocking && !holding_locks) {
+    Locks::mutator_lock_->AssertSharedHeld(self);
+    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
   }
-  // Check the strong table for a match.
-  mirror::String* strong = LookupStrong(s);
-  if (strong != nullptr) {
-    return strong;
+  while (true) {
+    // Check the strong table for a match.
+    mirror::String* strong = LookupStrong(s);
+    if (strong != nullptr) {
+      return strong;
+    }
+    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
+    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
+    // cleared.
+    if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
+      break;
+    }
+    CHECK(!holding_locks);
+    StackHandleScope<1> hs(self);
+    auto h = hs.NewHandleWrapper(&s);
+    WaitUntilAccessible(self);
   }
+  CHECK_NE(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
+  DCHECK_NE(weak_root_state_, gc::kWeakRootStateMarkNewRoots) << "Unsupported";
   // There is no match in the strong table, check the weak table.
   mirror::String* weak = LookupWeak(s);
   if (weak != nullptr) {
@@ -284,12 +298,17 @@
   return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
 }
 
+mirror::String* InternTable::InternImageString(mirror::String* s) {
+  // May be holding the heap bitmap lock.
+  return Insert(s, true, true);
+}
+
 mirror::String* InternTable::InternStrong(mirror::String* s) {
-  return Insert(s, true);
+  return Insert(s, true, false);
 }
 
 mirror::String* InternTable::InternWeak(mirror::String* s) {
-  return Insert(s, false);
+  return Insert(s, false, false);
 }
 
 bool InternTable::ContainsWeak(mirror::String* s) {
@@ -300,6 +319,8 @@
 void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
   weak_interns_.SweepWeaks(visitor);
+  // Done sweeping, back to a normal state.
+  ChangeWeakRootStateLocked(gc::kWeakRootStateNormal);
 }
 
 void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
@@ -425,4 +446,16 @@
   return pre_zygote_table_.Size() + post_zygote_table_.Size();
 }
 
+void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
+  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+  ChangeWeakRootStateLocked(new_state);
+}
+
+void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
+  weak_root_state_ = new_state;
+  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
+    weak_intern_condition_.Broadcast(Thread::Current());
+  }
+}
+
 }  // namespace art
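
diff end note: two details of the new wait path are easy to miss.
WaitUntilAccessible() releases the intern table lock before suspending so the
GC can make progress while this thread waits, and Insert() wraps s in a
HandleWrapper before suspending because a moving collector may relocate the
string in the meantime. A condensed sketch of the retry protocol:

// Sketch of Insert()'s retry protocol under the new scheme:
while (true) {
  if (mirror::String* strong = LookupStrong(s)) {
    return strong;                     // Hit in the strong table; done.
  }
  if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
    break;                             // Weak roots readable; fall through.
  }
  CHECK(!holding_locks);               // Only lock-free callers may block.
  StackHandleScope<1> hs(self);
  auto h = hs.NewHandleWrapper(&s);    // Re-roots s so a moving GC can update
                                       // it while we are suspended.
  WaitUntilAccessible(self);           // Unlock, suspend, wait, re-lock.
}
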
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 67a8b34..ef08d74 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -19,10 +19,12 @@
 
 #include <unordered_set>
 
+#include "atomic.h"
 #include "base/allocator.h"
 #include "base/hash_set.h"
 #include "base/mutex.h"
 #include "gc_root.h"
+#include "gc/weak_root_state.h"
 #include "object_callbacks.h"
 
 namespace art {
@@ -54,18 +56,22 @@
  public:
   InternTable();
 
-  // Interns a potentially new string in the 'strong' table. (See above.)
+  // Interns a potentially new string in the 'strong' table. May cause thread suspension.
   mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Interns a potentially new string in the 'strong' table. (See above.)
+  // Only used by image writer. May be called with other locks held, since image writing is GC-free.
+  mirror::String* InternImageString(mirror::String* s)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Interns a potentially new string in the 'strong' table. May cause thread suspension.
   mirror::String* InternStrong(const char* utf8_data)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Interns a potentially new string in the 'strong' table. (See above.)
+  // Interns a potentially new string in the 'strong' table. May cause thread suspension.
   mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Interns a potentially new string in the 'weak' table. (See above.)
+  // Interns a potentially new string in the 'weak' table. May cause thread suspension.
   mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SweepInternTableWeaks(IsMarkedVisitor* visitor)
@@ -89,6 +95,7 @@
   void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void EnsureNewInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void BroadcastForNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnsureNewWeakInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Adds all of the resolved image strings from the image space into the intern table. The
   // advantage of doing this is preventing expensive DexFile::FindStringId calls.
@@ -112,6 +119,10 @@
   size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       LOCKS_EXCLUDED(Locks::intern_table_lock_);
 
+  // Change the weak root state. May broadcast to waiters.
+  void ChangeWeakRootState(gc::WeakRootState new_state)
+      LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
  private:
   class StringHashEquals {
    public:
@@ -176,7 +187,7 @@
   };
 
   // Insert if non null, otherwise return null.
-  mirror::String* Insert(mirror::String* s, bool is_strong)
+  mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
       LOCKS_EXCLUDED(Locks::intern_table_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -221,10 +232,17 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Change the weak root state. May broadcast to waiters.
+  void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+
+  // Wait until we can read weak roots.
+  void WaitUntilAccessible(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
   bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
-  bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
-  ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
+  ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
   // Since this contains (strong) roots, they need a read barrier to
   // enable concurrent intern table (strong) root scan. Do not
   // directly access the strings in it. Use functions that contain
@@ -236,6 +254,8 @@
   // not directly access the strings in it. Use functions that contain
   // read barriers.
   Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_);
+  // Weak root state, used for concurrent system weak processing and more.
+  gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
 };
 
 }  // namespace art
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 6569d83..b40d94a 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -90,6 +90,7 @@
     case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
     case kWaitingForMethodTracingStart:   return kJavaWaiting;
     case kWaitingForVisitObjects:         return kJavaWaiting;
+    case kWaitingWeakRootRead:            return kJavaWaiting;
     case kSuspended:                      return kJavaRunnable;
     // Don't add a 'default' here so the compiler can spot incompatible enum changes.
   }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 3b0ca9e..4e8775e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1497,14 +1497,14 @@
 
 void Runtime::DisallowNewSystemWeaks() {
   monitor_list_->DisallowNewMonitors();
-  intern_table_->DisallowNewInterns();
+  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
   java_vm_->DisallowNewWeakGlobals();
   heap_->DisallowNewAllocationRecords();
 }
 
 void Runtime::AllowNewSystemWeaks() {
   monitor_list_->AllowNewMonitors();
-  intern_table_->AllowNewInterns();
+  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping?
   java_vm_->AllowNewWeakGlobals();
   heap_->AllowNewAllocationRecords();
 }
@@ -1513,7 +1513,7 @@
   // Lock and unlock the system weak locks once to ensure that no
   // threads are still in the middle of adding new system weaks.
   monitor_list_->EnsureNewMonitorsDisallowed();
-  intern_table_->EnsureNewInternsDisallowed();
+  intern_table_->EnsureNewWeakInternsDisallowed();
   java_vm_->EnsureNewWeakGlobalsDisallowed();
 }
 
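diff end note: putting the runtime hooks together, the per-GC state cycle
implied by this patch looks as follows (simplified sketch;
kWeakRootStateMarkNewRoots is declared but not yet used by this change):

// Simplified GC-cycle ordering (sketch, not patch code):
DisallowNewSystemWeaks();    // Pause: weak_root_state_ = kWeakRootStateNoReadsOrWrites.
// ... marking; threads calling Insert() suspend in kWaitingWeakRootRead ...
SweepSystemWeaks(visitor);   // SweepInternTableWeaks() prunes dead weak interns,
                             // then restores kWeakRootStateNormal and broadcasts
                             // weak_intern_condition_ to wake blocked interners.
AllowNewSystemWeaks();       // Sets kWeakRootStateNormal again; now redundant for
                             // the intern table, hence the TODO above.
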
diff --git a/runtime/thread.cc b/runtime/thread.cc
index cede998..b9753e1 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2734,4 +2734,12 @@
   tlsPtr_.method_verifier = verifier->link_;
 }
 
+size_t Thread::NumberOfHeldMutexes() const {
+  size_t count = 0;
+  for (BaseMutex* mu : tlsPtr_.held_mutexes) {
+    count += static_cast<size_t>(mu != nullptr);
+  }
+  return count;
+}
+
 }  // namespace art
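
diff end note: NumberOfHeldMutexes() counts the non-null entries in the
thread's per-lock-level held-mutex table (tlsPtr_.held_mutexes). Insert() uses
it in the kDebugLocking check above; the expected count there is an inference
from the surrounding code, not stated by the patch:

// At the assertion site in Insert(): the mutator lock (held shared) plus the
// intern_table_lock_ just taken by MutexLock give exactly 2 held mutexes.
CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
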
diff --git a/runtime/thread.h b/runtime/thread.h
index 7826e62..cf87f22 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -288,6 +288,8 @@
     return tls32_.daemon;
   }
 
+  size_t NumberOfHeldMutexes() const;
+
   bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index c7ea7f4..c000e61 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -43,6 +43,7 @@
   kWaitingForMethodTracingStart,    // WAITING        TS_WAIT      waiting for method tracing to start
   kWaitingForVisitObjects,          // WAITING        TS_WAIT      waiting for visiting objects
   kWaitingForGetObjectsAllocated,   // WAITING        TS_WAIT      waiting for getting the number of allocated objects
+  kWaitingWeakRootRead,             // WAITING        TS_WAIT      waiting to read a weak root
   kStarting,                        // NEW            TS_WAIT      native thread started, not yet ready to run managed code
   kNative,                          // RUNNABLE       TS_RUNNING   running in a JNI native method
   kSuspended,                       // RUNNABLE       TS_RUNNING   suspended by GC or debugger