ART: Refactor ObjectTagTable

Factor the plain weak-table holder out of the event handling into a
reusable, templated JvmtiWeakTable.
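
The generic machinery (system-weak storage, locking, read-barrier
updates, sweeping) moves into the templated JvmtiWeakTable<T>;
ObjectTagTable becomes a thin subclass that keeps only the JVMTI tag
semantics and the ObjectFree event dispatch. In sketch (signatures
abridged from the diff below):

  class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
    // Setting a tag of 0 removes the mapping.
    bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE;
    bool SetLocked(art::mirror::Object* obj, jlong tag) OVERRIDE;
   protected:
    // Dispatch ObjectFree events for tagged objects released by a sweep.
    bool DoesHandleNullOnSweep() OVERRIDE;
    void HandleNullSweep(jlong tag) OVERRIDE;
  };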

Bug: 31385354
Bug: 31385027
Test: m test-art-host
Change-Id: If6af36e5275f8f1b924ab7dba1fb14f1ce2de8fe
diff --git a/runtime/openjdkjvmti/jvmti_weak_table-inl.h b/runtime/openjdkjvmti/jvmti_weak_table-inl.h
new file mode 100644
index 0000000..f67fffc
--- /dev/null
+++ b/runtime/openjdkjvmti/jvmti_weak_table-inl.h
@@ -0,0 +1,393 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
+#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
+
+#include "jvmti_weak_table.h"
+
+#include <limits>
+
+#include "art_jvmti.h"
+#include "base/logging.h"
+#include "gc/allocation_listener.h"
+#include "instrumentation.h"
+#include "jni_env_ext-inl.h"
+#include "jvmti_allocator.h"
+#include "mirror/class.h"
+#include "mirror/object.h"
+#include "runtime.h"
+#include "ScopedLocalRef.h"
+
+namespace openjdkjvmti {
+
+template <typename T>
+void JvmtiWeakTable<T>::Lock() {
+  allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
+}
+template <typename T>
+void JvmtiWeakTable<T>::Unlock() {
+  allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
+}
+template <typename T>
+void JvmtiWeakTable<T>::AssertLocked() {
+  allow_disallow_lock_.AssertHeld(art::Thread::Current());
+}
+
+template <typename T>
+void JvmtiWeakTable<T>::UpdateTableWithReadBarrier() {
+  update_since_last_sweep_ = true;
+
+  auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
+                                    art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
+     REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return original_root.Read<art::kWithReadBarrier>();
+  };
+
+  UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, T* result) {
+  // Under concurrent GC, there is a window between moving objects and sweeping of system
+  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+  // but still have from-space pointers in the table. Explicitly update the table once.
+  // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+  UpdateTableWithReadBarrier();
+  return GetTagLocked(self, obj, result);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::Remove(art::mirror::Object* obj, /* out */ T* tag) {
+  art::Thread* self = art::Thread::Current();
+  art::MutexLock mu(self, allow_disallow_lock_);
+  Wait(self);
+
+  return RemoveLocked(self, obj, tag);
+}
+template <typename T>
+bool JvmtiWeakTable<T>::RemoveLocked(art::mirror::Object* obj, T* tag) {
+  art::Thread* self = art::Thread::Current();
+  allow_disallow_lock_.AssertHeld(self);
+  Wait(self);
+
+  return RemoveLocked(self, obj, tag);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::RemoveLocked(art::Thread* self, art::mirror::Object* obj, T* tag) {
+  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+  if (it != tagged_objects_.end()) {
+    if (tag != nullptr) {
+      *tag = it->second;
+    }
+    tagged_objects_.erase(it);
+    return true;
+  }
+
+  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+    // Under concurrent GC, there is a window between moving objects and sweeping of system
+    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+    // but still have from-space pointers in the table. Explicitly update the table once.
+    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
+    // Update the table.
+    UpdateTableWithReadBarrier();
+
+    // And try again.
+    return RemoveLocked(self, obj, tag);
+  }
+
+  // Not in here.
+  return false;
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::Set(art::mirror::Object* obj, T new_tag) {
+  art::Thread* self = art::Thread::Current();
+  art::MutexLock mu(self, allow_disallow_lock_);
+  Wait(self);
+
+  return SetLocked(self, obj, new_tag);
+}
+template <typename T>
+bool JvmtiWeakTable<T>::SetLocked(art::mirror::Object* obj, T new_tag) {
+  art::Thread* self = art::Thread::Current();
+  allow_disallow_lock_.AssertHeld(self);
+  Wait(self);
+
+  return SetLocked(self, obj, new_tag);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::SetLocked(art::Thread* self, art::mirror::Object* obj, T new_tag) {
+  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+  if (it != tagged_objects_.end()) {
+    it->second = new_tag;
+    return true;
+  }
+
+  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+    // Under concurrent GC, there is a window between moving objects and sweeping of system
+    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+    // but still have from-space pointers in the table. Explicitly update the table once.
+    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
+    // Update the table.
+    UpdateTableWithReadBarrier();
+
+    // And try again.
+    return SetLocked(self, obj, new_tag);
+  }
+
+  // New element.
+  auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
+  DCHECK(insert_it.second);
+  return false;
+}
+
+template <typename T>
+void JvmtiWeakTable<T>::Sweep(art::IsMarkedVisitor* visitor) {
+  if (DoesHandleNullOnSweep()) {
+    SweepImpl<true>(visitor);
+  } else {
+    SweepImpl<false>(visitor);
+  }
+
+  // Under concurrent GC, there is a window between moving objects and sweeping of system
+  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+  // but still have from-space pointers in the table. We explicitly update the table then
+  // to ensure we compare against to-space pointers. But we want to do this only once. Once
+  // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
+  // so we re-enable the explicit update for the next marking.
+  update_since_last_sweep_ = false;
+}
+
+template <typename T>
+template <bool kHandleNull>
+void JvmtiWeakTable<T>::SweepImpl(art::IsMarkedVisitor* visitor) {
+  art::Thread* self = art::Thread::Current();
+  art::MutexLock mu(self, allow_disallow_lock_);
+
+  auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
+                             art::mirror::Object* original_obj) {
+    return visitor->IsMarked(original_obj);
+  };
+
+  UpdateTableWith<decltype(IsMarkedUpdater),
+                  kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
+}
+
+template <typename T>
+template <typename Updater, typename JvmtiWeakTable<T>::TableUpdateNullTarget kTargetNull>
+ALWAYS_INLINE inline void JvmtiWeakTable<T>::UpdateTableWith(Updater& updater) {
+  // We optimistically hope that elements will still be well-distributed when re-inserting them.
+  // So play with the map mechanics, and postpone rehashing. This avoids the need of a side
+  // vector and two passes.
+  float original_max_load_factor = tagged_objects_.max_load_factor();
+  tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
+  // For checking that a max load-factor actually does what we expect.
+  size_t original_bucket_count = tagged_objects_.bucket_count();
+
+  for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
+    DCHECK(!it->first.IsNull());
+    art::mirror::Object* original_obj = it->first.template Read<art::kWithoutReadBarrier>();
+    art::mirror::Object* target_obj = updater(it->first, original_obj);
+    if (original_obj != target_obj) {
+      if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
+        // Ignore null target, don't do anything.
+      } else {
+        T tag = it->second;
+        it = tagged_objects_.erase(it);
+        if (target_obj != nullptr) {
+          tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
+          DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
+        } else if (kTargetNull == kCallHandleNull) {
+          HandleNullSweep(tag);
+        }
+        continue;  // Iterator was implicitly updated by erase.
+      }
+    }
+    ++it;
+  }
+
+  tagged_objects_.max_load_factor(original_max_load_factor);
+  // TODO: consider rehash here.
+}
+
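+// ReleasableContainer is a minimal growable buffer on top of the given allocator. Unlike
+// std::vector, it can release ownership of its storage (Release()), which lets
+// GetTaggedObjects return JVMTI-allocated result arrays to the caller without a copy.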
+template <typename T>
+template <typename Storage, class Allocator>
+struct JvmtiWeakTable<T>::ReleasableContainer {
+  using allocator_type = Allocator;
+
+  explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
+      : allocator(alloc),
+        data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
+        size(0),
+        capacity(reserve) {
+  }
+
+  ~ReleasableContainer() {
+    if (data != nullptr) {
+      allocator.deallocate(data, capacity);
+      capacity = 0;
+      size = 0;
+    }
+  }
+
+  Storage* Release() {
+    Storage* tmp = data;
+
+    data = nullptr;
+    size = 0;
+    capacity = 0;
+
+    return tmp;
+  }
+
+  void Resize(size_t new_capacity) {
+    CHECK_GT(new_capacity, capacity);
+
+    Storage* tmp = allocator.allocate(new_capacity);
+    DCHECK(tmp != nullptr);
+    if (data != nullptr) {
+      memcpy(tmp, data, sizeof(Storage) * size);
+    }
+    Storage* old = data;
+    data = tmp;
+    allocator.deallocate(old, capacity);
+    capacity = new_capacity;
+  }
+
+  void Pushback(const Storage& elem) {
+    if (size == capacity) {
+      size_t new_capacity = 2 * capacity + 1;
+      Resize(new_capacity);
+    }
+    data[size++] = elem;
+  }
+
+  Allocator allocator;
+  Storage* data;
+  size_t size;
+  size_t capacity;
+};
+
+template <typename T>
+jvmtiError JvmtiWeakTable<T>::GetTaggedObjects(jvmtiEnv* jvmti_env,
+                                               jint tag_count,
+                                               const T* tags,
+                                               jint* count_ptr,
+                                               jobject** object_result_ptr,
+                                               T** tag_result_ptr) {
+  if (tag_count < 0) {
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+  if (tags == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+  if (count_ptr == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+  if (tag_count > 0) {
+    for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+      if (tags[i] == 0) {
+        return ERR(ILLEGAL_ARGUMENT);
+      }
+    }
+  }
+
+  art::Thread* self = art::Thread::Current();
+  art::MutexLock mu(self, allow_disallow_lock_);
+  Wait(self);
+
+  art::JNIEnvExt* jni_env = self->GetJniEnv();
+
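+  // With a tag filter the result count is unknown up front; start small and grow geometrically.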
+  constexpr size_t kDefaultSize = 10;
+  size_t initial_object_size;
+  size_t initial_tag_size;
+  if (tag_count == 0) {
+    initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+    initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+  } else {
+    initial_object_size = initial_tag_size = kDefaultSize;
+  }
+  JvmtiAllocator<void> allocator(jvmti_env);
+  ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator,
+                                                                         initial_object_size);
+  ReleasableContainer<T, JvmtiAllocator<T>> selected_tags(allocator, initial_tag_size);
+
+  size_t count = 0;
+  for (auto& pair : tagged_objects_) {
+    bool select;
+    if (tag_count > 0) {
+      select = false;
+      for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+        if (tags[i] == pair.second) {
+          select = true;
+          break;
+        }
+      }
+    } else {
+      select = true;
+    }
+
+    if (select) {
+      art::mirror::Object* obj = pair.first.template Read<art::kWithReadBarrier>();
+      if (obj != nullptr) {
+        count++;
+        if (object_result_ptr != nullptr) {
+          selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
+        }
+        if (tag_result_ptr != nullptr) {
+          selected_tags.Pushback(pair.second);
+        }
+      }
+    }
+  }
+
+  if (object_result_ptr != nullptr) {
+    *object_result_ptr = selected_objects.Release();
+  }
+  if (tag_result_ptr != nullptr) {
+    *tag_result_ptr = selected_tags.Release();
+  }
+  *count_ptr = static_cast<jint>(count);
+  return ERR(NONE);
+}
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
diff --git a/runtime/openjdkjvmti/jvmti_weak_table.h b/runtime/openjdkjvmti/jvmti_weak_table.h
new file mode 100644
index 0000000..ae36122
--- /dev/null
+++ b/runtime/openjdkjvmti/jvmti_weak_table.h
@@ -0,0 +1,222 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
+#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
+
+#include <unordered_map>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc/system_weak.h"
+#include "gc_root-inl.h"
+#include "globals.h"
+#include "jvmti.h"
+#include "mirror/object.h"
+#include "thread-inl.h"
+
+namespace openjdkjvmti {
+
+class EventHandler;
+
+// A system-weak container mapping objects to elements of the template type. This corresponds
+// to a weak hash map. For historical reasons the stored value is called "tag."
+template <typename T>
+class JvmtiWeakTable : public art::gc::SystemWeakHolder {
+ public:
+  JvmtiWeakTable()
+      : art::gc::SystemWeakHolder(kTaggingLockLevel),
+        update_since_last_sweep_(false) {
+  }
+
+  // Remove the mapping for the given object, returning whether such a mapping existed (and the old
+  // value).
+  bool Remove(art::mirror::Object* obj, /* out */ T* tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+  bool RemoveLocked(art::mirror::Object* obj, /* out */ T* tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
+
+  // Set the mapping for the given object. Returns true if this overwrites an already existing
+  // mapping.
+  virtual bool Set(art::mirror::Object* obj, T tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+  virtual bool SetLocked(art::mirror::Object* obj, T tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
+
+  // Return the value associated with the given object. Returns true if the mapping exists, false
+  // otherwise.
+  bool GetTag(art::mirror::Object* obj, /* out */ T* result)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_) {
+    art::Thread* self = art::Thread::Current();
+    art::MutexLock mu(self, allow_disallow_lock_);
+    Wait(self);
+
+    return GetTagLocked(self, obj, result);
+  }
+  bool GetTagLocked(art::mirror::Object* obj, /* out */ T* result)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_) {
+    art::Thread* self = art::Thread::Current();
+    allow_disallow_lock_.AssertHeld(self);
+    Wait(self);
+
+    return GetTagLocked(self, obj, result);
+  }
+
+  // Sweep the container. DO NOT CALL MANUALLY.
+  void Sweep(art::IsMarkedVisitor* visitor)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+
+  // Return all objects whose tag is in tags (or all tagged objects if tag_count is 0).
+  jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
+                              jint tag_count,
+                              const T* tags,
+                              /* out */ jint* count_ptr,
+                              /* out */ jobject** object_result_ptr,
+                              /* out */ T** tag_result_ptr)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+
+  // Locking functions, to allow coarse-grained locking and amortization.
+  void Lock() ACQUIRE(allow_disallow_lock_);
+  void Unlock() RELEASE(allow_disallow_lock_);
+  void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
+
+ protected:
+  // Should HandleNullSweep be called when Sweep detects the release of an object?
+  virtual bool DoesHandleNullOnSweep() {
+    return false;
+  }
+  // If DoesHandleNullOnSweep returns true, this function will be called.
+  virtual void HandleNullSweep(T tag ATTRIBUTE_UNUSED) {}
+
+ private:
+  bool SetLocked(art::Thread* self, art::mirror::Object* obj, T tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
+
+  bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* tag)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
+
+  bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_) {
+    auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+    if (it != tagged_objects_.end()) {
+      *result = it->second;
+      return true;
+    }
+
+    // Performance optimization: To avoid multiple table updates, ensure that during GC we
+    // only update once. See the comment on the implementation of GetTagSlowPath.
+    if (art::kUseReadBarrier &&
+        self != nullptr &&
+        self->GetIsGcMarking() &&
+        !update_since_last_sweep_) {
+      return GetTagSlowPath(self, obj, result);
+    }
+
+    return false;
+  }
+
+  // Slow-path for GetTag. We didn't find the object, but the table may still hold from-space
+  // pointers while we were handed a to-space pointer.
+  bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
+
+  // Update the table by doing read barriers on each element, ensuring that to-space pointers
+  // are stored.
+  void UpdateTableWithReadBarrier()
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
+
+  template <bool kHandleNull>
+  void SweepImpl(art::IsMarkedVisitor* visitor)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!allow_disallow_lock_);
+
+  enum TableUpdateNullTarget {
+    kIgnoreNull,
+    kRemoveNull,
+    kCallHandleNull
+  };
+
+  template <typename Updater, TableUpdateNullTarget kTargetNull>
+  void UpdateTableWith(Updater& updater)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(allow_disallow_lock_);
+
+  template <typename Storage, class Allocator = std::allocator<Storage>>
+  struct ReleasableContainer;
+
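+  // Hash and equality functors for the table keys, based on the stored object address. The
+  // address is deliberately read without a read barrier; stale from-space pointers are fixed
+  // up explicitly via UpdateTableWithReadBarrier.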
+  struct HashGcRoot {
+    size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
+        REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
+    }
+  };
+
+  struct EqGcRoot {
+    bool operator()(const art::GcRoot<art::mirror::Object>& r1,
+                    const art::GcRoot<art::mirror::Object>& r2) const
+        REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
+    }
+  };
+
+  // The tag table is used when visiting roots. So it needs to have a low lock level.
+  static constexpr art::LockLevel kTaggingLockLevel =
+      static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1);
+
+  std::unordered_map<art::GcRoot<art::mirror::Object>,
+                     T,
+                     HashGcRoot,
+                     EqGcRoot> tagged_objects_
+      GUARDED_BY(allow_disallow_lock_)
+      GUARDED_BY(art::Locks::mutator_lock_);
+  // Remember whether the table was read-barrier-updated since the last sweep, to avoid rescans.
+  bool update_since_last_sweep_;
+};
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index b27c2a3..4215588 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -33,355 +33,36 @@
 
 #include <limits>
 
-#include "art_jvmti.h"
-#include "base/logging.h"
 #include "events-inl.h"
-#include "gc/allocation_listener.h"
-#include "instrumentation.h"
-#include "jni_env_ext-inl.h"
-#include "jvmti_allocator.h"
-#include "mirror/class.h"
-#include "mirror/object.h"
-#include "runtime.h"
-#include "ScopedLocalRef.h"
+#include "jvmti_weak_table-inl.h"
 
 namespace openjdkjvmti {
 
-void ObjectTagTable::Lock() {
-  allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
-}
-void ObjectTagTable::Unlock() {
-  allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
-}
-void ObjectTagTable::AssertLocked() {
-  allow_disallow_lock_.AssertHeld(art::Thread::Current());
-}
-
-void ObjectTagTable::UpdateTableWithReadBarrier() {
-  update_since_last_sweep_ = true;
-
-  auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
-                                    art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
-     REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    return original_root.Read<art::kWithReadBarrier>();
-  };
-
-  UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
-}
-
-bool ObjectTagTable::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result) {
-  // Under concurrent GC, there is a window between moving objects and sweeping of system
-  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
-  // but still have from-space pointers in the table. Explicitly update the table once.
-  // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
-  UpdateTableWithReadBarrier();
-  return GetTagLocked(self, obj, result);
-}
-
-void ObjectTagTable::Add(art::mirror::Object* obj, jlong tag) {
-  // Same as Set(), as we don't have duplicates in an unordered_map.
-  Set(obj, tag);
-}
-
-bool ObjectTagTable::Remove(art::mirror::Object* obj, jlong* tag) {
-  art::Thread* self = art::Thread::Current();
-  art::MutexLock mu(self, allow_disallow_lock_);
-  Wait(self);
-
-  return RemoveLocked(self, obj, tag);
-}
-bool ObjectTagTable::RemoveLocked(art::mirror::Object* obj, jlong* tag) {
-  art::Thread* self = art::Thread::Current();
-  allow_disallow_lock_.AssertHeld(self);
-  Wait(self);
-
-  return RemoveLocked(self, obj, tag);
-}
-
-bool ObjectTagTable::RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag) {
-  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
-  if (it != tagged_objects_.end()) {
-    if (tag != nullptr) {
-      *tag = it->second;
-    }
-    tagged_objects_.erase(it);
-    return true;
-  }
-
-  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
-    // Under concurrent GC, there is a window between moving objects and sweeping of system
-    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
-    // but still have from-space pointers in the table. Explicitly update the table once.
-    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
-
-    // Update the table.
-    UpdateTableWithReadBarrier();
-
-    // And try again.
-    return RemoveLocked(self, obj, tag);
-  }
-
-  // Not in here.
-  return false;
-}
+// Instantiate for jlong = JVMTI tags.
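+// The explicit instantiation emits the out-of-line definitions from jvmti_weak_table-inl.h
+// exactly once, in this translation unit.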
+template class JvmtiWeakTable<jlong>;
 
 bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
   if (new_tag == 0) {
     jlong tmp;
     return Remove(obj, &tmp);
   }
-
-  art::Thread* self = art::Thread::Current();
-  art::MutexLock mu(self, allow_disallow_lock_);
-  Wait(self);
-
-  return SetLocked(self, obj, new_tag);
+  return JvmtiWeakTable<jlong>::Set(obj, new_tag);
 }
 bool ObjectTagTable::SetLocked(art::mirror::Object* obj, jlong new_tag) {
   if (new_tag == 0) {
     jlong tmp;
     return RemoveLocked(obj, &tmp);
   }
-
-  art::Thread* self = art::Thread::Current();
-  allow_disallow_lock_.AssertHeld(self);
-  Wait(self);
-
-  return SetLocked(self, obj, new_tag);
+  return JvmtiWeakTable<jlong>::SetLocked(obj, new_tag);
 }
 
-bool ObjectTagTable::SetLocked(art::Thread* self, art::mirror::Object* obj, jlong new_tag) {
-  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
-  if (it != tagged_objects_.end()) {
-    it->second = new_tag;
-    return true;
-  }
-
-  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
-    // Under concurrent GC, there is a window between moving objects and sweeping of system
-    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
-    // but still have from-space pointers in the table. Explicitly update the table once.
-    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
-
-    // Update the table.
-    UpdateTableWithReadBarrier();
-
-    // And try again.
-    return SetLocked(self, obj, new_tag);
-  }
-
-  // New element.
-  auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
-  DCHECK(insert_it.second);
-  return false;
+bool ObjectTagTable::DoesHandleNullOnSweep() {
+  return event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree);
 }
-
-void ObjectTagTable::Sweep(art::IsMarkedVisitor* visitor) {
-  if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree)) {
-    SweepImpl<true>(visitor);
-  } else {
-    SweepImpl<false>(visitor);
-  }
-
-  // Under concurrent GC, there is a window between moving objects and sweeping of system
-  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
-  // but still have from-space pointers in the table. We explicitly update the table then
-  // to ensure we compare against to-space pointers. But we want to do this only once. Once
-  // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
-  // so we re-enable the explicit update for the next marking.
-  update_since_last_sweep_ = false;
-}
-
-template <bool kHandleNull>
-void ObjectTagTable::SweepImpl(art::IsMarkedVisitor* visitor) {
-  art::Thread* self = art::Thread::Current();
-  art::MutexLock mu(self, allow_disallow_lock_);
-
-  auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
-                             art::mirror::Object* original_obj) {
-    return visitor->IsMarked(original_obj);
-  };
-
-  UpdateTableWith<decltype(IsMarkedUpdater),
-                  kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
-}
-
 void ObjectTagTable::HandleNullSweep(jlong tag) {
   event_handler_->DispatchEvent<ArtJvmtiEvent::kObjectFree>(nullptr, tag);
 }
 
-template <typename T, ObjectTagTable::TableUpdateNullTarget kTargetNull>
-ALWAYS_INLINE inline void ObjectTagTable::UpdateTableWith(T& updater) {
-  // We optimistically hope that elements will still be well-distributed when re-inserting them.
-  // So play with the map mechanics, and postpone rehashing. This avoids the need of a side
-  // vector and two passes.
-  float original_max_load_factor = tagged_objects_.max_load_factor();
-  tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
-  // For checking that a max load-factor actually does what we expect.
-  size_t original_bucket_count = tagged_objects_.bucket_count();
-
-  for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
-    DCHECK(!it->first.IsNull());
-    art::mirror::Object* original_obj = it->first.Read<art::kWithoutReadBarrier>();
-    art::mirror::Object* target_obj = updater(it->first, original_obj);
-    if (original_obj != target_obj) {
-      if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
-        // Ignore null target, don't do anything.
-      } else {
-        jlong tag = it->second;
-        it = tagged_objects_.erase(it);
-        if (target_obj != nullptr) {
-          tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
-          DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
-        } else if (kTargetNull == kCallHandleNull) {
-          HandleNullSweep(tag);
-        }
-        continue;  // Iterator was implicitly updated by erase.
-      }
-    }
-    it++;
-  }
-
-  tagged_objects_.max_load_factor(original_max_load_factor);
-  // TODO: consider rehash here.
-}
-
-template <typename T, class Allocator = std::allocator<T>>
-struct ReleasableContainer {
-  using allocator_type = Allocator;
-
-  explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
-      : allocator(alloc),
-        data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
-        size(0),
-        capacity(reserve) {
-  }
-
-  ~ReleasableContainer() {
-    if (data != nullptr) {
-      allocator.deallocate(data, capacity);
-      capacity = 0;
-      size = 0;
-    }
-  }
-
-  T* Release() {
-    T* tmp = data;
-
-    data = nullptr;
-    size = 0;
-    capacity = 0;
-
-    return tmp;
-  }
-
-  void Resize(size_t new_capacity) {
-    CHECK_GT(new_capacity, capacity);
-
-    T* tmp = allocator.allocate(new_capacity);
-    DCHECK(tmp != nullptr);
-    if (data != nullptr) {
-      memcpy(tmp, data, sizeof(T) * size);
-    }
-    T* old = data;
-    data = tmp;
-    allocator.deallocate(old, capacity);
-    capacity = new_capacity;
-  }
-
-  void Pushback(const T& elem) {
-    if (size == capacity) {
-      size_t new_capacity = 2 * capacity + 1;
-      Resize(new_capacity);
-    }
-    data[size++] = elem;
-  }
-
-  Allocator allocator;
-  T* data;
-  size_t size;
-  size_t capacity;
-};
-
-jvmtiError ObjectTagTable::GetTaggedObjects(jvmtiEnv* jvmti_env,
-                                            jint tag_count,
-                                            const jlong* tags,
-                                            jint* count_ptr,
-                                            jobject** object_result_ptr,
-                                            jlong** tag_result_ptr) {
-  if (tag_count < 0) {
-    return ERR(ILLEGAL_ARGUMENT);
-  }
-  if (tag_count > 0) {
-    for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
-      if (tags[i] == 0) {
-        return ERR(ILLEGAL_ARGUMENT);
-      }
-    }
-  }
-  if (tags == nullptr) {
-    return ERR(NULL_POINTER);
-  }
-  if (count_ptr == nullptr) {
-    return ERR(NULL_POINTER);
-  }
-
-  art::Thread* self = art::Thread::Current();
-  art::MutexLock mu(self, allow_disallow_lock_);
-  Wait(self);
-
-  art::JNIEnvExt* jni_env = self->GetJniEnv();
-
-  constexpr size_t kDefaultSize = 10;
-  size_t initial_object_size;
-  size_t initial_tag_size;
-  if (tag_count == 0) {
-    initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
-    initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
-  } else {
-    initial_object_size = initial_tag_size = kDefaultSize;
-  }
-  JvmtiAllocator<void> allocator(jvmti_env);
-  ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator, initial_object_size);
-  ReleasableContainer<jlong, JvmtiAllocator<jlong>> selected_tags(allocator, initial_tag_size);
-
-  size_t count = 0;
-  for (auto& pair : tagged_objects_) {
-    bool select;
-    if (tag_count > 0) {
-      select = false;
-      for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
-        if (tags[i] == pair.second) {
-          select = true;
-          break;
-        }
-      }
-    } else {
-      select = true;
-    }
-
-    if (select) {
-      art::mirror::Object* obj = pair.first.Read<art::kWithReadBarrier>();
-      if (obj != nullptr) {
-        count++;
-        if (object_result_ptr != nullptr) {
-          selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
-        }
-        if (tag_result_ptr != nullptr) {
-          selected_tags.Pushback(pair.second);
-        }
-      }
-    }
-  }
-
-  if (object_result_ptr != nullptr) {
-    *object_result_ptr = selected_objects.Release();
-  }
-  if (tag_result_ptr != nullptr) {
-    *tag_result_ptr = selected_tags.Release();
-  }
-  *count_ptr = static_cast<jint>(count);
-  return ERR(NONE);
-}
-
 }  // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index 0296f1a..b5a601c 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -1,17 +1,32 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
  *
- *      http://www.apache.org/licenses/LICENSE-2.0
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
  */
 
 #ifndef ART_RUNTIME_OPENJDKJVMTI_OBJECT_TAGGING_H_
@@ -20,62 +35,28 @@
 #include <unordered_map>
 
 #include "base/mutex.h"
-#include "gc/system_weak.h"
-#include "gc_root-inl.h"
 #include "globals.h"
 #include "jvmti.h"
+#include "jvmti_weak_table.h"
 #include "mirror/object.h"
-#include "thread-inl.h"
 
 namespace openjdkjvmti {
 
 class EventHandler;
 
-class ObjectTagTable : public art::gc::SystemWeakHolder {
+class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
  public:
-  explicit ObjectTagTable(EventHandler* event_handler)
-      : art::gc::SystemWeakHolder(kTaggingLockLevel),
-        update_since_last_sweep_(false),
-        event_handler_(event_handler) {
+  explicit ObjectTagTable(EventHandler* event_handler) : event_handler_(event_handler) {
   }
 
-  void Add(art::mirror::Object* obj, jlong tag)
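+  // Overrides implement the JVMTI convention that setting a tag of 0 removes the mapping.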
+  bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_);
-
-  bool Remove(art::mirror::Object* obj, jlong* tag)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(!allow_disallow_lock_);
-  bool RemoveLocked(art::mirror::Object* obj, jlong* tag)
+  bool SetLocked(art::mirror::Object* obj, jlong tag) OVERRIDE
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(allow_disallow_lock_);
 
-  bool Set(art::mirror::Object* obj, jlong tag)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(!allow_disallow_lock_);
-  bool SetLocked(art::mirror::Object* obj, jlong tag)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(allow_disallow_lock_);
-
-  bool GetTag(art::mirror::Object* obj, jlong* result)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(!allow_disallow_lock_) {
-    art::Thread* self = art::Thread::Current();
-    art::MutexLock mu(self, allow_disallow_lock_);
-    Wait(self);
-
-    return GetTagLocked(self, obj, result);
-  }
-  bool GetTagLocked(art::mirror::Object* obj, jlong* result)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(allow_disallow_lock_) {
-    art::Thread* self = art::Thread::Current();
-    allow_disallow_lock_.AssertHeld(self);
-    Wait(self);
-
-    return GetTagLocked(self, obj, result);
-  }
-
   jlong GetTagOrZero(art::mirror::Object* obj)
       REQUIRES_SHARED(art::Locks::mutator_lock_)
       REQUIRES(!allow_disallow_lock_) {
@@ -91,108 +72,11 @@
     return tmp;
   }
 
-  void Sweep(art::IsMarkedVisitor* visitor)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(!allow_disallow_lock_);
-
-  jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
-                              jint tag_count,
-                              const jlong* tags,
-                              jint* count_ptr,
-                              jobject** object_result_ptr,
-                              jlong** tag_result_ptr)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(!allow_disallow_lock_);
-
-  void Lock() ACQUIRE(allow_disallow_lock_);
-  void Unlock() RELEASE(allow_disallow_lock_);
-  void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
+ protected:
+  bool DoesHandleNullOnSweep() OVERRIDE;
+  void HandleNullSweep(jlong tag) OVERRIDE;
 
  private:
-  bool SetLocked(art::Thread* self, art::mirror::Object* obj, jlong tag)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(allow_disallow_lock_);
-
-  bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(allow_disallow_lock_);
-
-  bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, jlong* result)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(allow_disallow_lock_) {
-    auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
-    if (it != tagged_objects_.end()) {
-      *result = it->second;
-      return true;
-    }
-
-    if (art::kUseReadBarrier &&
-        self != nullptr &&
-        self->GetIsGcMarking() &&
-        !update_since_last_sweep_) {
-      return GetTagSlowPath(self, obj, result);
-    }
-
-    return false;
-  }
-
-  // Slow-path for GetTag. We didn't find the object, but we might be storing from-pointers and
-  // are asked to retrieve with a to-pointer.
-  bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(allow_disallow_lock_);
-
-  // Update the table by doing read barriers on each element, ensuring that to-space pointers
-  // are stored.
-  void UpdateTableWithReadBarrier()
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(allow_disallow_lock_);
-
-  template <bool kHandleNull>
-  void SweepImpl(art::IsMarkedVisitor* visitor)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(!allow_disallow_lock_);
-  void HandleNullSweep(jlong tag);
-
-  enum TableUpdateNullTarget {
-    kIgnoreNull,
-    kRemoveNull,
-    kCallHandleNull
-  };
-
-  template <typename T, TableUpdateNullTarget kTargetNull>
-  void UpdateTableWith(T& updater)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      REQUIRES(allow_disallow_lock_);
-
-  struct HashGcRoot {
-    size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
-        REQUIRES_SHARED(art::Locks::mutator_lock_) {
-      return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
-    }
-  };
-
-  struct EqGcRoot {
-    bool operator()(const art::GcRoot<art::mirror::Object>& r1,
-                    const art::GcRoot<art::mirror::Object>& r2) const
-        REQUIRES_SHARED(art::Locks::mutator_lock_) {
-      return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
-    }
-  };
-
-  // The tag table is used when visiting roots. So it needs to have a low lock level.
-  static constexpr art::LockLevel kTaggingLockLevel =
-      static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1);
-
-  std::unordered_map<art::GcRoot<art::mirror::Object>,
-                     jlong,
-                     HashGcRoot,
-                     EqGcRoot> tagged_objects_
-      GUARDED_BY(allow_disallow_lock_)
-      GUARDED_BY(art::Locks::mutator_lock_);
-  // To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
-  bool update_since_last_sweep_;
-
   EventHandler* event_handler_;
 };