ART: Add templated VisitObjects
Add templated versions of VisitObjects that accept arbitrary visitors.
This lets call sites use modern C++ lambdas instead of the C-style
ObjectCallback/void* pairs.
Test: m
Change-Id: I71a7f59bcae02090b9493bf8b477bb8b6ba649de
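The practical effect at call sites: the ObjectCallback/void* pairing can be
replaced with a capturing lambda. A minimal before/after sketch (the counting
visitor is hypothetical, not a call site touched by this change):

    // Before: C-style callback, with state threaded through a void*.
    static void CountCallback(mirror::Object* obj ATTRIBUTE_UNUSED, void* arg) {
      ++*reinterpret_cast<size_t*>(arg);
    }
    size_t count = 0;
    heap->VisitObjects(CountCallback, &count);

    // After: the lambda captures its state directly.
    size_t count = 0;
    heap->VisitObjects([&count](mirror::Object* obj ATTRIBUTE_UNUSED) { ++count; });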
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 8fcc87d..edf2e5b 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -26,7 +26,7 @@
namespace accounting {
template <typename Visitor>
-inline void HeapBitmap::Visit(const Visitor& visitor) {
+inline void HeapBitmap::Visit(Visitor&& visitor) {
for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
}
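A note on the signature change: in a deduced context, Visitor&& is a
forwarding reference, so Visit() now accepts const visitors, non-const
visitors, and temporaries alike. In particular, a mutable lambda's
operator() is non-const and cannot be invoked through the old
const Visitor& parameter. A standalone illustration (not ART code):

    #include <cstddef>

    template <typename Visitor>
    void Visit(Visitor&& visitor) {  // forwarding reference: binds any visitor
      visitor(nullptr);
    }

    int main() {
      size_t calls = 0;
      auto v = [calls](void*) mutable { ++calls; };  // operator() is non-const
      Visit(v);  // ok; with `const Visitor&`, the call visitor(nullptr) would
                 // not compile, since it needs a non-const operator().
      return 0;
    }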
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 7097f87..2007bef 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -51,7 +51,7 @@
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
template <typename Visitor>
- void Visit(const Visitor& visitor)
+ ALWAYS_INLINE void Visit(Visitor&& visitor)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 9feaf41..20508c1 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -62,8 +62,9 @@
}
template<size_t kAlignment> template<typename Visitor>
-inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
- const Visitor& visitor) const {
+inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
+ uintptr_t visit_end,
+ Visitor&& visitor) const {
DCHECK_LE(visit_begin, visit_end);
#if 0
for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
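For context on VisitMarkedRange: the live implementation scans bitmap words
and jumps directly between set bits instead of probing every slot the way the
disabled #if 0 loop does. A generic sketch of that word-scanning idiom (an
assumption about the shape of the real SpaceBitmap code, which additionally
handles range edges and word alignment):

    #include <cstdint>
    #include <cstddef>

    // Visit each set bit in one 64-bit bitmap word, assuming bit i maps to
    // the slot at base + i * kAlignment.
    template <size_t kAlignment, typename Visitor>
    void VisitWord(uintptr_t base, uint64_t word, Visitor&& visitor) {
      while (word != 0) {
        const int shift = __builtin_ctzll(word);  // index of the lowest set bit
        visitor(base + shift * kAlignment);       // address of the marked slot
        word &= word - 1;                         // clear that bit
      }
    }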
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 889f57b..6188c9f 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -134,7 +134,7 @@
// TODO: Use lock annotations when clang is fixed.
// REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename Visitor>
- void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
+ void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) const
NO_THREAD_SAFETY_ANALYSIS;
// Visits set bits in address order. The callback is not permitted to change the bitmap bits or
diff --git a/runtime/gc/heap-visit-objects-inl.h b/runtime/gc/heap-visit-objects-inl.h
new file mode 100644
index 0000000..b6ccb277
--- /dev/null
+++ b/runtime/gc/heap-visit-objects-inl.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
+#define ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
+
+#include "heap.h"
+
+#include "base/mutex-inl.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/space/bump_pointer_space-walk-inl.h"
+#include "gc/space/region_space-inl.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+
+namespace art {
+namespace gc {
+
+// Visit objects when threads aren't suspended. If this is a concurrent
+// moving GC, disable moving GC and suspend the threads before visiting
+// objects.
+template <typename Visitor>
+inline void Heap::VisitObjects(Visitor&& visitor) {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertSharedHeld(self);
+ DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
+ if (IsGcConcurrentAndMoving()) {
+ // Concurrent moving GC. Just suspending threads isn't sufficient:
+ // a collection isn't one big pause, so we could suspend threads in
+ // the middle of (between phases of) a concurrent moving collection,
+ // where it isn't easily known which objects are alive (in both the
+ // region space and the non-moving space) or which copies of objects
+ // should be visited, and the to-space invariant could easily be
+ // broken. Instead, visit objects while the GC isn't running, by
+ // disabling moving GC (IncrementDisableMovingGC()) and suspending
+ // the threads.
+ IncrementDisableMovingGC(self);
+ {
+ ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
+ ScopedSuspendAll ssa(__FUNCTION__);
+ VisitObjectsInternalRegionSpace(visitor);
+ VisitObjectsInternal(visitor);
+ }
+ DecrementDisableMovingGC(self);
+ } else {
+ // Since the concurrent moving GC path comes with thread suspension, also
+ // poison ObjPtrs in the normal case to catch bugs.
+ self->PoisonObjectPointers();
+ // GCs can move objects, so don't allow this.
+ ScopedAssertNoThreadSuspension ants("Visiting objects");
+ DCHECK(region_space_ == nullptr);
+ VisitObjectsInternal(visitor);
+ self->PoisonObjectPointers();
+ }
+}
+
+template <typename Visitor>
+inline void Heap::VisitObjectsPaused(Visitor&& visitor) {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ VisitObjectsInternalRegionSpace(visitor);
+ VisitObjectsInternal(visitor);
+}
+
+// Visit objects in the region spaces.
+template <typename Visitor>
+inline void Heap::VisitObjectsInternalRegionSpace(Visitor&& visitor) {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ if (region_space_ != nullptr) {
+ DCHECK(IsGcConcurrentAndMoving());
+ if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
+ // Exclude the pre-zygote-fork window: there, the semi-space collector
+ // calls VerifyHeapReferences() as part of the zygote compaction and
+ // reaches this point without moving GC disabled, which is fine.
+ bool is_thread_running_gc = false;
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *gc_complete_lock_);
+ is_thread_running_gc = self == thread_running_gc_;
+ }
+ // If we are not the thread running the GC in a GC-exclusive region, then
+ // moving GC must be disabled.
+ DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
+ }
+ region_space_->Walk(visitor);
+ }
+}
+
+// Visit objects in the other spaces.
+template <typename Visitor>
+inline void Heap::VisitObjectsInternal(Visitor&& visitor) {
+ if (bump_pointer_space_ != nullptr) {
+ // Visit objects in bump pointer space.
+ bump_pointer_space_->Walk(visitor);
+ }
+ // TODO: Switch to standard begin and end to use a range-based for loop.
+ for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
+ mirror::Object* const obj = it->AsMirrorPtr();
+
+ mirror::Class* kls = nullptr;
+ if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
+ // The invariant check below is safe regardless of which space the object is in.
+ // For speed reasons, only perform it when Rosalloc could possibly be in use.
+ // (Disabled for read barriers, since that configuration never uses Rosalloc;
+ // see the DCHECK in the RosAllocSpace constructor.)
+ if (!kUseReadBarrier) {
+ // Rosalloc has a race in allocation. Objects can be written into the allocation
+ // stack before their header writes are visible to this thread.
+ // See b/28790624 for more details.
+ //
+ // obj.class will either be pointing to a valid Class*, or it will point
+ // to a rosalloc free buffer.
+ //
+ // If it's pointing to a valid Class* then that Class's Class will be the
+ // ClassClass (whose Class is itself).
+ //
+ // A rosalloc free buffer will point to another rosalloc free buffer
+ // (or to null), and never to itself.
+ //
+ // Either way, dereferencing it while it's non-null is safe, because it will
+ // always point to another valid pointer or to null.
+ mirror::Class* klsClass = kls->GetClass();
+
+ if (klsClass == nullptr) {
+ continue;
+ } else if (klsClass->GetClass() != klsClass) {
+ continue;
+ }
+ } else {
+ // Ensure the invariant is not broken for non-rosalloc cases.
+ DCHECK(Heap::rosalloc_space_ == nullptr)
+ << "unexpected rosalloc with read barriers";
+ DCHECK(kls->GetClass() != nullptr)
+ << "invalid object: class does not have a class";
+ DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
+ << "invalid object: class's class is not ClassClass";
+ }
+
+ // Avoid the race condition caused by the object not yet being written into the
+ // allocation stack, or the class not yet being written into the object. Also, if
+ // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
+ visitor(obj);
+ }
+ }
+ {
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ GetLiveBitmap()->Visit<Visitor>(visitor);
+ }
+}
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
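With these entry points in place, follow-up callers can hand lock-annotated
lambdas straight to the heap walk. A hedged usage sketch (the call site,
`klass`, and `instances` are illustrative, not part of this change):

    std::vector<mirror::Object*> instances;
    heap->VisitObjects([&](mirror::Object* obj)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (obj->GetClass() == klass) {
        instances.push_back(obj);
      }
    });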
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ad4c0d5..dfa3ff9 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -65,6 +65,7 @@
#include "gc_pause_listener.h"
#include "gc_root.h"
#include "heap-inl.h"
+#include "heap-visit-objects-inl.h"
#include "image.h"
#include "intern_table.h"
#include "java_vm_ext.h"
@@ -2935,7 +2936,7 @@
class VerifyReferenceVisitor : public SingleRootVisitor {
public:
VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
size_t GetFailureCount() const {
@@ -3089,8 +3090,7 @@
VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
- void operator()(mirror::Object* obj)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3098,12 +3098,6 @@
obj->VisitReferences(visitor, visitor);
}
- static void VisitCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
- visitor->operator()(obj);
- }
-
void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3175,7 +3169,7 @@
// 2. Allocated during the GC (pre sweep GC verification).
// We don't want to verify the objects in the live stack since they themselves may be
// pointing to dead objects if they are not reachable.
- VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
+ VisitObjectsPaused(visitor);
// Verify the roots:
visitor.VerifyRoots();
if (visitor.GetFailureCount() > 0) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9e55081..78a21de 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,6 +25,7 @@
#include "allocator_type.h"
#include "arch/instruction_set.h"
#include "atomic.h"
+#include "base/mutex.h"
#include "base/time_utils.h"
#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
@@ -256,6 +257,14 @@
void VisitObjectsPaused(ObjectCallback callback, void* arg)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
+ REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+
void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1057,6 +1066,14 @@
void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
+ REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+
void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
// GC stress mode attempts to do one GC per unique backtrace.
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
new file mode 100644
index 0000000..611b3d0
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
+
+#include "bump_pointer_space.h"
+
+#include "base/bit_utils.h"
+#include "mirror/object-inl.h"
+#include "thread-current-inl.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+template <typename Visitor>
+inline void BumpPointerSpace::Walk(Visitor&& visitor) {
+ uint8_t* pos = Begin();
+ uint8_t* end = End();
+ uint8_t* main_end = pos;
+ {
+ MutexLock mu(Thread::Current(), block_lock_);
+ // If we have 0 blocks then we need to update the main header since we have bump pointer style
+ // allocation into an unbounded region (actually bounded by Capacity()).
+ if (num_blocks_ == 0) {
+ UpdateMainBlock();
+ }
+ main_end = Begin() + main_block_size_;
+ if (num_blocks_ == 0) {
+ // We don't have any other blocks, which means someone else may be allocating
+ // into the main block. In that case, we don't want to try to visit blocks after
+ // the main block, since that memory could actually still be part of the main block.
+ end = main_end;
+ }
+ }
+ // Walk all of the objects in the main block first.
+ while (pos < main_end) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+ // No read barrier because obj may not be a valid object.
+ if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
+ // There is a race condition where a thread has just allocated an object but not yet
+ // set its class. We can't know the size of such an object, so we don't visit it; we
+ // also exit the function, since there are guaranteed to be no other blocks after the
+ // main block in this case.
+ return;
+ } else {
+ visitor(obj);
+ pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
+ }
+ }
+ // Walk the other blocks (currently only TLABs).
+ while (pos < end) {
+ BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
+ size_t block_size = header->size_;
+ pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects start.
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+ const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
+ CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
+ // We don't know how many objects are allocated in the current block. When we hit a
+ // null class, assume it's the end. TODO: Have a thread update the header when it
+ // flushes the block?
+ // No read barrier because obj may not be a valid object.
+ while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ visitor(obj);
+ obj = GetNextObject(obj);
+ }
+ pos += block_size;
+ }
+}
+
+} // namespace space
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
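As a reading aid for the walk above, the space layout it assumes
(reconstructed from the code; block sizes are whatever size_ records):

    Begin()                        main_end                           End()
    |========= main block =========|[hdr][objects...][hdr][objects...]|

The main block is headerless bump-pointer memory whose extent is
main_block_size_. Each later block (currently a flushed TLAB) begins with a
BlockHeader whose size_ field covers the objects that follow it, and a null
class word terminates the object run within a block.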
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 566dc5d..cf152e1 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -151,6 +151,9 @@
// Go through all of the blocks and visit the continuous objects.
void Walk(ObjectCallback* callback, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
+ template <typename Visitor>
+ ALWAYS_INLINE void Walk(Visitor&& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 2fba4a8..34b552b 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -186,6 +186,14 @@
template<bool kToSpaceOnly>
void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
+ auto visitor = [callback, arg](mirror::Object* obj) {
+ callback(obj, arg);
+ };
+ WalkInternal<kToSpaceOnly>(visitor);
+}
+
+template<bool kToSpaceOnly, typename Visitor>
+void RegionSpace::WalkInternal(Visitor&& visitor) {
// TODO: MutexLock on region_lock_ won't work due to lock order
// issues (the classloader classes lock and the monitor lock). We
// call this with threads suspended.
@@ -201,7 +209,7 @@
DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
DCHECK(obj->GetClass() != nullptr);
- callback(obj, arg);
+ visitor(obj);
} else if (r->IsLargeTail()) {
// Do nothing.
} else {
@@ -215,14 +223,12 @@
GetLiveBitmap()->VisitMarkedRange(
reinterpret_cast<uintptr_t>(pos),
reinterpret_cast<uintptr_t>(top),
- [callback, arg](mirror::Object* obj) {
- callback(obj, arg);
- });
+ visitor);
} else {
while (pos < top) {
mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
- callback(obj, arg);
+ visitor(obj);
pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
} else {
break;
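The hunk above shows the migration pattern used throughout this change: the
legacy ObjectCallback overload becomes a thin shim that wraps the
(callback, arg) pair in a lambda and forwards to the templated walk. The
idiom in isolation (WalkTemplated is a placeholder name for the templated
overload):

    // Legacy C-style entry point, kept for callers not yet converted.
    void WalkLegacy(ObjectCallback* callback, void* arg) {
      auto visitor = [callback, arg](mirror::Object* obj) {
        callback(obj, arg);  // forward into the untyped callback
      };
      WalkTemplated(visitor);
    }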
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 6412158..54a56b3 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -156,6 +156,10 @@
REQUIRES(Locks::mutator_lock_) {
WalkInternal<false>(callback, arg);
}
+ template <typename Visitor>
+ ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
+ WalkInternal<false /* kToSpaceOnly */>(visitor);
+ }
void WalkToSpace(ObjectCallback* callback, void* arg)
REQUIRES(Locks::mutator_lock_) {
@@ -250,6 +254,9 @@
template<bool kToSpaceOnly>
void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;
+ template<bool kToSpaceOnly, typename Visitor>
+ ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
+
class Region {
public:
Region()