ART: Replace or remove some ObjectCallback Walk functions
Replace them with template visitor functions in RegionSpace and
SpaceBitmap. Remove the old ObjectCallback version in HeapBitmap. Fix
up the callers. Move some thread-safety annotations.
Move the ObjectCallback definition to its only remaining user
(ModUnionTable).
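The migration pattern at each call site is the same: a static free
function taking a void* context becomes a lambda that captures its
context and carries the lock annotation itself. A minimal sketch of
the before/after (Dumper, DumpCallback, and bitmap are illustrative
names, not identifiers from this change):

  // Before: C-style callback plus void* context; the thread-safety
  // analysis cannot see through the void* indirection.
  static void DumpCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<Dumper*>(arg)->DumpObject(obj);
  }
  bitmap->Walk(DumpCallback, this);

  // After: a capturing lambda passed to the templated Walk.
  auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    DumpObject(obj);
  };
  bitmap->Walk(visitor);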
Test: m
Change-Id: I10307aeacad0c60d21fbade2081ec040d6a6ac4c
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 066c66a..ae26e7d 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -42,6 +42,7 @@
#include "dex_instruction-inl.h"
#include "disassembler.h"
#include "elf_builder.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
@@ -1930,9 +1931,12 @@
}
}
}
+ auto dump_visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DumpObject(obj);
+ };
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Dump the normal objects before ArtMethods.
- image_space_.GetLiveBitmap()->Walk(ImageDumper::Callback, this);
+ image_space_.GetLiveBitmap()->Walk(dump_visitor);
indent_os << "\n";
// TODO: Dump fields.
// Dump methods after.
@@ -1941,7 +1945,7 @@
image_space_.Begin(),
image_header_.GetPointerSize());
// Dump the large objects separately.
- heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
+ heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(dump_visitor);
indent_os << "\n";
}
os << "STATS:\n" << std::flush;
@@ -2156,20 +2160,18 @@
return oat_code_begin + GetQuickOatCodeSize(m);
}
- static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ void DumpObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
- DCHECK(arg != nullptr);
- ImageDumper* state = reinterpret_cast<ImageDumper*>(arg);
- if (!state->InDumpSpace(obj)) {
+ if (!InDumpSpace(obj)) {
return;
}
size_t object_bytes = obj->SizeOf();
size_t alignment_bytes = RoundUp(object_bytes, kObjectAlignment) - object_bytes;
- state->stats_.object_bytes += object_bytes;
- state->stats_.alignment_bytes += alignment_bytes;
+ stats_.object_bytes += object_bytes;
+ stats_.alignment_bytes += alignment_bytes;
- std::ostream& os = state->vios_.Stream();
+ std::ostream& os = vios_.Stream();
mirror::Class* obj_class = obj->GetClass();
if (obj_class->IsArrayClass()) {
@@ -2186,9 +2188,9 @@
} else {
os << StringPrintf("%p: %s\n", obj, obj_class->PrettyDescriptor().c_str());
}
- ScopedIndentation indent1(&state->vios_);
+ ScopedIndentation indent1(&vios_);
DumpFields(os, obj, obj_class);
- const PointerSize image_pointer_size = state->image_header_.GetPointerSize();
+ const PointerSize image_pointer_size = image_header_.GetPointerSize();
if (obj->IsObjectArray()) {
auto* obj_array = obj->AsObjectArray<mirror::Object>();
for (int32_t i = 0, length = obj_array->GetLength(); i < length; i++) {
@@ -2215,22 +2217,22 @@
mirror::Class* klass = obj->AsClass();
if (klass->NumStaticFields() != 0) {
os << "STATICS:\n";
- ScopedIndentation indent2(&state->vios_);
+ ScopedIndentation indent2(&vios_);
for (ArtField& field : klass->GetSFields()) {
PrintField(os, &field, field.GetDeclaringClass());
}
}
} else {
- auto it = state->dex_caches_.find(obj);
- if (it != state->dex_caches_.end()) {
+ auto it = dex_caches_.find(obj);
+ if (it != dex_caches_.end()) {
auto* dex_cache = down_cast<mirror::DexCache*>(obj);
- const auto& field_section = state->image_header_.GetImageSection(
+ const auto& field_section = image_header_.GetImageSection(
ImageHeader::kSectionArtFields);
- const auto& method_section = state->image_header_.GetMethodsSection();
+ const auto& method_section = image_header_.GetMethodsSection();
size_t num_methods = dex_cache->NumResolvedMethods();
if (num_methods != 0u) {
os << "Methods (size=" << num_methods << "):\n";
- ScopedIndentation indent2(&state->vios_);
+ ScopedIndentation indent2(&vios_);
auto* resolved_methods = dex_cache->GetResolvedMethods();
for (size_t i = 0, length = dex_cache->NumResolvedMethods(); i < length; ++i) {
auto* elem = mirror::DexCache::GetElementPtrSize(resolved_methods,
@@ -2254,7 +2256,7 @@
if (elem == nullptr) {
msg = "null";
} else if (method_section.Contains(
- reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin())) {
+ reinterpret_cast<uint8_t*>(elem) - image_space_.Begin())) {
msg = reinterpret_cast<ArtMethod*>(elem)->PrettyMethod();
} else {
msg = "<not in method section>";
@@ -2265,7 +2267,7 @@
size_t num_fields = dex_cache->NumResolvedFields();
if (num_fields != 0u) {
os << "Fields (size=" << num_fields << "):\n";
- ScopedIndentation indent2(&state->vios_);
+ ScopedIndentation indent2(&vios_);
auto* resolved_fields = dex_cache->GetResolvedFields();
for (size_t i = 0, length = dex_cache->NumResolvedFields(); i < length; ++i) {
auto* elem = mirror::DexCache::GetNativePairPtrSize(
@@ -2288,7 +2290,7 @@
if (elem == nullptr) {
msg = "null";
} else if (field_section.Contains(
- reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin())) {
+ reinterpret_cast<uint8_t*>(elem) - image_space_.Begin())) {
msg = reinterpret_cast<ArtField*>(elem)->PrettyField();
} else {
msg = "<not in field section>";
@@ -2299,7 +2301,7 @@
size_t num_types = dex_cache->NumResolvedTypes();
if (num_types != 0u) {
os << "Types (size=" << num_types << "):\n";
- ScopedIndentation indent2(&state->vios_);
+ ScopedIndentation indent2(&vios_);
auto* resolved_types = dex_cache->GetResolvedTypes();
for (size_t i = 0; i < num_types; ++i) {
auto pair = resolved_types[i].load(std::memory_order_relaxed);
@@ -2331,7 +2333,7 @@
}
}
std::string temp;
- state->stats_.Update(obj_class->GetDescriptor(&temp), object_bytes);
+ stats_.Update(obj_class->GetDescriptor(&temp), object_bytes);
}
void DumpMethod(ArtMethod* method, std::ostream& indent_os)
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 149960e..a93969f 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -614,7 +614,10 @@
TimingLogger::ScopedTiming t("Walk Bitmap", timings_);
// Walk the bitmap.
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- bitmap_->Walk(PatchOat::BitmapCallback, this);
+ auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ VisitObject(obj);
+ };
+ bitmap_->Walk(visitor);
}
return true;
}
@@ -638,7 +641,7 @@
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-// Called by BitmapCallback
+// Called by PatchImage.
void PatchOat::VisitObject(mirror::Object* object) {
mirror::Object* copy = RelocatedCopyOf(object);
CHECK(copy != nullptr);
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index e15a6bc..182ce94 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -79,11 +79,6 @@
static bool ReplaceOatFileWithSymlink(const std::string& input_oat_filename,
const std::string& output_oat_filename);
- static void BitmapCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
- }
-
void VisitObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupMethod(ArtMethod* object, ArtMethod* copy)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 1ee4026..41adae4 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -864,24 +864,6 @@
bool error;
};
-static void CheckTrampolines(mirror::Object* obj, void* arg) NO_THREAD_SAFETY_ANALYSIS {
- if (obj->IsClass()) {
- ObjPtr<mirror::Class> klass = obj->AsClass();
- TrampolineCheckData* d = reinterpret_cast<TrampolineCheckData*>(arg);
- for (ArtMethod& m : klass->GetMethods(d->pointer_size)) {
- const void* entrypoint = m.GetEntryPointFromQuickCompiledCodePtrSize(d->pointer_size);
- if (entrypoint == d->quick_resolution_trampoline ||
- entrypoint == d->quick_imt_conflict_trampoline ||
- entrypoint == d->quick_generic_jni_trampoline ||
- entrypoint == d->quick_to_interpreter_bridge_trampoline) {
- d->m = &m;
- d->error = true;
- return;
- }
- }
- }
-}
-
bool ClassLinker::InitFromBootImage(std::string* error_msg) {
VLOG(startup) << __FUNCTION__ << " entering";
CHECK(!init_done_);
@@ -946,7 +928,24 @@
data.quick_generic_jni_trampoline = ith_quick_generic_jni_trampoline;
data.quick_to_interpreter_bridge_trampoline = ith_quick_to_interpreter_bridge_trampoline;
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- spaces[i]->GetLiveBitmap()->Walk(CheckTrampolines, &data);
+ auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (obj->IsClass()) {
+ ObjPtr<mirror::Class> klass = obj->AsClass();
+ for (ArtMethod& m : klass->GetMethods(data.pointer_size)) {
+ const void* entrypoint =
+ m.GetEntryPointFromQuickCompiledCodePtrSize(data.pointer_size);
+ if (entrypoint == data.quick_resolution_trampoline ||
+ entrypoint == data.quick_imt_conflict_trampoline ||
+ entrypoint == data.quick_generic_jni_trampoline ||
+ entrypoint == data.quick_to_interpreter_bridge_trampoline) {
+ data.m = &m;
+ data.error = true;
+ return;
+ }
+ }
+ }
+ };
+ spaces[i]->GetLiveBitmap()->Walk(visitor);
if (data.error) {
ArtMethod* m = data.m;
LOG(ERROR) << "Found a broken ArtMethod: " << ArtMethod::PrettyMethod(m);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 0f15e8b..778b928 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -39,6 +39,7 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/scoped_gc_critical_section.h"
+#include "gc/space/bump_pointer_space-walk-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope-inl.h"
@@ -4813,13 +4814,6 @@
DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};
-static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
- const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
- HeapChunkContext::HeapChunkJavaCallback(
- obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
-}
-
void Dbg::DdmSendHeapSegments(bool native) {
Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
@@ -4839,6 +4833,12 @@
// Send a series of heap segment chunks.
HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
+ auto bump_pointer_space_visitor = [&](mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
+ HeapChunkContext::HeapChunkJavaCallback(
+ obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, &context);
+ };
if (native) {
UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
} else {
@@ -4861,7 +4861,7 @@
} else if (space->IsBumpPointerSpace()) {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
- space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
+ space->AsBumpPointerSpace()->Walk(bump_pointer_space_visitor);
HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
} else if (space->IsRegionSpace()) {
heap->IncrementDisableMovingGC(self);
@@ -4870,7 +4870,7 @@
ScopedSuspendAll ssa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
- space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
+ space->AsRegionSpace()->Walk(bump_pointer_space_visitor);
HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
}
heap->DecrementDisableMovingGC(self);
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index a5d59bf..1d729ff 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -71,15 +71,6 @@
large_object_bitmaps_.erase(it);
}
-void HeapBitmap::Walk(ObjectCallback* callback, void* arg) {
- for (const auto& bitmap : continuous_space_bitmaps_) {
- bitmap->Walk(callback, arg);
- }
- for (const auto& bitmap : large_object_bitmaps_) {
- bitmap->Walk(callback, arg);
- }
-}
-
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 2007bef..36426e9 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -47,9 +47,6 @@
ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
- void Walk(ObjectCallback* callback, void* arg)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_);
-
template <typename Visitor>
ALWAYS_INLINE void Visit(Visitor&& visitor)
REQUIRES(Locks::heap_bitmap_lock_)
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 57c290e..2901995 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -27,6 +27,7 @@
#include "gc/space/space.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
+#include "object_callbacks.h"
#include "space_bitmap-inl.h"
#include "thread-current-inl.h"
@@ -383,7 +384,7 @@
}
}
-void ModUnionTableReferenceCache::VisitObjects(ObjectCallback* callback, void* arg) {
+void ModUnionTableReferenceCache::VisitObjects(ObjectCallback callback, void* arg) {
CardTable* const card_table = heap_->GetCardTable();
ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
for (uint8_t* card : cleared_cards_) {
@@ -550,7 +551,7 @@
0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, bit_visitor);
}
-void ModUnionTableCardCache::VisitObjects(ObjectCallback* callback, void* arg) {
+void ModUnionTableCardCache::VisitObjects(ObjectCallback callback, void* arg) {
card_bitmap_->VisitSetBits(
0,
RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize,
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 591365f..9e261fd 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -21,21 +21,25 @@
#include "base/allocator.h"
#include "card_table.h"
#include "globals.h"
-#include "object_callbacks.h"
+#include "mirror/object_reference.h"
#include "safe_map.h"
#include <set>
#include <vector>
namespace art {
+
namespace mirror {
class Object;
} // namespace mirror
+class MarkObjectVisitor;
+
namespace gc {
namespace space {
class ContinuousSpace;
} // namespace space
+
class Heap;
namespace accounting {
@@ -44,6 +48,9 @@
// cleared between GC phases, reducing the number of dirty cards that need to be scanned.
class ModUnionTable {
public:
+ // A callback for visiting an object in the heap.
+ using ObjectCallback = void (*)(mirror::Object*, void*);
+
typedef std::set<uint8_t*, std::less<uint8_t*>,
TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
typedef MemoryRangeBitmap<CardTable::kCardSize> CardBitmap;
@@ -72,7 +79,7 @@
virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) = 0;
// Visit all of the objects that may contain references to other spaces.
- virtual void VisitObjects(ObjectCallback* callback, void* arg) = 0;
+ virtual void VisitObjects(ObjectCallback callback, void* arg) = 0;
// Verification, sanity checks that we don't have clean cards which conflict with our cached data
// for said cards. Exclusive lock is required since verify sometimes uses
@@ -124,7 +131,7 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
- virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -171,7 +178,7 @@
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
+ virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 20508c1..b37dd96 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -156,6 +156,26 @@
#endif
}
+template<size_t kAlignment> template<typename Visitor>
+inline void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) {
+ CHECK(bitmap_begin_ != nullptr);
+
+ uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
+ Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
+ for (uintptr_t i = 0; i <= end; ++i) {
+ uintptr_t w = bitmap_begin[i].LoadRelaxed();
+ if (w != 0) {
+ uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+ do {
+ const size_t shift = CTZ(w);
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+ visitor(obj);
+ w ^= (static_cast<uintptr_t>(1)) << shift;
+ } while (w != 0);
+ }
+ }
+}
+
template<size_t kAlignment> template<bool kSetBit>
inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index eb9f039..317e2fc 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -137,27 +137,6 @@
}
template<size_t kAlignment>
-void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
- CHECK(bitmap_begin_ != nullptr);
- CHECK(callback != nullptr);
-
- uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
- Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
- for (uintptr_t i = 0; i <= end; ++i) {
- uintptr_t w = bitmap_begin[i].LoadRelaxed();
- if (w != 0) {
- uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
- do {
- const size_t shift = CTZ(w);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
- (*callback)(obj, arg);
- w ^= (static_cast<uintptr_t>(1)) << shift;
- } while (w != 0);
- }
- }
-}
-
-template<size_t kAlignment>
void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitmap,
const SpaceBitmap<kAlignment>& mark_bitmap,
uintptr_t sweep_begin, uintptr_t sweep_end,
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 6188c9f..2fe6394 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -34,9 +34,6 @@
} // namespace mirror
class MemMap;
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
namespace gc {
namespace accounting {
@@ -108,8 +105,6 @@
return index < bitmap_size_ / sizeof(intptr_t);
}
- void VisitRange(uintptr_t base, uintptr_t max, ObjectCallback* callback, void* arg) const;
-
class ClearVisitor {
public:
explicit ClearVisitor(SpaceBitmap* const bitmap)
@@ -139,8 +134,9 @@
// Visits set bits in address order. The callback is not permitted to change the bitmap bits or
// max during the traversal.
- void Walk(ObjectCallback* callback, void* arg)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_);
+ template <typename Visitor>
+ void Walk(Visitor&& visitor)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Walk through the bitmaps in increasing address order, and find the object pointers that
// correspond to garbage objects. Call <callback> zero or more times with lists of these object
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8d3c62f..9d672b1 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -583,23 +583,22 @@
ObjPtr<mirror::Object> const holder_;
};
-void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
- auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- // Objects not on dirty or aged cards should never have references to newly allocated regions.
- if (collector->heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
- VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
- obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
- visitor,
- visitor);
- }
-}
-
void ConcurrentCopying::VerifyNoMissingCardMarks() {
+ auto visitor = [&](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_) {
+ // Objects not on dirty or aged cards should never have references to newly allocated regions.
+ if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
+ VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
+ obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+ internal_visitor, internal_visitor);
+ }
+ };
TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
- region_space_->Walk(&VerifyNoMissingCardMarkCallback, this);
+ region_space_->Walk(visitor);
{
ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
- heap_->GetLiveBitmap()->Walk(&VerifyNoMissingCardMarkCallback, this);
+ heap_->GetLiveBitmap()->Visit(visitor);
}
}
@@ -1212,34 +1211,6 @@
ConcurrentCopying* const collector_;
};
-class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
- public:
- explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
- : collector_(collector) {}
- void operator()(mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjectCallback(obj, collector_);
- }
- static void ObjectCallback(mirror::Object* obj, void *arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(obj != nullptr);
- ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- space::RegionSpace* region_space = collector->RegionSpace();
- CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
- VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
- visitor,
- visitor);
- if (kUseBakerReadBarrier) {
- CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
- << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
- }
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
// Verify there's no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
Thread* self = Thread::Current();
@@ -1252,7 +1223,21 @@
CHECK(!thread->GetIsGcMarking());
}
}
- VerifyNoFromSpaceRefsObjectVisitor visitor(this);
+
+ auto verify_no_from_space_refs_visitor = [&](mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK(obj != nullptr);
+ space::RegionSpace* region_space = RegionSpace();
+ CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
+ VerifyNoFromSpaceRefsFieldVisitor visitor(this);
+ obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor,
+ visitor);
+ if (kUseBakerReadBarrier) {
+ CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
+ << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
+ }
+ };
// Roots.
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -1260,11 +1245,11 @@
Runtime::Current()->VisitRoots(&ref_visitor);
}
// The to-space.
- region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
+ region_space_->WalkToSpace(verify_no_from_space_refs_visitor);
// Non-moving spaces.
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- heap_->GetMarkBitmap()->Visit(visitor);
+ heap_->GetMarkBitmap()->Visit(verify_no_from_space_refs_visitor);
}
// The alloc stack.
{
@@ -1275,7 +1260,7 @@
if (obj != nullptr && obj->GetClass() != nullptr) {
// TODO: need to call this only if obj is alive?
ref_visitor(obj);
- visitor(obj);
+ verify_no_from_space_refs_visitor(obj);
}
}
}
@@ -1337,31 +1322,6 @@
ConcurrentCopying* const collector_;
};
-class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
- public:
- explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
- : collector_(collector) {}
- void operator()(mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjectCallback(obj, collector_);
- }
- static void ObjectCallback(mirror::Object* obj, void *arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(obj != nullptr);
- ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- space::RegionSpace* region_space = collector->RegionSpace();
- CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
- collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
- AssertToSpaceInvariantFieldVisitor visitor(collector);
- obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
- visitor,
- visitor);
- }
-
- private:
- ConcurrentCopying* const collector_;
-};
-
class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
public:
RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
@@ -1599,8 +1559,14 @@
region_space_->AddLiveBytes(to_ref, alloc_size);
}
if (ReadBarrier::kEnableToSpaceInvariantChecks) {
- AssertToSpaceInvariantObjectVisitor visitor(this);
- visitor(to_ref);
+ CHECK(to_ref != nullptr);
+ space::RegionSpace* region_space = RegionSpace();
+ CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
+ AssertToSpaceInvariantFieldVisitor visitor(this);
+ to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor,
+ visitor);
}
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 7b4340e..ab60990 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -181,9 +181,6 @@
void VerifyGrayImmuneObjects()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- static void VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg)
- REQUIRES(Locks::mutator_lock_)
- REQUIRES(!mark_stack_lock_);
void VerifyNoMissingCardMarks()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
@@ -348,7 +345,6 @@
class ActivateReadBarrierEntrypointsCallback;
class ActivateReadBarrierEntrypointsCheckpoint;
class AssertToSpaceInvariantFieldVisitor;
- class AssertToSpaceInvariantObjectVisitor;
class AssertToSpaceInvariantRefsVisitor;
class ClearBlackPtrsVisitor;
class ComputeUnevacFromSpaceLiveRatioVisitor;
@@ -365,7 +361,6 @@
class ThreadFlipVisitor;
class VerifyGrayImmuneObjectsVisitor;
class VerifyNoFromSpaceRefsFieldVisitor;
- class VerifyNoFromSpaceRefsObjectVisitor;
class VerifyNoFromSpaceRefsVisitor;
class VerifyNoMissingCardMarkVisitor;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ba1161f..6ab9827 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1512,13 +1512,17 @@
}
}
-void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
- reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
-}
-
void Heap::VerifyHeap() {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
+ auto visitor = [&](mirror::Object* obj) {
+ VerifyObjectBody(obj);
+ };
+ // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
+ // NO_THREAD_SAFETY_ANALYSIS.
+ auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
+ GetLiveBitmap()->Visit(visitor);
+ };
+ no_thread_safety_analysis();
}
void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
@@ -2176,24 +2180,25 @@
bin_mark_bitmap_(nullptr),
is_running_on_memory_tool_(is_running_on_memory_tool) {}
- void BuildBins(space::ContinuousSpace* space) {
+ void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
bin_live_bitmap_ = space->GetLiveBitmap();
bin_mark_bitmap_ = space->GetMarkBitmap();
- BinContext context;
- context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
- context.collector_ = this;
+ uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Note: This requires traversing the space in increasing order of object addresses.
- bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
+ auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
+ size_t bin_size = object_addr - prev;
+ // Add the bin spanning from the end of the previous object to the start of the current object.
+ AddBin(bin_size, prev);
+ prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
+ };
+ bin_live_bitmap_->Walk(visitor);
// Add the last bin which spans after the last object to the end of the space.
- AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
+ AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
}
private:
- struct BinContext {
- uintptr_t prev_; // The end of the previous object.
- ZygoteCompactingCollector* collector_;
- };
// Maps from bin sizes to locations.
std::multimap<size_t, uintptr_t> bins_;
// Live bitmap of the space which contains the bins.
@@ -2202,18 +2207,6 @@
accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
const bool is_running_on_memory_tool_;
- static void Callback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(arg != nullptr);
- BinContext* context = reinterpret_cast<BinContext*>(arg);
- ZygoteCompactingCollector* collector = context->collector_;
- uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
- size_t bin_size = object_addr - context->prev_;
- // Add the bin consisting of the end of the previous object to the start of the current object.
- collector->AddBin(bin_size, context->prev_);
- context->prev_ = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
- }
-
void AddBin(size_t size, uintptr_t position) {
if (is_running_on_memory_tool_) {
MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d1e8d4f..e172d2d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -52,9 +52,6 @@
class TimingLogger;
class VariableSizedHandleScope;
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
namespace mirror {
class Class;
class Object;
@@ -1010,9 +1007,6 @@
size_t GetPercentFree();
- static void VerificationCallback(mirror::Object* obj, void* arg)
- REQUIRES_SHARED(Locks::heap_bitmap_lock_);
-
// Swap the allocation stack with the live stack.
void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
index 611b3d0..5d05ea2 100644
--- a/runtime/gc/space/bump_pointer_space-walk-inl.h
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -32,6 +32,19 @@
uint8_t* pos = Begin();
uint8_t* end = End();
uint8_t* main_end = pos;
+ // Internal indirection w/ NO_THREAD_SAFETY_ANALYSIS. Ideally, we'd like to have an annotation
+ // like
+ // REQUIRES_AS(visitor.operator()(mirror::Object*))
+ // on Walk to expose the interprocedural nature of locks here without having to duplicate the
+ // function.
+ //
+ // NO_THREAD_SAFETY_ANALYSIS is a workaround. The downside, of course, is that the analysis no
+ // longer complains at the call site. However, that is strictly no worse than the
+ // ObjectCallback version it replaces.
+ auto no_thread_safety_analysis_visit = [&](mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
+ visitor(obj);
+ };
+
{
MutexLock mu(Thread::Current(), block_lock_);
// If we have 0 blocks then we need to update the main header since we have bump pointer style
@@ -57,7 +70,7 @@
// since there is guaranteed to be no other blocks.
return;
} else {
- visitor(obj);
+ no_thread_safety_analysis_visit(obj);
pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
}
}
@@ -73,7 +86,7 @@
// assume it's the end. TODO: Have a thread update the header when it flushes the block?
// No read barrier because obj may not be a valid object.
while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
- visitor(obj);
+ no_thread_safety_analysis_visit(obj);
obj = GetNextObject(obj);
}
pos += block_size;
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index bb1ede1..5d91f4b 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -153,58 +153,6 @@
return storage;
}
-void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
- uint8_t* pos = Begin();
- uint8_t* end = End();
- uint8_t* main_end = pos;
- {
- MutexLock mu(Thread::Current(), block_lock_);
- // If we have 0 blocks then we need to update the main header since we have bump pointer style
- // allocation into an unbounded region (actually bounded by Capacity()).
- if (num_blocks_ == 0) {
- UpdateMainBlock();
- }
- main_end = Begin() + main_block_size_;
- if (num_blocks_ == 0) {
- // We don't have any other blocks, this means someone else may be allocating into the main
- // block. In this case, we don't want to try and visit the other blocks after the main block
- // since these could actually be part of the main block.
- end = main_end;
- }
- }
- // Walk all of the objects in the main block first.
- while (pos < main_end) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- // No read barrier because obj may not be a valid object.
- if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
- // There is a race condition where a thread has just allocated an object but not set the
- // class. We can't know the size of this object, so we don't visit it and exit the function
- // since there is guaranteed to be not other blocks.
- return;
- } else {
- callback(obj, arg);
- pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
- }
- }
- // Walk the other blocks (currently only TLABs).
- while (pos < end) {
- BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
- size_t block_size = header->size_;
- pos += sizeof(BlockHeader); // Skip the header so that we know where the objects
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
- CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
- // We don't know how many objects are allocated in the current block. When we hit a null class
- // assume its the end. TODO: Have a thread update the header when it flushes the block?
- // No read barrier because obj may not be a valid object.
- while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
- callback(obj, arg);
- obj = GetNextObject(obj);
- }
- pos += block_size;
- }
-}
-
accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index cf152e1..4197d0c 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -25,9 +25,6 @@
class Object;
}
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
namespace gc {
namespace collector {
@@ -149,11 +146,10 @@
}
// Go through all of the blocks and visit the continuous objects.
- void Walk(ObjectCallback* callback, void* arg)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
template <typename Visitor>
ALWAYS_INLINE void Walk(Visitor&& visitor)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!block_lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 34b552b..a3b53b4 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -184,14 +184,6 @@
return bytes;
}
-template<bool kToSpaceOnly>
-void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
- auto visitor = [callback, arg](mirror::Object* obj) {
- callback(obj, arg);
- };
- WalkInternal<kToSpaceOnly>(visitor);
-}
-
template<bool kToSpaceOnly, typename Visitor>
void RegionSpace::WalkInternal(Visitor&& visitor) {
// TODO: MutexLock on region_lock_ won't work due to lock order
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 54a56b3..77d76fb 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -17,7 +17,8 @@
#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
-#include "object_callbacks.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "space.h"
#include "thread.h"
@@ -152,18 +153,14 @@
}
// Go through all of the blocks and visit the continuous objects.
- void Walk(ObjectCallback* callback, void* arg)
- REQUIRES(Locks::mutator_lock_) {
- WalkInternal<false>(callback, arg);
- }
template <typename Visitor>
ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
WalkInternal<false /* kToSpaceOnly */>(visitor);
}
-
- void WalkToSpace(ObjectCallback* callback, void* arg)
+ template <typename Visitor>
+ ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
REQUIRES(Locks::mutator_lock_) {
- WalkInternal<true>(callback, arg);
+ WalkInternal<true>(visitor);
}
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
@@ -251,9 +248,6 @@
private:
RegionSpace(const std::string& name, MemMap* mem_map);
- template<bool kToSpaceOnly>
- void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;
-
template<bool kToSpaceOnly, typename Visitor>
ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index ea5e698..9eccb5a 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -25,9 +25,6 @@
template<class MirrorType> class HeapReference;
} // namespace mirror
-// A callback for visiting an object in the heap.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
class IsMarkedVisitor {
public:
virtual ~IsMarkedVisitor() {}