Add histogram for native allocations
Shows up in traces.txt; sample output:
Histogram of native allocation 0:4315,131072:33,1179648:3 bucket size 131072
Histogram of native free 0:995,131072:19,1179648:2 bucket size 131072
Bug: 28680116
(cherry picked from commit 0dce75dc6945c221a054eb9c479fb60efd193719)
Change-Id: Iaa07c6b8da0d6bab64a8fd4af8a02fc0b8c70e9e
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index bcb7b3b..0e3bc8e 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -85,6 +85,10 @@
return max_value_added_;
}
+ Value BucketWidth() const {
+ return bucket_width_;
+ }
+
const std::string& Name() const {
return name_;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index df5aa0a..fa540c0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -119,6 +119,8 @@
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
+static constexpr size_t kNativeAllocationHistogramBuckets = 16;
+
static inline bool CareAboutPauseTimes() {
return Runtime::Current()->InJankPerceptibleProcessState();
}
@@ -186,6 +188,11 @@
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
native_bytes_allocated_(0),
+ native_histogram_lock_("Native allocation lock"),
+ native_allocation_histogram_("Native allocation sizes",
+ 1U,
+ kNativeAllocationHistogramBuckets),
+ native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
@@ -1185,6 +1192,20 @@
rosalloc_space_->DumpStats(os);
}
+ {
+ MutexLock mu(Thread::Current(), native_histogram_lock_);
+ if (native_allocation_histogram_.SampleSize() > 0u) {
+ os << "Histogram of native allocation ";
+ native_allocation_histogram_.DumpBins(os);
+ os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
+ }
+ if (native_free_histogram_.SampleSize() > 0u) {
+ os << "Histogram of native free ";
+ native_free_histogram_.DumpBins(os);
+ os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
+ }
+ }
+
BaseMutex::DumpAll(os);
}
@@ -3848,6 +3869,10 @@
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
Thread* self = ThreadForEnv(env);
+ {
+ MutexLock mu(self, native_histogram_lock_);
+ native_allocation_histogram_.AddValue(bytes);
+ }
if (native_need_to_run_finalization_) {
RunFinalization(env, kNativeAllocationFinalizeTimeout);
UpdateMaxNativeFootprint();
@@ -3892,6 +3917,10 @@
void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
size_t expected_size;
+ {
+ MutexLock mu(Thread::Current(), native_histogram_lock_);
+ native_free_histogram_.AddValue(bytes);
+ }
do {
expected_size = native_bytes_allocated_.LoadRelaxed();
if (UNLIKELY(bytes > expected_size)) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index fada1a2..2a1a4a1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -241,9 +241,9 @@
SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
void RegisterNativeFree(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
@@ -532,7 +532,7 @@
space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
SHARED_REQUIRES(Locks::mutator_lock_);
- void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
+ void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
// Do a pending collector transition.
void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);
@@ -654,7 +654,8 @@
std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
// GC performance measuring
- void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_);
+ void DumpGcPerformanceInfo(std::ostream& os)
+ REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
// Thread pool.
@@ -1156,6 +1157,11 @@
// Bytes which are allocated and managed by native code but still need to be accounted for.
Atomic<size_t> native_bytes_allocated_;
+ // Native allocation stats.
+ Mutex native_histogram_lock_;
+ Histogram<uint64_t> native_allocation_histogram_;
+ Histogram<uint64_t> native_free_histogram_;
+
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
// rosalloc thread-local buffers. It is temporarily accumulated