Revert "Revert "Prevent overflows for concurrent_start_bytes_ sum""
This reverts commit 83ba9b979d932a5b6430c1affd171429b70b6c3f.
It also fixes a bug exposed by the original CL, and cleans up a
couple of related issues:
- CheckConcurrentGCForNative was renamed to CheckGCForNative to reflect
the fact that it does not deal only with concurrent GC.
- In the non-concurrent case, concurrent_start_bytes_ is not
meaningful; use target_footprint_ instead.
- UnsignedSum should use >= instead of >; with >, a zero second operand
would be misreported as overflow (see the sketch after the commit
message).
The middle one of these caused the test failures observed with the
previous CL: non-concurrent collectors set concurrent_start_bytes_ to
SIZE_MAX, so once the sum saturated instead of wrapping, native
allocations could no longer push gc_urgency past 1.0.
Test: Built without read barrier, and ran with --runtime-option=-Xgc:SS
Change-Id: Iae004c453bf2cae2739df66b6797af4a792886fc
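
For reference, a minimal standalone sketch of the saturating-add boundary
case the last bullet refers to; UnsignedSum here mirrors the heap.h hunk
below, while main() is purely illustrative:

    #include <cassert>
    #include <cstddef>
    #include <limits>

    // Saturating size_t addition. Unsigned overflow wraps modulo 2^N,
    // so the sum overflowed iff x + y < x.
    static size_t UnsignedSum(size_t x, size_t y) {
      return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
    }

    int main() {
      constexpr size_t kMax = std::numeric_limits<size_t>::max();
      // With '>' instead of '>=', x + 0 > x is false, so a zero second
      // operand would wrongly saturate to SIZE_MAX.
      assert(UnsignedSum(42, 0) == 42);
      // A genuine overflow saturates instead of wrapping around.
      assert(UnsignedSum(kMax, 1) == kMax);
      return 0;
    }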
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5473b52..8335799 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3999,7 +3999,7 @@
// Return the ratio of the weighted native + java allocated bytes to its target value.
// A return value > 1.0 means we should collect. Significantly larger values mean we're falling
// behind.
-inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes) {
+inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent) {
// Collection check for native allocation. Does not enforce Java heap bounds.
// With adj_start_bytes defined below, effectively checks
// <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
@@ -4016,17 +4016,22 @@
+ old_native_bytes / kOldNativeDiscountFactor;
size_t add_bytes_allowed = static_cast<size_t>(
NativeAllocationGcWatermark() * HeapGrowthMultiplier());
- size_t adj_start_bytes = concurrent_start_bytes_ + add_bytes_allowed / kNewNativeDiscountFactor;
+ size_t java_gc_start_bytes = is_gc_concurrent
+ ? concurrent_start_bytes_
+ : target_footprint_.load(std::memory_order_relaxed);
+ size_t adj_start_bytes = UnsignedSum(java_gc_start_bytes,
+ add_bytes_allowed / kNewNativeDiscountFactor);
return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
/ static_cast<float>(adj_start_bytes);
}
}
-inline void Heap::CheckConcurrentGCForNative(Thread* self) {
+inline void Heap::CheckGCForNative(Thread* self) {
+ bool is_gc_concurrent = IsGcConcurrent();
size_t current_native_bytes = GetNativeBytes();
- float gc_urgency = NativeMemoryOverTarget(current_native_bytes);
+ float gc_urgency = NativeMemoryOverTarget(current_native_bytes, is_gc_concurrent);
if (UNLIKELY(gc_urgency >= 1.0)) {
- if (IsGcConcurrent()) {
+ if (is_gc_concurrent) {
RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
if (gc_urgency > kStopForNativeFactor
&& current_native_bytes > kHugeNativeAllocs) {
@@ -4045,7 +4050,7 @@
// About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
void Heap::NotifyNativeAllocations(JNIEnv* env) {
native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
- CheckConcurrentGCForNative(ThreadForEnv(env));
+ CheckGCForNative(ThreadForEnv(env));
}
// Register a native allocation with an explicit size.
@@ -4057,7 +4062,7 @@
native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
|| bytes > kCheckImmediatelyThreshold) {
- CheckConcurrentGCForNative(ThreadForEnv(env));
+ CheckGCForNative(ThreadForEnv(env));
}
}
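
To make the NativeMemoryOverTarget hunk above concrete, here is a hedged
standalone model of the urgency computation. The discount-factor values
and the parameter list are illustrative stand-ins for state that lives
elsewhere in heap.cc/heap.h; only the arithmetic shape follows the
patched code:

    #include <cstddef>
    #include <limits>

    // Illustrative constants; ART's real kOldNativeDiscountFactor and
    // kNewNativeDiscountFactor are defined in heap.cc and may differ.
    constexpr size_t kOldNativeDiscountFactor = 65536;
    constexpr size_t kNewNativeDiscountFactor = 2;

    static size_t UnsignedDifference(size_t x, size_t y) {
      return x > y ? x - y : 0;
    }

    static size_t UnsignedSum(size_t x, size_t y) {
      return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
    }

    // Model of the patched computation: pick the Java GC trigger that is
    // meaningful for the collector in use, then add the native allowance
    // with saturation so a SIZE_MAX trigger cannot wrap to a tiny value.
    float NativeMemoryOverTargetModel(size_t bytes_allocated,
                                      size_t old_native_bytes,
                                      size_t current_native_bytes,
                                      size_t concurrent_start_bytes,
                                      size_t target_footprint,
                                      size_t native_allowance,
                                      bool is_gc_concurrent) {
      size_t new_native_bytes =
          UnsignedDifference(current_native_bytes, old_native_bytes);
      size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
          + old_native_bytes / kOldNativeDiscountFactor;
      size_t java_gc_start_bytes =
          is_gc_concurrent ? concurrent_start_bytes : target_footprint;
      size_t adj_start_bytes = UnsignedSum(
          java_gc_start_bytes, native_allowance / kNewNativeDiscountFactor);
      return static_cast<float>(bytes_allocated + weighted_native_bytes)
          / static_cast<float>(adj_start_bytes);
    }

With is_gc_concurrent == false, a SIZE_MAX concurrent_start_bytes either
wrapped (pre-CL) or saturated the divisor and drove the urgency to ~0
(after the first CL); dividing by target_footprint instead restores a
usable trigger.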
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6bdba12..18dfbf5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -917,9 +917,13 @@
return main_space_backup_ != nullptr;
}
+ // Size_t saturating arithmetic
static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
return x > y ? x - y : 0;
}
+ static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
+ return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
+ }
static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
return
@@ -950,13 +954,13 @@
// Checks whether we should garbage collect:
ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
- float NativeMemoryOverTarget(size_t current_native_bytes);
+ float NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent);
ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
size_t new_num_bytes_allocated,
ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
- void CheckConcurrentGCForNative(Thread* self)
+ void CheckGCForNative(Thread* self)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
accounting::ObjectStack* GetMarkStack() {
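
Finally, a hedged sketch of the control flow behind the CheckGCForNative
rename. The non-concurrent branch is not visible in the hunks above; the
stub names and bodies here are assumptions for illustration, not ART's
API:

    #include <cstdio>

    // Hypothetical stand-ins for the real GC entry points; neither name
    // exists in ART.
    static void RequestConcurrentGCStub() { std::puts("background GC requested"); }
    static void CollectGarbageNowStub() { std::puts("blocking GC performed"); }

    // One urgency check, then a collector-specific response: concurrent
    // collectors get a background GC request, everything else (e.g.
    // -Xgc:SS) is assumed to collect inline, which is why
    // target_footprint_ rather than concurrent_start_bytes_ is the right
    // threshold for that case.
    static void CheckGCForNativeSketch(bool is_gc_concurrent, float gc_urgency) {
      if (gc_urgency >= 1.0f) {
        if (is_gc_concurrent) {
          RequestConcurrentGCStub();
        } else {
          CollectGarbageNowStub();
        }
      }
    }

    int main() {
      CheckGCForNativeSketch(/*is_gc_concurrent=*/false, /*gc_urgency=*/1.5f);
      return 0;
    }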