Merge "Add RecordFree to the GarbageCollector interface"
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index f9a6abe..16add0b 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -128,6 +128,18 @@
   return (static_cast<uint64_t>(freed_bytes_) * 1000) / (NsToMs(GetDurationNs()) + 1);
 }
 
+void GarbageCollector::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
+  freed_objects_ += freed_objects;
+  freed_bytes_ += freed_bytes;
+  GetHeap()->RecordFree(freed_objects, freed_bytes);
+}
+
+void GarbageCollector::RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes) {
+  freed_large_objects_ += freed_objects;
+  freed_large_object_bytes_ += freed_bytes;
+  GetHeap()->RecordFree(freed_objects, freed_bytes);
+}
+
 void GarbageCollector::ResetMeasurements() {
   cumulative_timings_.Reset();
   pause_histogram_.Reset();
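
As an aside on the throughput context line above: the expression scales freed bytes by 1000 and divides by the duration in milliseconds, so the result is in bytes per second, and the + 1 guards against division by zero. A worked example with invented numbers, not taken from the patch:

  freed_bytes_ = 8 MiB = 8388608 bytes, duration = 40 ms
  throughput   = (8388608 * 1000) / (40 + 1) = 204600195 bytes/s, roughly 195 MiB/s
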
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index ca4a1d5..d05f45b 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -88,19 +88,19 @@
   // this is the allocation space, for full GC then we swap the zygote bitmaps too.
   void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  size_t GetFreedBytes() const {
+  int64_t GetFreedBytes() const {
     return freed_bytes_;
   }
 
-  size_t GetFreedLargeObjectBytes() const {
+  int64_t GetFreedLargeObjectBytes() const {
     return freed_large_object_bytes_;
   }
 
-  size_t GetFreedObjects() const {
+  uint64_t GetFreedObjects() const {
     return freed_objects_;
   }
 
-  size_t GetFreedLargeObjects() const {
+  uint64_t GetFreedLargeObjects() const {
     return freed_large_objects_;
   }
 
@@ -108,7 +108,7 @@
     return pause_histogram_.Sum();
   }
 
-  uint64_t GetTotalFreedBytes() const {
+  int64_t GetTotalFreedBytes() const {
     return total_freed_bytes_;
   }
 
@@ -141,6 +141,11 @@
   // Revoke all the thread-local buffers.
   virtual void RevokeAllThreadLocalBuffers() = 0;
 
+  // Record that you have freed some objects or large objects; calls Heap::RecordFree.
+  // TODO: These are not thread-safe; add a lock if we have parallel sweeping.
+  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
+  void RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes);
+
   static constexpr size_t kPauseBucketSize = 500;
   static constexpr size_t kPauseBucketCount = 32;
 
@@ -158,13 +163,14 @@
   Histogram<uint64_t> pause_histogram_;
   uint64_t total_time_ns_;
   uint64_t total_freed_objects_;
-  uint64_t total_freed_bytes_;
+  int64_t total_freed_bytes_;
 
-  // Single GC statitstics.
-  AtomicInteger freed_bytes_;
-  AtomicInteger freed_large_object_bytes_;
-  AtomicInteger freed_objects_;
-  AtomicInteger freed_large_objects_;
+  // Single GC statistics; freed bytes are signed since the GC can free negative bytes if it
+  // promotes objects to a space with a larger allocation size.
+  int64_t freed_bytes_;
+  int64_t freed_large_object_bytes_;
+  uint64_t freed_objects_;
+  uint64_t freed_large_objects_;
 
   CumulativeLogger cumulative_timings_;
 
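
To make the signedness rationale concrete, a worked example with invented numbers (the from/to naming follows semi_space.cc below): a compacting GC evacuates objects from a bump pointer space into a free-list backed space whose binning rounds allocations up.

  from_bytes  = 1048576   // bytes the objects occupied in the from-space
  to_bytes    = 1103872   // bytes the same objects occupy after promotion
  freed_bytes = from_bytes - to_bytes = -55296   // negative: the footprint grew

An unsigned counter would wrap to a huge positive value here, hence int64_t for the byte counters, while the object counts stay unsigned since to_objects can never exceed from_objects (the CHECK_LE in semi_space.cc enforces this).
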
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index e225d5a..b8051c9 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1116,13 +1116,10 @@
   timings_.EndSplit();
 
   timings_.StartSplit("RecordFree");
-  VLOG(heap) << "Freed " << freed_objects << "/" << count
-             << " objects with size " << PrettySize(freed_bytes);
-  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes + freed_large_object_bytes);
-  freed_objects_.FetchAndAdd(freed_objects);
-  freed_large_objects_.FetchAndAdd(freed_large_objects);
-  freed_bytes_.FetchAndAdd(freed_bytes);
-  freed_large_object_bytes_.FetchAndAdd(freed_large_object_bytes);
+  VLOG(heap) << "Freed " << freed_objects << "/" << count << " objects with size "
+             << PrettySize(freed_bytes);
+  RecordFree(freed_objects, freed_bytes);
+  RecordFreeLargeObjects(freed_large_objects, freed_large_object_bytes);
   timings_.EndSplit();
 
   timings_.StartSplit("ResetStack");
@@ -1150,9 +1147,7 @@
       size_t freed_objects = 0;
       size_t freed_bytes = 0;
       alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-      heap_->RecordFree(freed_objects, freed_bytes);
-      freed_objects_.FetchAndAdd(freed_objects);
-      freed_bytes_.FetchAndAdd(freed_bytes);
+      RecordFree(freed_objects, freed_bytes);
     }
   }
   SweepLargeObjects(swap_bitmaps);
@@ -1163,9 +1158,7 @@
   size_t freed_objects = 0;
   size_t freed_bytes = 0;
   heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-  freed_large_objects_.FetchAndAdd(freed_objects);
-  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
-  heap_->RecordFree(freed_objects, freed_bytes);
+  RecordFreeLargeObjects(freed_objects, freed_bytes);
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index b53ee10..f5d6299 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -262,27 +262,18 @@
   // before they are properly counted.
   RevokeAllThreadLocalBuffers();
   // Record freed memory.
-  uint64_t from_bytes = from_space_->GetBytesAllocated();
-  uint64_t to_bytes = bytes_moved_;
-  uint64_t from_objects = from_space_->GetObjectsAllocated();
-  uint64_t to_objects = objects_moved_;
+  const int64_t from_bytes = from_space_->GetBytesAllocated();
+  const int64_t to_bytes = bytes_moved_;
+  const uint64_t from_objects = from_space_->GetObjectsAllocated();
+  const uint64_t to_objects = objects_moved_;
   CHECK_LE(to_objects, from_objects);
-  int64_t freed_bytes = from_bytes - to_bytes;
-  int64_t freed_objects = from_objects - to_objects;
-  freed_bytes_.FetchAndAdd(freed_bytes);
-  freed_objects_.FetchAndAdd(freed_objects);
   // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
   // space.
-  heap_->RecordFree(freed_objects, freed_bytes);
-
+  RecordFree(from_objects - to_objects, from_bytes - to_bytes);
   // Clear and protect the from space.
   from_space_->Clear();
-  VLOG(heap) << "Protecting space " << *from_space_;
-  if (kProtectFromSpace) {
-    from_space_->GetMemMap()->Protect(PROT_NONE);
-  } else {
-    from_space_->GetMemMap()->Protect(PROT_READ);
-  }
+  VLOG(heap) << "Protecting from_space_: " << *from_space_;
+  from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
   if (swap_semi_spaces_) {
     heap_->SwapSemiSpaces();
   }
@@ -687,9 +678,7 @@
       size_t freed_objects = 0;
       size_t freed_bytes = 0;
       alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-      heap_->RecordFree(freed_objects, freed_bytes);
-      freed_objects_.FetchAndAdd(freed_objects);
-      freed_bytes_.FetchAndAdd(freed_bytes);
+      RecordFree(freed_objects, freed_bytes);
     }
   }
   if (!is_large_object_space_immune_) {
@@ -703,9 +692,7 @@
   size_t freed_objects = 0;
   size_t freed_bytes = 0;
   heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-  freed_large_objects_.FetchAndAdd(freed_objects);
-  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
-  heap_->RecordFree(freed_objects, freed_bytes);
+  RecordFreeLargeObjects(freed_objects, freed_bytes);
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index dab668f..4484494 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1143,13 +1143,13 @@
   GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
 }
 
-void Heap::RecordFree(ssize_t freed_objects, ssize_t freed_bytes) {
+void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
   // Use signed comparison since freed bytes can be negative when background compaction to
   // foreground transitions occur, caused by moving objects from a bump pointer space to a
   // free list backed space, which typically increases memory footprint due to padding and binning.
-  DCHECK_LE(freed_bytes, static_cast<ssize_t>(num_bytes_allocated_.Load()));
-  DCHECK_GE(freed_objects, 0);
-  num_bytes_allocated_.FetchAndSub(freed_bytes);
+  DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.Load()));
+  // Note: This relies on two's complement for handling negative freed_bytes.
+  num_bytes_allocated_.FetchAndSub(static_cast<ssize_t>(freed_bytes));
   if (Runtime::Current()->HasStatsEnabled()) {
     RuntimeStats* thread_stats = Thread::Current()->GetStats();
     thread_stats->freed_objects += freed_objects;
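
The two's complement note above can be verified in isolation. A minimal standalone check, with std::atomic standing in for ART's atomic counter type and invented values:

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint64_t> num_bytes_allocated{1048576};  // 1 MiB currently allocated
  const int64_t freed_bytes = -55296;  // negative: a promotion grew the footprint
  // The signed-to-unsigned cast wraps via two's complement, so subtracting a
  // negative amount increases the counter, which is exactly the intent.
  num_bytes_allocated.fetch_sub(static_cast<uint64_t>(freed_bytes));
  assert(num_bytes_allocated.load() == 1048576 + 55296);
  return 0;
}
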
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7eafd34..7a9ef1e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -358,7 +358,7 @@
 
   // Freed bytes can be negative in cases where we copy objects from a compacted space to a
   // free-list backed space.
-  void RecordFree(ssize_t freed_objects, ssize_t freed_bytes);
+  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
 
   // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
   // The call is not needed if NULL is stored in the field.
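
Taken together, a toy model of the bookkeeping flow this change establishes (the counter names match the diff; the classes and values are invented for illustration, the real ones live under runtime/gc/):

#include <cstdint>
#include <iostream>

struct ToyHeap {
  int64_t num_bytes_allocated = 1 << 20;
  void RecordFree(uint64_t /*objects*/, int64_t bytes) { num_bytes_allocated -= bytes; }
};

struct ToyCollector {
  ToyHeap* heap;
  uint64_t freed_objects_ = 0;
  int64_t freed_bytes_ = 0;
  uint64_t freed_large_objects_ = 0;
  int64_t freed_large_object_bytes_ = 0;

  // Mirrors GarbageCollector::RecordFree: bump the per-GC counters, then
  // forward to the heap so global accounting happens in exactly one place.
  void RecordFree(uint64_t objects, int64_t bytes) {
    freed_objects_ += objects;
    freed_bytes_ += bytes;
    heap->RecordFree(objects, bytes);
  }
  void RecordFreeLargeObjects(uint64_t objects, int64_t bytes) {
    freed_large_objects_ += objects;
    freed_large_object_bytes_ += bytes;
    heap->RecordFree(objects, bytes);
  }
};

int main() {
  ToyHeap heap;
  ToyCollector gc{&heap};
  gc.RecordFree(120, 4096);             // a regular sweep
  gc.RecordFreeLargeObjects(2, 65536);  // a large object sweep
  gc.RecordFree(10, -512);              // a promotion reporting negative freed bytes
  std::cout << heap.num_bytes_allocated << "\n";  // 1048576 - 4096 - 65536 + 512 = 979456
  return 0;
}
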