Share single GC iteration accounting across all GCs.

Previously, each garbage collector carried data that was only used
during collection. Since only one collector can be running at any
given time, this data can be shared between all collectors. This
reduces memory usage, since we no longer keep a redundant copy of the
same information for each GC type, and it also reduces the amount of
code required to sweep spaces.
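
As a rough sketch of the new layout (simplified names and fields; the
real Iteration below also carries timings, GC cause, and
soft-reference state):

    #include <cstdint>
    #include <vector>

    struct ObjectBytePair {
      uint64_t objects = 0;
      int64_t bytes = 0;  // Signed: promotion into a space with a larger
                          // allocation size can free a negative byte count.
      void Add(const ObjectBytePair& other) {
        objects += other.objects;
        bytes += other.bytes;
      }
    };

    // Per-iteration state that each collector previously duplicated.
    struct Iteration {
      ObjectBytePair freed;      // Normal objects freed this iteration.
      ObjectBytePair freed_los;  // Large objects freed this iteration.
      std::vector<uint64_t> pause_times;
      uint64_t duration_ns = 0;
    };

    class Heap {
     public:
      Iteration* GetCurrentGcIteration() { return &current_gc_iteration_; }

     private:
      // Single shared instance: since only one GC runs at a time, every
      // collector records into this rather than keeping its own copy.
      Iteration current_gc_iteration_;
    };

Collectors then reach this state through the heap, e.g.
GetCurrentIteration()->GetFreedBytes().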

Bug: 9969166
Change-Id: I31caf0ee4d572f75e0c66863fe7db12c08ae08e7
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index a17c36b..8622fd6 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -31,20 +31,36 @@
 namespace gc {
 namespace collector {
 
+Iteration::Iteration()
+    : duration_ns_(0), timings_("GC iteration timing logger", true, VLOG_IS_ON(heap)) {
+  Reset(kGcCauseBackground, false);  // Reset to some placeholder values.
+}
+
+void Iteration::Reset(GcCause gc_cause, bool clear_soft_references) {
+  timings_.Reset();
+  pause_times_.clear();
+  duration_ns_ = 0;
+  clear_soft_references_ = clear_soft_references;
+  gc_cause_ = gc_cause;
+  freed_ = ObjectBytePair();
+  freed_los_ = ObjectBytePair();
+}
+
+uint64_t Iteration::GetEstimatedThroughput() const {
+  // Add 1ms to prevent possible division by 0.
+  return (static_cast<uint64_t>(freed_.bytes) * 1000) / (NsToMs(GetDurationNs()) + 1);
+}
+
 GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
     : heap_(heap),
       name_(name),
-      gc_cause_(kGcCauseForAlloc),
-      clear_soft_references_(false),
-      duration_ns_(0),
-      timings_(name_.c_str(), true, VLOG_IS_ON(heap)),
       pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
       cumulative_timings_(name) {
   ResetCumulativeStatistics();
 }
 
 void GarbageCollector::RegisterPause(uint64_t nano_length) {
-  pause_times_.push_back(nano_length);
+  GetCurrentIteration()->pause_times_.push_back(nano_length);
 }
 
 void GarbageCollector::ResetCumulativeStatistics() {
@@ -59,32 +75,26 @@
   ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()).c_str());
   Thread* self = Thread::Current();
   uint64_t start_time = NanoTime();
-  timings_.Reset();
-  pause_times_.clear();
-  duration_ns_ = 0;
-  clear_soft_references_ = clear_soft_references;
-  gc_cause_ = gc_cause;
-  // Reset stats.
-  freed_bytes_ = 0;
-  freed_large_object_bytes_ = 0;
-  freed_objects_ = 0;
-  freed_large_objects_ = 0;
+  Iteration* current_iteration = GetCurrentIteration();
+  current_iteration->Reset(gc_cause, clear_soft_references);
   RunPhases();  // Run all the GC phases.
   // Add the current timings to the cumulative timings.
-  cumulative_timings_.AddLogger(timings_);
+  cumulative_timings_.AddLogger(*GetTimings());
   // Update cumulative statistics with how many bytes the GC iteration freed.
-  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
-  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
+  total_freed_objects_ += current_iteration->GetFreedObjects() +
+      current_iteration->GetFreedLargeObjects();
+  total_freed_bytes_ += current_iteration->GetFreedBytes() +
+      current_iteration->GetFreedLargeObjectBytes();
   uint64_t end_time = NanoTime();
-  duration_ns_ = end_time - start_time;
+  current_iteration->SetDurationNs(end_time - start_time);
   if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
     // The entire GC was paused, clear the fake pauses which might be in the pause times and add
     // the whole GC duration.
-    pause_times_.clear();
-    RegisterPause(duration_ns_);
+    current_iteration->pause_times_.clear();
+    RegisterPause(current_iteration->GetDurationNs());
   }
-  total_time_ns_ += GetDurationNs();
-  for (uint64_t pause_time : pause_times_) {
+  total_time_ns_ += current_iteration->GetDurationNs();
+  for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
     pause_histogram_.AddValue(pause_time / 1000);
   }
   ATRACE_END();
@@ -125,23 +135,6 @@
   return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
 }
 
-uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
-  // Add 1ms to prevent possible division by 0.
-  return (static_cast<uint64_t>(freed_bytes_) * 1000) / (NsToMs(GetDurationNs()) + 1);
-}
-
-void GarbageCollector::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
-  freed_objects_ += freed_objects;
-  freed_bytes_ += freed_bytes;
-  GetHeap()->RecordFree(freed_objects, freed_bytes);
-}
-
-void GarbageCollector::RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes) {
-  freed_large_objects_ += freed_objects;
-  freed_large_object_bytes_ += freed_bytes;
-  GetHeap()->RecordFree(freed_objects, freed_bytes);
-}
-
 void GarbageCollector::ResetMeasurements() {
   cumulative_timings_.Reset();
   pause_histogram_.Reset();
@@ -160,6 +153,23 @@
   Runtime::Current()->GetThreadList()->ResumeAll();
 }
 
+// Returns the current GC iteration and associated info.
+Iteration* GarbageCollector::GetCurrentIteration() {
+  return heap_->GetCurrentGcIteration();
+}
+const Iteration* GarbageCollector::GetCurrentIteration() const {
+  return heap_->GetCurrentGcIteration();
+}
+
+void GarbageCollector::RecordFree(const ObjectBytePair& freed) {
+  GetCurrentIteration()->freed_.Add(freed);
+  heap_->RecordFree(freed.objects, freed.bytes);
+}
+void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
+  GetCurrentIteration()->freed_los_.Add(freed);
+  heap_->RecordFree(freed.objects, freed.bytes);
+}
+
 }  // namespace collector
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index f4f9dbb..885569e 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -33,6 +33,78 @@
 
 namespace collector {
 
+struct ObjectBytePair {
+  ObjectBytePair(uint64_t num_objects = 0, int64_t num_bytes = 0)
+      : objects(num_objects), bytes(num_bytes) {}
+  void Add(const ObjectBytePair& other) {
+    objects += other.objects;
+    bytes += other.bytes;
+  }
+  // Number of objects which were freed.
+  uint64_t objects;
+  // Freed bytes are signed since the GC can free negative bytes if it promotes objects to a space
+  // which has a larger allocation size.
+  int64_t bytes;
+};
+
+// Information related to a single garbage collector iteration. Since we only ever have one GC
+// running at any given time, we can share a single iteration info between all collectors.
+class Iteration {
+ public:
+  Iteration();
+  // Returns the pause times for this GC iteration, in nanoseconds.
+  const std::vector<uint64_t>& GetPauseTimes() const {
+    return pause_times_;
+  }
+  TimingLogger* GetTimings() {
+    return &timings_;
+  }
+  // Returns how long the GC took to complete in nanoseconds.
+  uint64_t GetDurationNs() const {
+    return duration_ns_;
+  }
+  int64_t GetFreedBytes() const {
+    return freed_.bytes;
+  }
+  int64_t GetFreedLargeObjectBytes() const {
+    return freed_los_.bytes;
+  }
+  uint64_t GetFreedObjects() const {
+    return freed_.objects;
+  }
+  uint64_t GetFreedLargeObjects() const {
+    return freed_los_.objects;
+  }
+  void Reset(GcCause gc_cause, bool clear_soft_references);
+  // Returns the estimated throughput of the iteration in bytes / second.
+  uint64_t GetEstimatedThroughput() const;
+  bool GetClearSoftReferences() const {
+    return clear_soft_references_;
+  }
+  void SetClearSoftReferences(bool clear_soft_references) {
+    clear_soft_references_ = clear_soft_references;
+  }
+  GcCause GetGcCause() const {
+    return gc_cause_;
+  }
+
+ private:
+  void SetDurationNs(uint64_t duration) {
+    duration_ns_ = duration;
+  }
+
+  GcCause gc_cause_;
+  bool clear_soft_references_;
+  uint64_t duration_ns_;
+  TimingLogger timings_;
+  ObjectBytePair freed_;
+  ObjectBytePair freed_los_;
+  std::vector<uint64_t> pause_times_;
+
+  friend class GarbageCollector;
+  DISALLOW_COPY_AND_ASSIGN(Iteration);
+};
+
 class GarbageCollector {
  public:
   class SCOPED_LOCKABLE ScopedPause {
@@ -62,22 +134,7 @@
   Heap* GetHeap() const {
     return heap_;
   }
-
-  // Returns how long the mutators were paused in nanoseconds.
-  const std::vector<uint64_t>& GetPauseTimes() const {
-    return pause_times_;
-  }
-
-  // Returns how long the GC took to complete in nanoseconds.
-  uint64_t GetDurationNs() const {
-    return duration_ns_;
-  }
-
   void RegisterPause(uint64_t nano_length);
-
-  TimingLogger& GetTimings() {
-    return timings_;
-  }
   const CumulativeLogger& GetCumulativeTimings() const {
     return cumulative_timings_;
   }
@@ -87,52 +144,36 @@
   // Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
   // this is the allocation space, for full GC then we swap the zygote bitmaps too.
   void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  int64_t GetFreedBytes() const {
-    return freed_bytes_;
-  }
-
-  int64_t GetFreedLargeObjectBytes() const {
-    return freed_large_object_bytes_;
-  }
-
-  uint64_t GetFreedObjects() const {
-    return freed_objects_;
-  }
-
-  uint64_t GetFreedLargeObjects() const {
-    return freed_large_objects_;
-  }
-
   uint64_t GetTotalPausedTimeNs() const {
     return pause_histogram_.AdjustedSum();
   }
-
   int64_t GetTotalFreedBytes() const {
     return total_freed_bytes_;
   }
-
   uint64_t GetTotalFreedObjects() const {
     return total_freed_objects_;
   }
-
   const Histogram<uint64_t>& GetPauseHistogram() const {
     return pause_histogram_;
   }
-
   // Reset the cumulative timings and pause histogram.
   void ResetMeasurements();
-
   // Returns the estimated throughput in bytes / second.
   uint64_t GetEstimatedMeanThroughput() const;
-
-  // Returns the estimated throughput of the last GC iteration.
-  uint64_t GetEstimatedLastIterationThroughput() const;
-
   // Returns how many GC iterations have been run.
-  size_t GetIterations() const {
+  size_t NumberOfIterations() const {
     return GetCumulativeTimings().GetIterations();
   }
+  // Returns the current GC iteration and associated info.
+  Iteration* GetCurrentIteration();
+  const Iteration* GetCurrentIteration() const;
+  TimingLogger* GetTimings() {
+    return &GetCurrentIteration()->timings_;
+  }
+  // Record a free of normal objects.
+  void RecordFree(const ObjectBytePair& freed);
+  // Record a free of large objects.
+  void RecordFreeLOS(const ObjectBytePair& freed);
 
  protected:
   // Run all of the GC phases.
@@ -141,40 +182,17 @@
   // Revoke all the thread-local buffers.
   virtual void RevokeAllThreadLocalBuffers() = 0;
 
-  // Record that you have freed some objects or large objects, calls Heap::RecordFree.
-  // TODO: These are not thread safe, add a lock if we get parallel sweeping.
-  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
-  void RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes);
-
   static constexpr size_t kPauseBucketSize = 500;
   static constexpr size_t kPauseBucketCount = 32;
 
   Heap* const heap_;
-
   std::string name_;
-
-  GcCause gc_cause_;
-  bool clear_soft_references_;
-
-  uint64_t duration_ns_;
-  TimingLogger timings_;
-
   // Cumulative statistics.
   Histogram<uint64_t> pause_histogram_;
   uint64_t total_time_ns_;
   uint64_t total_freed_objects_;
   int64_t total_freed_bytes_;
-
-  // Single GC statitstics, freed bytes are signed since the GC can free negative bytes if it
-  // promotes objects to a space which has a larger allocation size.
-  int64_t freed_bytes_;
-  int64_t freed_large_object_bytes_;
-  uint64_t freed_objects_;
-  uint64_t freed_large_objects_;
-
   CumulativeLogger cumulative_timings_;
-
-  std::vector<uint64_t> pause_times_;
 };
 
 }  // namespace collector
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 595dc8f..ebd1738 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -57,7 +57,7 @@
 namespace collector {
 
 void MarkCompact::BindBitmaps() {
-  timings_.StartSplit("BindBitmaps");
+  GetTimings()->StartSplit("BindBitmaps");
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   // Mark all of the spaces we never collect as immune.
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -66,7 +66,7 @@
       CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
     }
   }
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
@@ -120,7 +120,7 @@
 };
 
 void MarkCompact::CalculateObjectForwardingAddresses() {
-  timings_.NewSplit(__FUNCTION__);
+  GetTimings()->NewSplit(__FUNCTION__);
   // The bump pointer in the space where the next forwarding address will be.
   bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
   // Visit all the marked objects in the bitmap.
@@ -131,7 +131,7 @@
 }
 
 void MarkCompact::InitializePhase() {
-  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+  TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
   mark_stack_ = heap_->GetMarkStack();
   DCHECK(mark_stack_ != nullptr);
   immune_region_.Reset();
@@ -143,11 +143,11 @@
 }
 
 void MarkCompact::ProcessReferences(Thread* self) {
-  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+  TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   heap_->GetReferenceProcessor()->ProcessReferences(
-      false, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
-      &ProcessMarkStackCallback, this);
+      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
+      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
 }
 
 class BitmapSetSlowPathVisitor {
@@ -195,18 +195,18 @@
   objects_with_lockword_.reset(accounting::ContinuousSpaceBitmap::Create(
       "objects with lock words", space_->Begin(), space_->Size()));
   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
-  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+  TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
   // Assume the cleared space is already empty.
   BindBitmaps();
   // Process dirty cards and add dirty cards to mod-union tables.
-  heap_->ProcessCards(timings_, false);
+  heap_->ProcessCards(GetTimings(), false);
   // Clear the whole card table since we can not Get any additional dirty cards during the
   // paused GC. This saves memory but only works for pause the world collectors.
-  timings_.NewSplit("ClearCardTable");
+  GetTimings()->NewSplit("ClearCardTable");
   heap_->GetCardTable()->ClearCardTable();
   // Need to do this before the checkpoint since we don't want any threads to add references to
   // the live stack during the recursive mark.
-  timings_.NewSplit("SwapStacks");
+  GetTimings()->NewSplit("SwapStacks");
   if (kUseThreadLocalAllocationStack) {
     heap_->RevokeAllThreadLocalAllocationStacks(self);
   }
@@ -227,11 +227,11 @@
   // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
   // before they are properly counted.
   RevokeAllThreadLocalBuffers();
-  timings_.StartSplit("PreSweepingGcVerification");
+  GetTimings()->StartSplit("PreSweepingGcVerification");
   // Disabled due to an issue where we have objects in the bump pointer space which reference dead
   // objects.
   // heap_->PreSweepingGcVerification(this);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 void MarkCompact::UpdateAndMarkModUnion() {
@@ -243,8 +243,7 @@
         // TODO: Improve naming.
         TimingLogger::ScopedSplit split(
             space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
-                                     "UpdateAndMarkImageModUnionTable",
-                                     &timings_);
+                                     "UpdateAndMarkImageModUnionTable", GetTimings());
         table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
       }
     }
@@ -252,27 +251,28 @@
 }
 
 void MarkCompact::MarkReachableObjects() {
-  timings_.StartSplit("MarkStackAsLive");
+  GetTimings()->StartSplit("MarkStackAsLive");
   accounting::ObjectStack* live_stack = heap_->GetLiveStack();
   heap_->MarkAllocStackAsLive(live_stack);
   live_stack->Reset();
   // Recursively process the mark stack.
   ProcessMarkStack();
+  GetTimings()->EndSplit();
 }
 
 void MarkCompact::ReclaimPhase() {
-  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+  TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   // Reclaim unmarked objects.
   Sweep(false);
   // Swap the live and mark bitmaps for each space which we modified space. This is an
   // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
   // bitmaps.
-  timings_.StartSplit("SwapBitmapsAndUnBindBitmaps");
+  GetTimings()->StartSplit("SwapBitmapsAndUnBindBitmaps");
   SwapBitmaps();
   GetHeap()->UnBindBitmaps();  // Unbind the live and mark bitmaps.
   Compact();
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 void MarkCompact::ResizeMarkStack(size_t new_size) {
@@ -340,7 +340,7 @@
 };
 
 void MarkCompact::UpdateReferences() {
-  timings_.NewSplit(__FUNCTION__);
+  GetTimings()->NewSplit(__FUNCTION__);
   Runtime* runtime = Runtime::Current();
   // Update roots.
   runtime->VisitRoots(UpdateRootCallback, this);
@@ -353,7 +353,7 @@
       TimingLogger::ScopedSplit split(
           space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
                                    "UpdateImageModUnionTableReferences",
-                                   &timings_);
+                                   GetTimings());
       table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this);
     } else {
       // No mod union table, so we need to scan the space using bitmap visit.
@@ -381,7 +381,7 @@
 }
 
 void MarkCompact::Compact() {
-  timings_.NewSplit(__FUNCTION__);
+  GetTimings()->NewSplit(__FUNCTION__);
   CalculateObjectForwardingAddresses();
   UpdateReferences();
   MoveObjects();
@@ -389,9 +389,9 @@
   int64_t objects_freed = space_->GetObjectsAllocated() - live_objects_in_space_;
   int64_t bytes_freed = reinterpret_cast<int64_t>(space_->End()) -
       reinterpret_cast<int64_t>(bump_pointer_);
-  timings_.NewSplit("RecordFree");
+  GetTimings()->NewSplit("RecordFree");
   space_->RecordFree(objects_freed, bytes_freed);
-  RecordFree(objects_freed, bytes_freed);
+  RecordFree(ObjectBytePair(objects_freed, bytes_freed));
   space_->SetEnd(bump_pointer_);
   // Need to zero out the memory we freed. TODO: Use madvise for pages.
   memset(bump_pointer_, 0, bytes_freed);
@@ -399,7 +399,7 @@
 
 // Marks all objects in the root set.
 void MarkCompact::MarkRoots() {
-  timings_.NewSplit("MarkRoots");
+  GetTimings()->NewSplit("MarkRoots");
   Runtime::Current()->VisitRoots(MarkRootCallback, this);
 }
 
@@ -483,9 +483,9 @@
 }
 
 void MarkCompact::SweepSystemWeaks() {
-  timings_.StartSplit("SweepSystemWeaks");
+  GetTimings()->StartSplit("SweepSystemWeaks");
   Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -523,7 +523,7 @@
 }
 
 void MarkCompact::MoveObjects() {
-  timings_.NewSplit(__FUNCTION__);
+  GetTimings()->NewSplit(__FUNCTION__);
   // Move the objects in the before forwarding bitmap.
   MoveObjectVisitor visitor(this);
   objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
@@ -534,7 +534,7 @@
 
 void MarkCompact::Sweep(bool swap_bitmaps) {
   DCHECK(mark_stack_->IsEmpty());
-  TimingLogger::ScopedSplit split("Sweep", &timings_);
+  TimingLogger::ScopedSplit split("Sweep", GetTimings());
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
     if (space->IsContinuousMemMapAllocSpace()) {
       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -542,22 +542,16 @@
         continue;
       }
       TimingLogger::ScopedSplit split(
-          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
-      size_t freed_objects = 0;
-      size_t freed_bytes = 0;
-      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-      RecordFree(freed_objects, freed_bytes);
+          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
+      RecordFree(alloc_space->Sweep(swap_bitmaps));
     }
   }
   SweepLargeObjects(swap_bitmaps);
 }
 
 void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
-  size_t freed_objects = 0;
-  size_t freed_bytes = 0;
-  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-  RecordFreeLargeObjects(freed_objects, freed_bytes);
+  TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
@@ -596,13 +590,13 @@
 
 // Scan anything that's on the mark stack.
 void MarkCompact::ProcessMarkStack() {
-  timings_.StartSplit("ProcessMarkStack");
+  GetTimings()->StartSplit("ProcessMarkStack");
   while (!mark_stack_->IsEmpty()) {
     Object* obj = mark_stack_->PopBack();
     DCHECK(obj != nullptr);
     ScanObject(obj);
   }
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 void MarkCompact::SetSpace(space::BumpPointerSpace* space) {
@@ -611,7 +605,7 @@
 }
 
 void MarkCompact::FinishPhase() {
-  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+  TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
   space_ = nullptr;
   CHECK(mark_stack_->IsEmpty());
   mark_stack_->Reset();
@@ -624,9 +618,9 @@
 }
 
 void MarkCompact::RevokeAllThreadLocalBuffers() {
-  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+  GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
   GetHeap()->RevokeAllThreadLocalBuffers();
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 }  // namespace collector
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index fbb349e..d08796b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -81,7 +81,7 @@
 static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
 
 void MarkSweep::BindBitmaps() {
-  timings_.StartSplit("BindBitmaps");
+  GetTimings()->StartSplit("BindBitmaps");
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   // Mark all of the spaces we never collect as immune.
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -89,7 +89,7 @@
       CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
     }
   }
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
@@ -110,7 +110,7 @@
 }
 
 void MarkSweep::InitializePhase() {
-  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+  TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
   mark_stack_ = heap_->GetMarkStack();
   DCHECK(mark_stack_ != nullptr);
   immune_region_.Reset();
@@ -132,9 +132,9 @@
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     mark_bitmap_ = heap_->GetMarkBitmap();
   }
-  if (!clear_soft_references_) {
+  if (!GetCurrentIteration()->GetClearSoftReferences()) {
     // Always clear soft references if a non-sticky collection.
-    clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
+    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
   }
 }
 
@@ -170,15 +170,15 @@
 }
 
 void MarkSweep::ProcessReferences(Thread* self) {
-  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+  TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   GetHeap()->GetReferenceProcessor()->ProcessReferences(
-      true, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
-      &ProcessMarkStackCallback, this);
+      true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
+      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
 }
 
 void MarkSweep::PausePhase() {
-  TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
+  TimingLogger::ScopedSplit split("(Paused)PausePhase", GetTimings());
   Thread* self = Thread::Current();
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   if (IsConcurrent()) {
@@ -190,7 +190,7 @@
     RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
   }
   {
-    TimingLogger::ScopedSplit split("SwapStacks", &timings_);
+    TimingLogger::ScopedSplit split("SwapStacks", GetTimings());
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
     heap_->SwapStacks(self);
     live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
@@ -198,9 +198,9 @@
     // stacks and don't want anybody to allocate into the live stack.
     RevokeAllThreadLocalAllocationStacks(self);
   }
-  timings_.StartSplit("PreSweepingGcVerification");
+  GetTimings()->StartSplit("PreSweepingGcVerification");
   heap_->PreSweepingGcVerification(this);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
   // Disallow new system weaks to prevent a race which occurs when someone adds a new system
   // weak before we sweep them. Since this new system weak may not be marked, the GC may
   // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
@@ -217,7 +217,7 @@
     Thread* self = Thread::Current();
     CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
     // Process dirty cards and add dirty cards to mod union tables, also ages cards.
-    heap_->ProcessCards(timings_, false);
+    heap_->ProcessCards(GetTimings(), false);
     // The checkpoint root marking is required to avoid a race condition which occurs if the
     // following happens during a reference write:
     // 1. mutator dirties the card (write barrier)
@@ -243,22 +243,19 @@
 
 void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
   if (kUseThreadLocalAllocationStack) {
-    timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
+    GetTimings()->NewSplit("RevokeAllThreadLocalAllocationStacks");
     Locks::mutator_lock_->AssertExclusiveHeld(self);
     heap_->RevokeAllThreadLocalAllocationStacks(self);
   }
 }
 
 void MarkSweep::MarkingPhase() {
-  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+  TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
   Thread* self = Thread::Current();
-
   BindBitmaps();
   FindDefaultSpaceBitmap();
-
   // Process dirty cards and add dirty cards to mod union tables.
-  heap_->ProcessCards(timings_, false);
-
+  heap_->ProcessCards(GetTimings(), false);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   MarkRoots(self);
   MarkReachableObjects();
@@ -271,7 +268,7 @@
     if (immune_region_.ContainsSpace(space)) {
       const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
           "UpdateAndMarkImageModUnionTable";
-      TimingLogger::ScopedSplit split(name, &timings_);
+      TimingLogger::ScopedSplit split(name, GetTimings());
       accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
       CHECK(mod_union_table != nullptr);
       mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
@@ -286,7 +283,7 @@
 }
 
 void MarkSweep::ReclaimPhase() {
-  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+  TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
   Thread* self = Thread::Current();
   // Process the references concurrently.
   ProcessReferences(self);
@@ -301,18 +298,18 @@
     // Swap the live and mark bitmaps for each space which we modified space. This is an
     // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
     // bitmaps.
-    timings_.StartSplit("SwapBitmaps");
+    GetTimings()->StartSplit("SwapBitmaps");
     SwapBitmaps();
-    timings_.EndSplit();
+    GetTimings()->EndSplit();
 
     // Unbind the live and mark bitmaps.
-    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+    TimingLogger::ScopedSplit split("UnBindBitmaps", GetTimings());
     GetHeap()->UnBindBitmaps();
   }
 }
 
 void MarkSweep::FindDefaultSpaceBitmap() {
-  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
+  TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", GetTimings());
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
     accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
     // We want to have the main space instead of non moving if possible.
@@ -511,9 +508,9 @@
 void MarkSweep::MarkRoots(Thread* self) {
   if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
     // If we exclusively hold the mutator lock, all threads must be suspended.
-    timings_.StartSplit("MarkRoots");
+    GetTimings()->StartSplit("MarkRoots");
     Runtime::Current()->VisitRoots(MarkRootCallback, this);
-    timings_.EndSplit();
+    GetTimings()->EndSplit();
     RevokeAllThreadLocalAllocationStacks(self);
   } else {
     MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
@@ -525,16 +522,16 @@
 }
 
 void MarkSweep::MarkNonThreadRoots() {
-  timings_.StartSplit("MarkNonThreadRoots");
+  GetTimings()->StartSplit("MarkNonThreadRoots");
   Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
-  timings_.StartSplit("MarkConcurrentRoots");
+  GetTimings()->StartSplit("MarkConcurrentRoots");
   // Visit all runtime roots and clear dirty flags.
   Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 class ScanObjectVisitor {
@@ -755,7 +752,7 @@
     Thread* self = Thread::Current();
     // Can't have a different split for each space since multiple spaces can have their cards being
     // scanned at the same time.
-    timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
+    GetTimings()->StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
     // Try to take some of the mark stack since we can pass this off to the worker tasks.
     Object** mark_stack_begin = mark_stack_->Begin();
     Object** mark_stack_end = mark_stack_->End();
@@ -808,28 +805,28 @@
     thread_pool->StartWorkers(self);
     thread_pool->Wait(self, true, true);
     thread_pool->StopWorkers(self);
-    timings_.EndSplit();
+    GetTimings()->EndSplit();
   } else {
     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
       if (space->GetMarkBitmap() != nullptr) {
         // Image spaces are handled properly since live == marked for them.
         switch (space->GetGcRetentionPolicy()) {
           case space::kGcRetentionPolicyNeverCollect:
-            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
+            GetTimings()->StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
                 "ScanGrayImageSpaceObjects");
             break;
           case space::kGcRetentionPolicyFullCollect:
-            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
+            GetTimings()->StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
                 "ScanGrayZygoteSpaceObjects");
             break;
           case space::kGcRetentionPolicyAlwaysCollect:
-            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
+            GetTimings()->StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
                 "ScanGrayAllocSpaceObjects");
             break;
           }
         ScanObjectVisitor visitor(this);
         card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
-        timings_.EndSplit();
+        GetTimings()->EndSplit();
       }
     }
   }
@@ -866,7 +863,7 @@
 // Populates the mark stack based on the set of marked objects and
 // recursively marks until the mark stack is emptied.
 void MarkSweep::RecursiveMark() {
-  TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
+  TimingLogger::ScopedSplit split("RecursiveMark", GetTimings());
   // RecursiveMark will build the lists of known instances of the Reference classes. See
   // DelayReferenceReferent for details.
   if (kUseRecursiveMark) {
@@ -934,24 +931,24 @@
 
 void MarkSweep::ReMarkRoots() {
   Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
-  timings_.StartSplit("(Paused)ReMarkRoots");
+  GetTimings()->StartSplit("(Paused)ReMarkRoots");
   Runtime::Current()->VisitRoots(
       MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
                                                           kVisitRootFlagStopLoggingNewRoots |
                                                           kVisitRootFlagClearRootLog));
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
   if (kVerifyRootsMarked) {
-    timings_.StartSplit("(Paused)VerifyRoots");
+    GetTimings()->StartSplit("(Paused)VerifyRoots");
     Runtime::Current()->VisitRoots(VerifyRootMarked, this);
-    timings_.EndSplit();
+    GetTimings()->EndSplit();
   }
 }
 
 void MarkSweep::SweepSystemWeaks(Thread* self) {
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-  timings_.StartSplit("SweepSystemWeaks");
+  GetTimings()->StartSplit("SweepSystemWeaks");
   Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
@@ -1009,7 +1006,7 @@
 void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                     bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
   CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
-  timings_.StartSplit("MarkRootsCheckpoint");
+  GetTimings()->StartSplit("MarkRootsCheckpoint");
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   // Request the check point is run on all threads returning a count of the threads that must
   // run through the barrier including self.
@@ -1024,19 +1021,17 @@
   }
   Locks::mutator_lock_->SharedLock(self);
   Locks::heap_bitmap_lock_->ExclusiveLock(self);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
-  timings_.StartSplit("SweepArray");
+  GetTimings()->StartSplit("SweepArray");
   Thread* self = Thread::Current();
   mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
       sweep_array_free_buffer_mem_map_->BaseBegin());
   size_t chunk_free_pos = 0;
-  size_t freed_bytes = 0;
-  size_t freed_large_object_bytes = 0;
-  size_t freed_objects = 0;
-  size_t freed_large_objects = 0;
+  ObjectBytePair freed;
+  ObjectBytePair freed_los;
   // How many objects are left in the array, modified after each space is swept.
   Object** objects = allocations->Begin();
   size_t count = allocations->Size();
@@ -1077,10 +1072,10 @@
         // if needed.
         if (!mark_bitmap->Test(obj)) {
           if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
-            timings_.StartSplit("FreeList");
-            freed_objects += chunk_free_pos;
-            freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
-            timings_.EndSplit();
+            GetTimings()->StartSplit("FreeList");
+            freed.objects += chunk_free_pos;
+            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
+            GetTimings()->EndSplit();
             chunk_free_pos = 0;
           }
           chunk_free_buffer[chunk_free_pos++] = obj;
@@ -1090,10 +1085,10 @@
       }
     }
     if (chunk_free_pos > 0) {
-      timings_.StartSplit("FreeList");
-      freed_objects += chunk_free_pos;
-      freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
-      timings_.EndSplit();
+      GetTimings()->StartSplit("FreeList");
+      freed.objects += chunk_free_pos;
+      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
+      GetTimings()->EndSplit();
       chunk_free_pos = 0;
     }
     // All of the references which space contained are no longer in the allocation stack, update
@@ -1114,23 +1109,16 @@
       continue;
     }
     if (!large_mark_objects->Test(obj)) {
-      ++freed_large_objects;
-      freed_large_object_bytes += large_object_space->Free(self, obj);
+      ++freed_los.objects;
+      freed_los.bytes += large_object_space->Free(self, obj);
     }
   }
-  timings_.EndSplit();
-
-  timings_.StartSplit("RecordFree");
-  VLOG(heap) << "Freed " << freed_objects << "/" << count << " objects with size "
-             << PrettySize(freed_bytes);
-  RecordFree(freed_objects, freed_bytes);
-  RecordFreeLargeObjects(freed_large_objects, freed_large_object_bytes);
-  timings_.EndSplit();
-
-  timings_.StartSplit("ResetStack");
+  GetTimings()->NewSplit("RecordFree");
+  RecordFree(freed);
+  RecordFreeLOS(freed_los);
+  GetTimings()->NewSplit("ResetStack");
   allocations->Reset();
-  timings_.EndSplit();
-
+  GetTimings()->EndSplit();
   sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
 }
 
@@ -1139,33 +1127,27 @@
   CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
   // Mark everything allocated since the last as GC live so that we can sweep concurrently,
   // knowing that new allocations won't be marked as live.
-  timings_.StartSplit("MarkStackAsLive");
+  GetTimings()->StartSplit("MarkStackAsLive");
   accounting::ObjectStack* live_stack = heap_->GetLiveStack();
   heap_->MarkAllocStackAsLive(live_stack);
   live_stack->Reset();
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 
   DCHECK(mark_stack_->IsEmpty());
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
     if (space->IsContinuousMemMapAllocSpace()) {
       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
       TimingLogger::ScopedSplit split(
-          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
-      size_t freed_objects = 0;
-      size_t freed_bytes = 0;
-      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-      RecordFree(freed_objects, freed_bytes);
+          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
+      RecordFree(alloc_space->Sweep(swap_bitmaps));
     }
   }
   SweepLargeObjects(swap_bitmaps);
 }
 
 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
-  size_t freed_objects = 0;
-  size_t freed_bytes = 0;
-  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-  RecordFreeLargeObjects(freed_objects, freed_bytes);
+  TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
@@ -1233,7 +1215,7 @@
 
 // Scan anything that's on the mark stack.
 void MarkSweep::ProcessMarkStack(bool paused) {
-  timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
+  GetTimings()->StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
   size_t thread_count = GetThreadCount(paused);
   if (kParallelProcessMarkStack && thread_count > 1 &&
       mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
@@ -1266,7 +1248,7 @@
       ScanObject(obj);
     }
   }
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 inline bool MarkSweep::IsMarked(const Object* object) const {
@@ -1280,7 +1262,7 @@
 }
 
 void MarkSweep::FinishPhase() {
-  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+  TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
   if (kCountScannedTypes) {
     VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
         << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
@@ -1317,9 +1299,9 @@
     // not be in use.
     GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
   } else {
-    timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+    GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
     GetHeap()->RevokeAllThreadLocalBuffers();
-    timings_.EndSplit();
+    GetTimings()->EndSplit();
   }
 }
 
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 54e77a7..8a3ac9d 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -59,7 +59,7 @@
 static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;
 
 void SemiSpace::BindBitmaps() {
-  timings_.StartSplit("BindBitmaps");
+  GetTimings()->StartSplit("BindBitmaps");
   WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
   // Mark all of the spaces we never collect as immune.
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -83,7 +83,7 @@
     // We won't collect the large object space if a bump pointer space only collection.
     is_large_object_space_immune_ = true;
   }
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
@@ -131,7 +131,7 @@
 }
 
 void SemiSpace::InitializePhase() {
-  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+  TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
   mark_stack_ = heap_->GetMarkStack();
   DCHECK(mark_stack_ != nullptr);
   immune_region_.Reset();
@@ -151,11 +151,11 @@
 }
 
 void SemiSpace::ProcessReferences(Thread* self) {
-  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+  TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   GetHeap()->GetReferenceProcessor()->ProcessReferences(
-      false, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback,
-      &MarkObjectCallback, &ProcessMarkStackCallback, this);
+      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
+      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
 }
 
 void SemiSpace::MarkingPhase() {
@@ -176,8 +176,9 @@
   // to prevent fragmentation.
   RevokeAllThreadLocalBuffers();
   if (generational_) {
-    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
-        clear_soft_references_) {
+    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
+        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
+        GetCurrentIteration()->GetClearSoftReferences()) {
       // If an explicit, native allocation-triggered, or last attempt
       // collection, collect the whole heap.
       whole_heap_collection_ = true;
@@ -191,21 +192,15 @@
     }
   }
 
-  if (!clear_soft_references_) {
-    if (!generational_) {
-      // If non-generational, always clear soft references.
-      clear_soft_references_ = true;
-    } else {
-      // If generational, clear soft references if a whole heap collection.
-      if (whole_heap_collection_) {
-        clear_soft_references_ = true;
-      }
-    }
+  if (!generational_ || whole_heap_collection_) {
+    // If non-generational, always clear soft references.
+    // If generational, clear soft references if a whole heap collection.
+    GetCurrentIteration()->SetClearSoftReferences(true);
   }
 
   Locks::mutator_lock_->AssertExclusiveHeld(self_);
 
-  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+  TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
   if (generational_) {
     // If last_gc_to_space_end_ is out of the bounds of the from-space
     // (the to-space from last GC), then point it to the beginning of
@@ -220,14 +215,14 @@
   // Assume the cleared space is already empty.
   BindBitmaps();
   // Process dirty cards and add dirty cards to mod-union tables.
-  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
+  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_);
   // Clear the whole card table since we can not Get any additional dirty cards during the
   // paused GC. This saves memory but only works for pause the world collectors.
-  timings_.NewSplit("ClearCardTable");
+  GetTimings()->NewSplit("ClearCardTable");
   heap_->GetCardTable()->ClearCardTable();
   // Need to do this before the checkpoint since we don't want any threads to add references to
   // the live stack during the recursive mark.
-  timings_.NewSplit("SwapStacks");
+  GetTimings()->NewSplit("SwapStacks");
   if (kUseThreadLocalAllocationStack) {
     heap_->RevokeAllThreadLocalAllocationStacks(self_);
   }
@@ -245,7 +240,7 @@
     ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
     SweepSystemWeaks();
   }
-  timings_.NewSplit("RecordFree");
+  GetTimings()->NewSplit("RecordFree");
   // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
   // before they are properly counted.
   RevokeAllThreadLocalBuffers();
@@ -257,14 +252,14 @@
   CHECK_LE(to_objects, from_objects);
   // Note: Freed bytes can be negative if we copy form a compacted space to a free-list backed
   // space.
-  RecordFree(from_objects - to_objects, from_bytes - to_bytes);
+  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
   // Clear and protect the from space.
   from_space_->Clear();
   VLOG(heap) << "Protecting from_space_: " << *from_space_;
   from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
-  timings_.StartSplit("PreSweepingGcVerification");
+  GetTimings()->StartSplit("PreSweepingGcVerification");
   heap_->PreSweepingGcVerification(this);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
   if (swap_semi_spaces_) {
     heap_->SwapSemiSpaces();
   }
@@ -280,7 +275,7 @@
         TimingLogger::ScopedSplit split(
             space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                      "UpdateAndMarkImageModUnionTable",
-                                     &timings_);
+                                     GetTimings());
         table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
       } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
         DCHECK(kUseRememberedSet);
@@ -359,12 +354,12 @@
 };
 
 void SemiSpace::MarkReachableObjects() {
-  timings_.StartSplit("MarkStackAsLive");
+  GetTimings()->StartSplit("MarkStackAsLive");
   accounting::ObjectStack* live_stack = heap_->GetLiveStack();
   heap_->MarkAllocStackAsLive(live_stack);
   live_stack->Reset();
 
-  timings_.NewSplit("UpdateAndMarkRememberedSets");
+  GetTimings()->NewSplit("UpdateAndMarkRememberedSets");
   for (auto& space : heap_->GetContinuousSpaces()) {
     // If the space is immune and has no mod union table (the
     // non-moving space when the bump pointer space only collection is
@@ -403,7 +398,7 @@
   }
 
   if (is_large_object_space_immune_) {
-    timings_.NewSplit("VisitLargeObjects");
+    GetTimings()->NewSplit("VisitLargeObjects");
     DCHECK(generational_ && !whole_heap_collection_);
     // Delay copying the live set to the marked set until here from
     // BindBitmaps() as the large objects on the allocation stack may
@@ -421,13 +416,13 @@
                                         reinterpret_cast<uintptr_t>(large_object_space->End()),
                                         visitor);
   }
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
   // Recursively process the mark stack.
   ProcessMarkStack();
 }
 
 void SemiSpace::ReclaimPhase() {
-  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+  TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
   {
     WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
     // Reclaim unmarked objects.
@@ -435,11 +430,11 @@
     // Swap the live and mark bitmaps for each space which we modified space. This is an
     // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
     // bitmaps.
-    timings_.StartSplit("SwapBitmaps");
+    GetTimings()->StartSplit("SwapBitmaps");
     SwapBitmaps();
-    timings_.EndSplit();
+    GetTimings()->EndSplit();
     // Unbind the live and mark bitmaps.
-    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+    TimingLogger::ScopedSplit split("UnBindBitmaps", GetTimings());
     GetHeap()->UnBindBitmaps();
   }
   if (saved_bytes_ > 0) {
@@ -634,7 +629,7 @@
 
 // Marks all objects in the root set.
 void SemiSpace::MarkRoots() {
-  timings_.NewSplit("MarkRoots");
+  GetTimings()->NewSplit("MarkRoots");
   // TODO: Visit up image roots as well?
   Runtime::Current()->VisitRoots(MarkRootCallback, this);
 }
@@ -660,9 +655,9 @@
 }
 
 void SemiSpace::SweepSystemWeaks() {
-  timings_.StartSplit("SweepSystemWeaks");
+  GetTimings()->StartSplit("SweepSystemWeaks");
   Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -671,7 +666,7 @@
 
 void SemiSpace::Sweep(bool swap_bitmaps) {
   DCHECK(mark_stack_->IsEmpty());
-  TimingLogger::ScopedSplit split("Sweep", &timings_);
+  TimingLogger::ScopedSplit split("Sweep", GetTimings());
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
     if (space->IsContinuousMemMapAllocSpace()) {
       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -679,11 +674,8 @@
         continue;
       }
       TimingLogger::ScopedSplit split(
-          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
-      size_t freed_objects = 0;
-      size_t freed_bytes = 0;
-      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-      RecordFree(freed_objects, freed_bytes);
+          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
+      RecordFree(alloc_space->Sweep(swap_bitmaps));
     }
   }
   if (!is_large_object_space_immune_) {
@@ -693,11 +685,8 @@
 
 void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
   DCHECK(!is_large_object_space_immune_);
-  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
-  size_t freed_objects = 0;
-  size_t freed_bytes = 0;
-  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
-  RecordFreeLargeObjects(freed_objects, freed_bytes);
+  TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
@@ -750,7 +739,7 @@
     DCHECK(mark_bitmap != nullptr);
     DCHECK_EQ(live_bitmap, mark_bitmap);
   }
-  timings_.StartSplit("ProcessMarkStack");
+  GetTimings()->StartSplit("ProcessMarkStack");
   while (!mark_stack_->IsEmpty()) {
     Object* obj = mark_stack_->PopBack();
     if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
@@ -761,7 +750,7 @@
     }
     ScanObject(obj);
   }
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
@@ -792,7 +781,7 @@
 }
 
 void SemiSpace::FinishPhase() {
-  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+  TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
   // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
   // further action is done by the heap.
   to_space_ = nullptr;
@@ -833,9 +822,9 @@
 }
 
 void SemiSpace::RevokeAllThreadLocalBuffers() {
-  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+  GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
   GetHeap()->RevokeAllThreadLocalBuffers();
-  timings_.EndSplit();
+  GetTimings()->EndSplit();
 }
 
 }  // namespace collector
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1c94d6f..6c63e5f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1643,8 +1643,8 @@
     if (temp_space_ != nullptr) {
       CHECK(temp_space_->IsEmpty());
     }
-    total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
-    total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
+    total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
+    total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
     // Update the end and write out image.
     non_moving_space_->SetEnd(target_space.End());
     non_moving_space_->SetLimit(target_space.Limit());
@@ -1838,15 +1838,15 @@
       << "Could not find garbage collector with collector_type="
       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
   collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
-  total_objects_freed_ever_ += collector->GetFreedObjects();
-  total_bytes_freed_ever_ += collector->GetFreedBytes();
+  total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
+  total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
   RequestHeapTrim();
   // Enqueue cleared references.
   reference_processor_.EnqueueClearedReferences(self);
   // Grow the heap so that we know when to perform the next GC.
   GrowForUtilization(collector);
-  const size_t duration = collector->GetDurationNs();
-  const std::vector<uint64_t>& pause_times = collector->GetPauseTimes();
+  const size_t duration = GetCurrentGcIteration()->GetDurationNs();
+  const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
   // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
   // (mutator time blocked >=  long_pause_log_threshold_).
   bool log_gc = gc_cause == kGcCauseExplicit;
@@ -1868,14 +1868,14 @@
                      << ((i != pause_times.size() - 1) ? "," : "");
     }
     LOG(INFO) << gc_cause << " " << collector->GetName()
-              << " GC freed "  << collector->GetFreedObjects() << "("
-              << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
-              << collector->GetFreedLargeObjects() << "("
-              << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
+              << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
+              << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
+              << current_gc_iteration_.GetFreedLargeObjects() << "("
+              << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
               << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
               << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
               << " total " << PrettyDuration((duration / 1000) * 1000);
-    VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
+    VLOG(heap) << ConstDumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
   }
   FinishGC(self, gc_type);
   // Inform DDMS that a GC completed.
@@ -2313,7 +2313,7 @@
   return it->second;
 }
 
-void Heap::ProcessCards(TimingLogger& timings, bool use_rem_sets) {
+void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
   // Clear cards and keep track of cards cleared in the mod-union table.
   for (const auto& space : continuous_spaces_) {
     accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
@@ -2321,15 +2321,15 @@
     if (table != nullptr) {
       const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
           "ImageModUnionClearCards";
-      TimingLogger::ScopedSplit split(name, &timings);
+      TimingLogger::ScopedSplit split(name, timings);
       table->ClearCards();
     } else if (use_rem_sets && rem_set != nullptr) {
       DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
           << static_cast<int>(collector_type_);
-      TimingLogger::ScopedSplit split("AllocSpaceRemSetClearCards", &timings);
+      TimingLogger::ScopedSplit split("AllocSpaceRemSetClearCards", timings);
       rem_set->ClearCards();
     } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
-      TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
+      TimingLogger::ScopedSplit split("AllocSpaceClearCards", timings);
       // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
       // were dirty before the GC started.
       // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
@@ -2337,7 +2337,8 @@
      // The race means we either end up with an aged card or an unaged card. Since we checkpoint
      // the roots and then scan / update mod union tables afterwards, we will always scan either
      // card. If we end up with the unaged card, we scan it in the pause.
-      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
+      card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
+                                     VoidFunctor());
     }
   }
 }
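
Note: ProcessCards() now receives the logger by pointer, so call sites can hand it the shared iteration's logger directly. A hedged call-site sketch; the surrounding code is not part of this change:

    // Somewhere in Heap's collection path (assumed context):
    ProcessCards(GetCurrentGcIteration()->GetTimings(), use_rem_sets);
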
@@ -2347,7 +2348,7 @@
 
 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
   Thread* const self = Thread::Current();
-  TimingLogger* const timings = &gc->GetTimings();
+  TimingLogger* const timings = current_gc_iteration_.GetTimings();
   if (verify_pre_gc_heap_) {
     TimingLogger::ScopedSplit split("PreGcVerifyHeapReferences", timings);
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -2389,13 +2390,13 @@
 void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
   // TODO: Add a new runtime option for this?
   if (verify_pre_gc_rosalloc_) {
-    RosAllocVerification(&gc->GetTimings(), "PreGcRosAllocVerification");
+    RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
   }
 }
 
 void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
   Thread* const self = Thread::Current();
-  TimingLogger* const timings = &gc->GetTimings();
+  TimingLogger* const timings = current_gc_iteration_.GetTimings();
  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
   // reachable objects.
   if (verify_pre_sweeping_heap_) {
@@ -2421,7 +2422,7 @@
 void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
   // Only pause if we have to do some verification.
   Thread* const self = Thread::Current();
-  TimingLogger* const timings = &gc->GetTimings();
+  TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
   if (verify_system_weaks_) {
     ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
     collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
@@ -2575,9 +2576,9 @@
    // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
    // pathological case where dead objects that aren't reclaimed by the sticky GC accumulate
    // whenever the sticky GC throughput remains >= the full/partial throughput.
-    if (collector_ran->GetEstimatedLastIterationThroughput() * kStickyGcThroughputAdjustment >=
+    if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
         non_sticky_collector->GetEstimatedMeanThroughput() &&
-        non_sticky_collector->GetIterations() > 0 &&
+        non_sticky_collector->NumberOfIterations() > 0 &&
         bytes_allocated <= max_allowed_footprint_) {
       next_gc_type_ = collector::kGcTypeSticky;
     } else {
@@ -2595,7 +2596,7 @@
     if (IsGcConcurrent()) {
       // Calculate when to perform the next ConcurrentGC.
       // Calculate the estimated GC duration.
-      const double gc_duration_seconds = NsToMs(collector_ran->GetDurationNs()) / 1000.0;
+      const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
       // Estimate how many remaining bytes we will have when we need to start the next GC.
       size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
       remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
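
Note: the sticky-GC condition above packs three tests into one expression. A minimal standalone sketch of the same decision, with the function name hypothetical and the getters taken from this diff:

    static bool KeepDoingStickyGc(collector::Iteration* iteration,
                                  collector::GarbageCollector* non_sticky,
                                  size_t bytes_allocated,
                                  size_t max_allowed_footprint) {
      // Stay sticky only while the current iteration's throughput, scaled by
      // kStickyGcThroughputAdjustment, matches the non-sticky collector's mean,
      // there is at least one non-sticky iteration to compare against, and
      // allocations still fit under the footprint limit.
      return iteration->GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
                 non_sticky->GetEstimatedMeanThroughput() &&
             non_sticky->NumberOfIterations() > 0 &&
             bytes_allocated <= max_allowed_footprint;
    }
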
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 368a20c..a34cd38 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -27,6 +27,7 @@
 #include "gc/accounting/atomic_stack.h"
 #include "gc/accounting/card_table.h"
 #include "gc/gc_cause.h"
+#include "gc/collector/garbage_collector.h"
 #include "gc/collector/gc_type.h"
 #include "gc/collector_type.h"
 #include "globals.h"
@@ -317,6 +318,13 @@
     return discontinuous_spaces_;
   }
 
+  const collector::Iteration* GetCurrentGcIteration() const {
+    return &current_gc_iteration_;
+  }
+  collector::Iteration* GetCurrentGcIteration() {
+    return &current_gc_iteration_;
+  }
+
   // Enable verification of object references when the runtime is sufficiently initialized.
   void EnableObjectValidation() {
     verify_object_mode_ = kVerifyObjectSupport;
@@ -690,7 +698,7 @@
   void SwapStacks(Thread* self);
 
   // Clear cards and update the mod union table.
-  void ProcessCards(TimingLogger& timings, bool use_rem_sets);
+  void ProcessCards(TimingLogger* timings, bool use_rem_sets);
 
   // Signal the heap trim daemon that there is something to do, either a heap transition or heap
   // trim.
@@ -849,6 +857,9 @@
   // Data structure GC overhead.
   Atomic<size_t> gc_memory_overhead_;
 
+  // Info related to the current or previous GC iteration.
+  collector::Iteration current_gc_iteration_;
+
   // Heap verification flags.
   const bool verify_missing_card_marks_;
   const bool verify_system_weaks_;
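
Note: a hedged sketch of how code outside Heap reaches the shared iteration state through the new accessors; Runtime::Current()->GetHeap() is the usual ART entry point, the rest is illustrative:

    gc::Heap* heap = Runtime::Current()->GetHeap();
    gc::collector::Iteration* iteration = heap->GetCurrentGcIteration();
    // One timing logger and one pause list now serve every collector.
    TimingLogger* timings = iteration->GetTimings();
    VLOG(heap) << ConstDumpable<TimingLogger>(*timings);
    uint64_t total_paused_ns = 0;
    for (uint64_t pause_ns : iteration->GetPauseTimes()) {
      total_paused_ns += pause_ns;
    }
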
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 54a63f0..abae8ff 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -411,28 +411,24 @@
       bitmap->Clear(ptrs[i]);
     }
   }
-  context->freed_objects += num_ptrs;
-  context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
+  context->freed.objects += num_ptrs;
+  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
 }
 
-void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* out_freed_objects,
-                             size_t* out_freed_bytes) {
+collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
   if (Begin() >= End()) {
-    return;
+    return collector::ObjectBytePair(0, 0);
   }
   accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
   accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
   if (swap_bitmaps) {
     std::swap(live_bitmap, mark_bitmap);
   }
-  DCHECK(out_freed_objects != nullptr);
-  DCHECK(out_freed_bytes != nullptr);
-  SweepCallbackContext scc(swap_bitmaps, this);
+  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
   accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                            reinterpret_cast<uintptr_t>(Begin()),
                                            reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
-  *out_freed_objects += scc.freed_objects;
-  *out_freed_bytes += scc.freed_bytes;
+  return scc.freed;
 }
 
 }  // namespace space
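
Note: callers now aggregate the returned pair instead of threading a pair of out-parameters through every Sweep(). A hedged caller sketch; the function and variable names are hypothetical:

    gc::collector::ObjectBytePair SweepBothSpaces(
        gc::space::ContinuousMemMapAllocSpace* alloc_space,
        gc::space::LargeObjectSpace* los,
        bool swap_bitmaps) {
      gc::collector::ObjectBytePair total(0, 0);
      gc::collector::ObjectBytePair freed = alloc_space->Sweep(swap_bitmaps);
      total.objects += freed.objects;
      total.bytes += freed.bytes;
      gc::collector::ObjectBytePair freed_los = los->Sweep(swap_bitmaps);
      total.objects += freed_los.objects;
      total.bytes += freed_los.bytes;
      return total;
    }
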
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index a84b43a..01982d0 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -73,7 +73,7 @@
     return this;
   }
 
-  void Sweep(bool swap_bitmaps, size_t* out_freed_objects, size_t* out_freed_bytes);
+  collector::ObjectBytePair Sweep(bool swap_bitmaps);
 
   virtual bool CanMoveObjects() const OVERRIDE {
     return false;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 57ed0bd..4d74f3c 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -242,8 +242,8 @@
  // Use a bulk free that merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may come at the
  // expense of allocation performance.
-  context->freed_objects += num_ptrs;
-  context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
+  context->freed.objects += num_ptrs;
+  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
 }
 
 }  // namespace space
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 4e28416..bff28f6 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -81,14 +81,12 @@
   CHECK(mark_bitmap_.get() != nullptr);
 }
 
-void ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
-  DCHECK(freed_objects != nullptr);
-  DCHECK(freed_bytes != nullptr);
+collector::ObjectBytePair ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps) {
   accounting::ContinuousSpaceBitmap* live_bitmap = GetLiveBitmap();
   accounting::ContinuousSpaceBitmap* mark_bitmap = GetMarkBitmap();
   // If the bitmaps are bound then sweeping this space clearly won't do anything.
   if (live_bitmap == mark_bitmap) {
-    return;
+    return collector::ObjectBytePair(0, 0);
   }
   SweepCallbackContext scc(swap_bitmaps, this);
   if (swap_bitmaps) {
@@ -98,8 +96,7 @@
   accounting::ContinuousSpaceBitmap::SweepWalk(
       *live_bitmap, *mark_bitmap, reinterpret_cast<uintptr_t>(Begin()),
       reinterpret_cast<uintptr_t>(End()), GetSweepCallback(), reinterpret_cast<void*>(&scc));
-  *freed_objects += scc.freed_objects;
-  *freed_bytes += scc.freed_bytes;
+  return scc.freed;
 }
 
 // Returns the old mark bitmap.
@@ -136,9 +133,8 @@
   mark_bitmap_->SetName(temp_name);
 }
 
-Space::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
-    : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()), freed_objects(0),
-      freed_bytes(0) {
+AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
+    : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()) {
 }
 
 }  // namespace space
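
Note: with the counters folded into the context, the callback-side pattern is uniform across spaces: free through the space, then charge the result to context->freed. A hedged sketch; the callback name is hypothetical and the (num_ptrs, ptrs, arg) signature is assumed from the call sites above:

    static void ExampleSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
      auto* context = reinterpret_cast<AllocSpace::SweepCallbackContext*>(arg);
      MallocSpace* space = context->space->AsMallocSpace();
      // FreeList() returns the number of bytes reclaimed for these pointers.
      context->freed.objects += num_ptrs;
      context->freed.bytes += space->FreeList(context->self, num_ptrs, ptrs);
    }
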
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 8415fa1..8444a70 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -23,6 +23,7 @@
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "gc/accounting/space_bitmap.h"
+#include "gc/collector/garbage_collector.h"
 #include "globals.h"
 #include "image.h"
 #include "mem_map.h"
@@ -172,16 +173,6 @@
   std::string name_;
 
  protected:
-  struct SweepCallbackContext {
-   public:
-    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
-    const bool swap_bitmaps;
-    space::Space* const space;
-    Thread* const self;
-    size_t freed_objects;
-    size_t freed_bytes;
-  };
-
   // When should objects within this space be reclaimed? Not constant as we vary it in the case
   // of Zygote forking.
   GcRetentionPolicy gc_retention_policy_;
@@ -232,6 +223,14 @@
   virtual void RevokeAllThreadLocalBuffers() = 0;
 
  protected:
+  struct SweepCallbackContext {
+    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
+    const bool swap_bitmaps;
+    space::Space* const space;
+    Thread* const self;
+    collector::ObjectBytePair freed;
+  };
+
   AllocSpace() {}
   virtual ~AllocSpace() {}
 
@@ -415,7 +414,7 @@
     return mark_bitmap_.get();
   }
 
-  void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+  collector::ObjectBytePair Sweep(bool swap_bitmaps);
   virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;
 
  protected:
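
Note: for readers without gc/collector/garbage_collector.h at hand (both space.h and heap.h now include it), this is the shape ObjectBytePair must have given its uses in this change: default- and two-argument-constructible with public objects/bytes members. A hedged reconstruction; the real definition may carry extra helpers:

    struct ObjectBytePair {
      ObjectBytePair(uint64_t num_objects = 0, int64_t num_bytes = 0)
          : objects(num_objects), bytes(num_bytes) {}
      uint64_t objects;  // Objects freed during a sweep / GC iteration.
      int64_t bytes;     // Bytes freed during the same interval.
    };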