Add standard GC log for transition collections.

Factor out the GC logging logic into a function.

Note that, as before, we still don't log background collections, but we
do log foreground ones such as OOME compactions.
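
Roughly, the new call pattern looks like this (a simplified sketch; the
thread suspension and bookkeeping around the transition are elided):

  // Compact() now returns the collector it ran so the caller can log it.
  collector::GarbageCollector* collector =
      Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
  GrowForUtilization(semi_space_collector_);
  // LogGC() holds the logging previously inlined in CollectGarbageInternal();
  // as before, it only prints for explicit GCs or slow GCs/pauses.
  LogGC(kGcCauseHomogeneousSpaceCompact, collector);
  FinishGC(self, collector::kGcTypeFull);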

Change-Id: Ifd35a796cf26aba2fba83bf07e72902357d91f8b
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9e159c2..9dc4117 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1675,7 +1675,8 @@
   AddSpace(to_space);
   // Make sure that we will have enough room to copy.
   CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
-  Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
+  collector::GarbageCollector* collector = Compact(to_space, from_space,
+                                                   kGcCauseHomogeneousSpaceCompact);
   const uint64_t space_size_after_compaction = to_space->Size();
   main_space_ = to_space;
   main_space_backup_.reset(from_space);
@@ -1694,6 +1695,7 @@
   // Finish GC.
   reference_processor_.EnqueueClearedReferences(self);
   GrowForUtilization(semi_space_collector_);
+  LogGC(kGcCauseHomogeneousSpaceCompact, collector);
   FinishGC(self, collector::kGcTypeFull);
   return HomogeneousSpaceCompactResult::kSuccess;
 }
@@ -1744,6 +1746,7 @@
     FinishGC(self, collector::kGcTypeNone);
     return;
   }
+  collector::GarbageCollector* collector = nullptr;
   tl->SuspendAll();
   switch (collector_type) {
     case kCollectorTypeSS: {
@@ -1758,7 +1761,7 @@
         bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
                                                                         mem_map.release());
         AddSpace(bump_pointer_space_);
-        Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
+        collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
         // Use the now empty main space mem map for the bump pointer temp space.
         mem_map.reset(main_space_->ReleaseMemMap());
         // Unset the pointers just in case.
@@ -1795,7 +1798,7 @@
         mem_map.release();
         // Compact to the main space from the bump pointer space, don't need to swap semispaces.
         AddSpace(main_space_);
-        Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
+        collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
         mem_map.reset(bump_pointer_space_->ReleaseMemMap());
         RemoveSpace(bump_pointer_space_);
         bump_pointer_space_ = nullptr;
@@ -1826,6 +1829,8 @@
   reference_processor_.EnqueueClearedReferences(self);
   uint64_t duration = NanoTime() - start_time;
   GrowForUtilization(semi_space_collector_);
+  DCHECK(collector != nullptr);
+  LogGC(kGcCauseCollectorTransition, collector);
   FinishGC(self, collector::kGcTypeFull);
   int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
   int32_t delta_allocated = before_allocated - after_allocated;
@@ -2166,9 +2171,9 @@
   std::swap(bump_pointer_space_, temp_space_);
 }
 
-void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
-                   space::ContinuousMemMapAllocSpace* source_space,
-                   GcCause gc_cause) {
+collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
+                                           space::ContinuousMemMapAllocSpace* source_space,
+                                           GcCause gc_cause) {
   CHECK(kMovingCollector);
   if (target_space != source_space) {
     // Don't swap spaces since this isn't a typical semi space collection.
@@ -2176,11 +2181,13 @@
     semi_space_collector_->SetFromSpace(source_space);
     semi_space_collector_->SetToSpace(target_space);
     semi_space_collector_->Run(gc_cause, false);
+    return semi_space_collector_;
   } else {
     CHECK(target_space->IsBumpPointerSpace())
         << "In-place compaction is only supported for bump pointer spaces";
     mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
     mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
+    return mark_compact_collector_;
   }
 }
 
@@ -2291,6 +2298,14 @@
   reference_processor_.EnqueueClearedReferences(self);
   // Grow the heap so that we know when to perform the next GC.
   GrowForUtilization(collector, bytes_allocated_before_gc);
+  LogGC(gc_cause, collector);
+  FinishGC(self, gc_type);
+  // Inform DDMS that a GC completed.
+  Dbg::GcDidFinish();
+  return gc_type;
+}
+
+void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
   const size_t duration = GetCurrentGcIteration()->GetDurationNs();
   const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
   // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
@@ -2310,8 +2325,8 @@
     const size_t total_memory = GetTotalMemory();
     std::ostringstream pause_string;
     for (size_t i = 0; i < pause_times.size(); ++i) {
-        pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
-                     << ((i != pause_times.size() - 1) ? "," : "");
+      pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
+                   << ((i != pause_times.size() - 1) ? "," : "");
     }
     LOG(INFO) << gc_cause << " " << collector->GetName()
               << " GC freed "  << current_gc_iteration_.GetFreedObjects() << "("
@@ -2323,10 +2338,6 @@
               << " total " << PrettyDuration((duration / 1000) * 1000);
     VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
   }
-  FinishGC(self, gc_type);
-  // Inform DDMS that a GC completed.
-  Dbg::GcDidFinish();
-  return gc_type;
 }
 
 void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 57c1460..b2478e6 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -666,12 +666,13 @@
   class CollectorTransitionTask;
   class HeapTrimTask;
 
-  // Compact source space to target space.
-  void Compact(space::ContinuousMemMapAllocSpace* target_space,
-               space::ContinuousMemMapAllocSpace* source_space,
-               GcCause gc_cause)
+  // Compact source space to target space. Returns the collector used.
+  collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
+                                       space::ContinuousMemMapAllocSpace* source_space,
+                                       GcCause gc_cause)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
   void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
 
   // Create a mem map with a preferred base address.