Fix JPC regression

JPC boot time had regressed because we were incorrectly performing too many GCs.

Counting the ImageSpace allocations as used memory caused us to schedule concurrent GCs incorrectly, as illustrated by the sketch below.
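A minimal standalone sketch of the effect (the sizes are made-up assumptions, not values from heap.cc): with the old accounting the boot image is counted against the heap, so the apparent free headroom seen by a check of the form GetTotalMemory() - used < concurrent_min_free_ is much smaller than it really is.

    #include <cstddef>
    #include <iostream>

    int main() {
      const size_t total_memory     = 32u << 20;  // hypothetical heap growth limit
      const size_t image_space_size = 12u << 20;  // hypothetical boot image size
      const size_t bytes_allocated  =  6u << 20;  // actual live allocations

      const size_t old_used = bytes_allocated + image_space_size;  // pre-change accounting
      const size_t new_used = bytes_allocated;                     // post-change accounting

      // Headroom as used by the concurrent GC scheduling check.
      std::cout << "headroom (old): " << (total_memory - old_used) << " bytes\n"   // 14 MiB
                << "headroom (new): " << (total_memory - new_used) << " bytes\n";  // 26 MiB
      return 0;
    }

Note that the real GetUsedMemorySize() added below still includes the ZygoteSpace, which is why num_bytes_allocated_ is reset once the zygote space is created.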

Change-Id: Ic466a1281ffc68141a5a19f3e7b8f7dea45fbdf8
diff --git a/src/heap.cc b/src/heap.cc
index 4576a90..2f7e519 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -238,14 +238,8 @@
   zygote_mod_union_table_.reset(new ModUnionTableCardCache(this));
   CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";
 
-  num_bytes_allocated_ = 0;
-  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    if ((*it)->IsImageSpace()) {
-      num_bytes_allocated_ += (*it)->AsImageSpace()->Size();
-    }
-  }
-
   // TODO: Count objects in the image space here.
+  num_bytes_allocated_ = 0;
   num_objects_allocated_ = 0;
 
   // Max stack size in bytes.
@@ -762,6 +756,9 @@
       break;
     }
   }
+
+  // Reset this since we now count the ZygoteSpace in the total heap size.
+  num_bytes_allocated_ = 0;
 }
 
 void Heap::FlushAllocStack() {
@@ -769,6 +766,16 @@
   allocation_stack_->Reset();
 }
 
+size_t Heap::GetUsedMemorySize() const {
+  size_t total = num_bytes_allocated_;
+  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+    if ((*it)->IsZygoteSpace()) {
+      total += (*it)->AsAllocSpace()->Size();
+    }
+  }
+  return total;
+}
+
 void Heap::MarkStackAsLive(MarkStack* alloc_stack) {
   // We can just assume everything is inside the alloc_space_'s bitmap since we should only have
   // fresh allocations.
@@ -1021,11 +1028,11 @@
   uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
   if (duration > MsToNs(50)) {
     const size_t percent_free = GetPercentFree();
-    const size_t num_bytes_allocated = num_bytes_allocated_;
+    const size_t current_heap_size = GetUsedMemorySize();
     const size_t total_memory = GetTotalMemory();
     LOG(INFO) << (gc_type == GC_PARTIAL ? "Partial " : (gc_type == GC_STICKY ? "Sticky " : ""))
               << "GC freed " << PrettySize(bytes_freed) << ", " << percent_free << "% free, "
-              << PrettySize(num_bytes_allocated) << "/" << PrettySize(total_memory) << ", "
+              << PrettySize(current_heap_size) << "/" << PrettySize(total_memory) << ", "
               << "paused " << PrettyDuration(duration);
   }
 
@@ -1250,11 +1257,11 @@
   uint64_t duration = (NanoTime() - root_begin) / 1000 * 1000;
   if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) {
     const size_t percent_free = GetPercentFree();
-    const size_t num_bytes_allocated = num_bytes_allocated_;
+    const size_t current_heap_size = GetUsedMemorySize();
     const size_t total_memory = GetTotalMemory();
     LOG(INFO) << (gc_type == GC_PARTIAL ? "Partial " : (gc_type == GC_STICKY ? "Sticky " : ""))
               << "Concurrent GC freed " << PrettySize(bytes_freed) << ", " << percent_free
-              << "% free, " << PrettySize(num_bytes_allocated) << "/"
+              << "% free, " << PrettySize(current_heap_size) << "/"
               << PrettySize(total_memory) << ", " << "paused " << PrettyDuration(pause_roots)
               << "+" << PrettyDuration(pause_dirty) << " total " << PrettyDuration(duration);
   }
@@ -1339,7 +1346,7 @@
     }
 
     // Calculate when to perform the next ConcurrentGC.
-    if (GetTotalMemory() - num_bytes_allocated_ < concurrent_min_free_) {
+    if (GetTotalMemory() - GetUsedMemorySize() < concurrent_min_free_) {
       // Not enough free memory to perform concurrent GC.
       concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
     } else {
diff --git a/src/heap.h b/src/heap.h
index 1974b10..72a63bd 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -230,6 +230,7 @@
   size_t GetObjectsAllocated() const LOCKS_EXCLUDED(statistics_lock_);
   size_t GetConcurrentStartSize() const LOCKS_EXCLUDED(statistics_lock_);
   size_t GetConcurrentMinFree() const LOCKS_EXCLUDED(statistics_lock_);
+  size_t GetUsedMemorySize() const LOCKS_EXCLUDED(statistics_lock_);
 
   // Functions for getting the bitmap which corresponds to an object's address.
   // This is probably slow, TODO: use better data structure like binary tree .