Merge "Fix GC memory overhead accounting." into lmp-dev
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8247911..47e1709 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -86,7 +86,6 @@
 static constexpr bool kUseFreeListSpaceForLOS = false;
 // Whether or not we compact the zygote in PreZygoteFork.
 static constexpr bool kCompactZygote = kMovingCollector;
-static constexpr size_t kNonMovingSpaceCapacity = 64 * MB;
 // How many reserve entries are at the end of the allocation stack; these are only needed if the
 // allocation stack overflows.
 static constexpr size_t kAllocationStackReserveSize = 1024;
@@ -99,10 +98,11 @@
 static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
-           double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
-           const std::string& image_file_name, const InstructionSet image_instruction_set,
-           CollectorType foreground_collector_type, CollectorType background_collector_type,
-           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
+           double target_utilization, double foreground_heap_growth_multiplier,
+           size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
+           const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
+           CollectorType background_collector_type, size_t parallel_gc_threads,
+           size_t conc_gc_threads, bool low_memory_mode,
            size_t long_pause_log_threshold, size_t long_gc_log_threshold,
            bool ignore_max_footprint, bool use_tlab,
            bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
@@ -215,7 +215,7 @@
   }
   /*
   requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-                                     +-  nonmoving space (kNonMovingSpaceCapacity) +-
+                                     +-  nonmoving space (non_moving_space_capacity)+-
                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                      +-main alloc space / bump space 1 (capacity_) +-
                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
@@ -242,7 +242,7 @@
   std::unique_ptr<MemMap> main_mem_map_2;
   byte* request_begin = requested_alloc_space_begin;
   if (request_begin != nullptr && separate_non_moving_space) {
-    request_begin += kNonMovingSpaceCapacity;
+    request_begin += non_moving_space_capacity;
   }
   std::string error_str;
   std::unique_ptr<MemMap> non_moving_space_mem_map;
@@ -251,7 +251,7 @@
     // address.
     non_moving_space_mem_map.reset(
         MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
-                             kNonMovingSpaceCapacity, PROT_READ | PROT_WRITE, true, &error_str));
+                             non_moving_space_capacity, PROT_READ | PROT_WRITE, true, &error_str));
     CHECK(non_moving_space_mem_map != nullptr) << error_str;
   }
   // Attempt to create 2 mem maps at or after the requested begin.
@@ -272,7 +272,7 @@
     // active rosalloc spaces.
     const size_t size = non_moving_space_mem_map->Size();
     non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
-        non_moving_space_mem_map.release(), "zygote / non moving space", initial_size,
+        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
         initial_size, size, size, false);
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
@@ -492,14 +492,33 @@
   }
 }
 
-void Heap::DisableCompaction() {
+void Heap::DisableMovingGc() {
   if (IsMovingGc(foreground_collector_type_)) {
-    foreground_collector_type_  = kCollectorTypeCMS;
+    foreground_collector_type_ = kCollectorTypeCMS;
   }
   if (IsMovingGc(background_collector_type_)) {
     background_collector_type_ = foreground_collector_type_;
   }
   TransitionCollector(foreground_collector_type_);
+  ThreadList* tl = Runtime::Current()->GetThreadList();
+  Thread* self = Thread::Current();
+  ScopedThreadStateChange tsc(self, kSuspended);
+  tl->SuspendAll();
+  // Something may have caused the transition to fail.
+  if (!IsMovingGc(foreground_collector_type_) && non_moving_space_ != main_space_) {
+    CHECK(main_space_ != nullptr);
+    // The allocation stack may have non movable objects in it. We need to flush it since the GC
+    // can only handle marking allocation stack objects of one non moving space and one main
+    // space.
+    {
+      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+      FlushAllocStack();
+    }
+    main_space_->DisableMovingObjects();
+    non_moving_space_ = main_space_;
+    CHECK(!non_moving_space_->CanMoveObjects());
+  }
+  tl->ResumeAll();
 }
 
 std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
@@ -1279,41 +1298,73 @@
     return nullptr;
   }
   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
-  if (ptr == nullptr && use_homogeneous_space_compaction_for_oom_) {
+  if (ptr == nullptr) {
     const uint64_t current_time = NanoTime();
-    if ((allocator == kAllocatorTypeRosAlloc || allocator == kAllocatorTypeDlMalloc) &&
-        current_time - last_time_homogeneous_space_compaction_by_oom_ >
-        min_interval_homogeneous_space_compaction_by_oom_) {
-      last_time_homogeneous_space_compaction_by_oom_ = current_time;
-      HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
-      switch (result) {
-        case HomogeneousSpaceCompactResult::kSuccess:
-          // If the allocation succeeded, we delayed an oom.
-          ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
-          if (ptr != nullptr) {
-            count_delayed_oom_++;
+    switch (allocator) {
+      case kAllocatorTypeRosAlloc:
+        // Fall-through.
+      case kAllocatorTypeDlMalloc: {
+        if (use_homogeneous_space_compaction_for_oom_ &&
+            current_time - last_time_homogeneous_space_compaction_by_oom_ >
+            min_interval_homogeneous_space_compaction_by_oom_) {
+          last_time_homogeneous_space_compaction_by_oom_ = current_time;
+          HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+          switch (result) {
+            case HomogeneousSpaceCompactResult::kSuccess:
+              // If the allocation succeeded, we delayed an oom.
+              ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+                                              usable_size);
+              if (ptr != nullptr) {
+                count_delayed_oom_++;
+              }
+              break;
+            case HomogeneousSpaceCompactResult::kErrorReject:
+              // Reject due to disabled moving GC.
+              break;
+            case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
+              // Throw OOM by default.
+              break;
+            default: {
+              LOG(FATAL) << "Unimplemented homogeneous space compaction result "
+                         << static_cast<size_t>(result);
+            }
           }
-          break;
-        case HomogeneousSpaceCompactResult::kErrorReject:
-          // Reject due to disabled moving GC.
-          break;
-        case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
-          // Throw OOM by default.
-          break;
-        default: {
-          LOG(FATAL) << "Unimplemented homogeneous space compaction result " << static_cast<size_t>(result);
+          // Always print that we ran homogeneous space compaction since this can cause jank.
+          VLOG(heap) << "Ran heap homogeneous space compaction, "
+                    << " requested defragmentation "
+                    << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                    << " performed defragmentation "
+                    << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                    << " ignored homogeneous space compaction "
+                    << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                    << " delayed count = "
+                    << count_delayed_oom_.LoadSequentiallyConsistent();
         }
+        break;
       }
-      // Always print that we ran homogeneous space compation since this can cause jank.
-      VLOG(heap) << "Ran heap homogeneous space compaction, "
-                << " requested defragmentation "
-                << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
-                << " performed defragmentation "
-                << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
-                << " ignored homogeneous space compaction "
-                << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
-                << " delayed count = "
-                << count_delayed_oom_.LoadSequentiallyConsistent();
+      case kAllocatorTypeNonMoving: {
+        // Try to transition the heap if the allocation failure was due to the space being full.
+        if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
+          // If we aren't out of memory then the OOM was probably from the non moving space being
+          // full. Attempt to disable compaction and turn the main space into a non moving space.
+          DisableMovingGc();
+          // If we are still a moving GC then something must have caused the transition to fail.
+          if (IsMovingGc(collector_type_)) {
+            MutexLock mu(self, *gc_complete_lock_);
+            // If we couldn't disable moving GC, just throw OOME and return null.
+            LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
+                         << disable_moving_gc_count_;
+          } else {
+            LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
+            ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+                                            usable_size);
+          }
+        }
+        break;
+      }
+      default: {
+        // Do nothing for other allocators.
+      }
     }
   }
   // If the allocation hasn't succeeded by this point, throw an OOM error.
@@ -1490,9 +1541,10 @@
     WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
    // Homogeneous space compaction is a copying transition; we can't run it if the moving GC disable count
     // is non zero.
-    // If the collecotr type changed to something which doesn't benefit from homogeneous space compaction,
+    // If the collector type changed to something which doesn't benefit from homogeneous space compaction,
     // exit.
-    if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_)) {
+    if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
+        !main_space_->CanMoveObjects()) {
       return HomogeneousSpaceCompactResult::kErrorReject;
     }
     collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
@@ -1551,8 +1603,9 @@
   Thread* const self = Thread::Current();
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
   Locks::mutator_lock_->AssertNotHeld(self);
-  const bool copying_transition =
-      IsMovingGc(background_collector_type_) || IsMovingGc(foreground_collector_type_);
+  // Currently we only need a heap transition if we switch from a moving collector to a non moving
+  // one, or vice versa.
+  const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
   // Busy wait until we can GC (StartGC can fail if we have a non-zero
   // compacting_gc_disable_count_, this should rarely occurs).
   for (;;) {
@@ -1851,9 +1904,9 @@
   // The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
   // there.
   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-  // Change the collector to the post zygote one.
-  bool same_space = non_moving_space_ == main_space_;
+  const bool same_space = non_moving_space_ == main_space_;
   if (kCompactZygote) {
+    // Can't compact if the non moving space is the same as the main space.
     DCHECK(semi_space_collector_ != nullptr);
     // Temporarily disable rosalloc verification because the zygote
     // compaction will mess up the rosalloc internal metadata.
@@ -1898,6 +1951,7 @@
     non_moving_space_->SetLimit(target_space.Limit());
     VLOG(heap) << "Zygote space size " << non_moving_space_->Size() << " bytes";
   }
+  // Change the collector to the post zygote one.
   ChangeCollector(foreground_collector_type_);
   // Save the old space so that we can remove it after we complete creating the zygote space.
   space::MallocSpace* old_alloc_space = non_moving_space_;
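
The kAllocatorTypeNonMoving fallback added above is a one-way switch:
only when the failure is space-specific (the heap as a whole is not out
of memory) do we pay the permanent cost of DisableMovingGc(), after
which the main space doubles as the non moving space. A minimal
standalone model of that decision, using invented names (ToyHeap,
kNonMovingCapacity, kTotalCapacity) that are not ART code:

    #include <cstddef>
    #include <cstdio>

    // Toy model of the non moving space fallback; sizes are hypothetical.
    struct ToyHeap {
      static constexpr size_t kNonMovingCapacity = 64 * 1024 * 1024;
      static constexpr size_t kTotalCapacity = 256 * 1024 * 1024;
      size_t used = 0;
      bool moving_gc_enabled = true;

      bool TryAllocateNonMoving(size_t size) {
        const size_t limit =
            moving_gc_enabled ? kNonMovingCapacity : kTotalCapacity;
        if (used + size <= limit) {
          used += size;
          return true;
        }
        // The dedicated space is full. If the heap as a whole still has
        // headroom, take the one-way switch (DisableMovingGc() in the real
        // code) and retry against the main space.
        if (moving_gc_enabled && used + size <= kTotalCapacity) {
          moving_gc_enabled = false;
          return TryAllocateNonMoving(size);
        }
        return false;  // genuine OOM; the caller throws OOME
      }
    };

    int main() {
      ToyHeap heap;
      heap.TryAllocateNonMoving(ToyHeap::kNonMovingCapacity);  // fill it
      std::printf("retry %s, moving GC %s\n",
                  heap.TryAllocateNonMoving(1024) ? "succeeded" : "failed",
                  heap.moving_gc_enabled ? "enabled" : "disabled");
    }
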
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d91f5af..c2ba9f8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -134,6 +134,7 @@
   static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
+  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
   static constexpr size_t kDefaultMaxFree = 2 * MB;
   static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
   static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
@@ -156,6 +157,7 @@
   explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                 size_t max_free, double target_utilization,
                 double foreground_heap_growth_multiplier, size_t capacity,
+                size_t non_moving_space_capacity,
                 const std::string& original_image_file_name,
                 InstructionSet image_instruction_set,
                 CollectorType foreground_collector_type, CollectorType background_collector_type,
@@ -255,7 +257,7 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Returns true if there is any chance that the object (obj) will move.
-  bool IsMovableObject(const mirror::Object* obj) const;
+  bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  // Enables us to prevent compacting GC until objects are released.
   void IncrementDisableMovingGC(Thread* self);
@@ -514,8 +516,8 @@
   // Assumes there is only one image space.
   space::ImageSpace* GetImageSpace() const;
 
-  // Permenantly disable compaction.
-  void DisableCompaction();
+  // Permanently disable moving garbage collection.
+  void DisableMovingGc();
 
   space::DlMallocSpace* GetDlMallocSpace() const {
     return dlmalloc_space_;
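
The new SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) on IsMovableObject
appears to follow from the heap.cc change: now that DisableMovingGc()
can reassign non_moving_space_ (with all threads suspended), whether an
object can move only has a stable answer while the mutator lock is held.
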
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index a52b92b..bace3f6 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,6 +133,10 @@
     return can_move_objects_;
   }
 
+  void DisableMovingObjects() {
+    can_move_objects_ = false;
+  }
+
  protected:
   MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
               byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
@@ -175,7 +179,7 @@
   size_t growth_limit_;
 
   // True if objects in the space are movable.
-  const bool can_move_objects_;
+  bool can_move_objects_;
 
  // Starting and initial sizes, used when you reset the space.
   const size_t starting_size_;
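
Dropping the const qualifier from can_move_objects_ is what makes
DisableMovingObjects() possible. Note the flag is only ever cleared,
never re-set, and in this change only while every thread is suspended
inside DisableMovingGc(), so concurrent readers never observe it change.
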
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 107efd7..0b6e3b2 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -246,7 +246,7 @@
     }
     // Contended.
     const bool log_contention = (lock_profiling_threshold_ != 0);
-    uint64_t wait_start_ms = log_contention ? 0 : MilliTime();
+    uint64_t wait_start_ms = log_contention ? MilliTime() : 0;
     mirror::ArtMethod* owners_method = locking_method_;
     uint32_t owners_dex_pc = locking_dex_pc_;
     // Do this before releasing the lock so that we don't get deflated.
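
The monitor.cc hunk is an independent one-line fix: the ternary arms
were swapped, so wait_start_ms was 0 exactly when lock profiling was
enabled, and logged contention times were measured from the epoch
instead of from the start of the wait. A sketch of the corrected
pattern, with MilliTimeStub and the threshold value standing in for
ART's MilliTime() and lock_profiling_threshold_:

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for ART's MilliTime(); not the real implementation.
    static uint64_t MilliTimeStub() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(
          steady_clock::now().time_since_epoch()).count();
    }

    int main() {
      const uint64_t lock_profiling_threshold = 10;  // hypothetical value
      const bool log_contention = (lock_profiling_threshold != 0);
      // Fixed form: sample the clock only when the result will be logged.
      const uint64_t wait_start_ms = log_contention ? MilliTimeStub() : 0;
      // ... the contended lock acquisition would happen here ...
      if (log_contention) {
        const uint64_t wait_ms = MilliTimeStub() - wait_start_ms;
        if (wait_ms > lock_profiling_threshold) {
          std::printf("contended for %llu ms\n",
                      static_cast<unsigned long long>(wait_ms));
        }
      }
    }
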
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index dd5a6c9..9818385 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -182,6 +182,7 @@
   heap_maximum_size_ = gc::Heap::kDefaultMaximumSize;
   heap_min_free_ = gc::Heap::kDefaultMinFree;
   heap_max_free_ = gc::Heap::kDefaultMaxFree;
+  heap_non_moving_space_capacity_ = gc::Heap::kDefaultNonMovingSpaceCapacity;
   heap_target_utilization_ = gc::Heap::kDefaultTargetUtilization;
   foreground_heap_growth_multiplier_ = gc::Heap::kDefaultHeapGrowthMultiplier;
  heap_growth_limit_ = 0;  // 0 means no growth limit.
@@ -353,6 +354,14 @@
         return false;
       }
       heap_max_free_ = size;
+    } else if (StartsWith(option, "-XX:NonMovingSpaceCapacity=")) {
+      size_t size = ParseMemoryOption(
+          option.substr(strlen("-XX:NonMovingSpaceCapacity=")).c_str(), 1024);
+      if (size == 0) {
+        Usage("Failed to parse memory option %s\n", option.c_str());
+        return false;
+      }
+      heap_non_moving_space_capacity_ = size;
     } else if (StartsWith(option, "-XX:HeapTargetUtilization=")) {
       if (!ParseDouble(option, '=', 0.1, 0.9, &heap_target_utilization_)) {
         return false;
@@ -755,6 +764,7 @@
   UsageMessage(stream, "  -XX:HeapGrowthLimit=N\n");
   UsageMessage(stream, "  -XX:HeapMinFree=N\n");
   UsageMessage(stream, "  -XX:HeapMaxFree=N\n");
+  UsageMessage(stream, "  -XX:NonMovingSpaceCapacity=N\n");
   UsageMessage(stream, "  -XX:HeapTargetUtilization=doublevalue\n");
   UsageMessage(stream, "  -XX:ForegroundHeapGrowthMultiplier=doublevalue\n");
   UsageMessage(stream, "  -XX:LowMemoryMode\n");
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 0377e7d..f484e78 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -69,6 +69,7 @@
   size_t heap_growth_limit_;
   size_t heap_min_free_;
   size_t heap_max_free_;
+  size_t heap_non_moving_space_capacity_;
   double heap_target_utilization_;
   double foreground_heap_growth_multiplier_;
   unsigned int parallel_gc_threads_;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 855effa..0f419b1 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -596,6 +596,7 @@
                        options->heap_target_utilization_,
                        options->foreground_heap_growth_multiplier_,
                        options->heap_maximum_size_,
+                       options->heap_non_moving_space_capacity_,
                        options->image_,
                        options->image_isa_,
                        options->collector_type_,
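
The Heap constructor takes these arguments positionally, so
options->heap_non_moving_space_capacity_ slots in directly after
options->heap_maximum_size_, matching non_moving_space_capacity being
added right after capacity in heap.h.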