Merge "Fix heap trimmer daemon sleeping." into lmp-dev
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4d8dbc8..3dfa8ad 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -120,7 +120,7 @@
       desired_collector_type_(foreground_collector_type_),
       heap_trim_request_lock_(nullptr),
       last_trim_time_(0),
-      heap_transition_or_trim_target_time_(0),
+      last_heap_transition_time_(0),
       heap_trim_request_pending_(false),
       parallel_gc_threads_(parallel_gc_threads),
       conc_gc_threads_(conc_gc_threads),
@@ -931,35 +931,6 @@
 }
 
 void Heap::DoPendingTransitionOrTrim() {
-  Thread* self = Thread::Current();
-  CollectorType desired_collector_type;
-  // Wait until we reach the desired transition time.
-  while (true) {
-    uint64_t wait_time;
-    {
-      MutexLock mu(self, *heap_trim_request_lock_);
-      desired_collector_type = desired_collector_type_;
-      uint64_t current_time = NanoTime();
-      if (current_time >= heap_transition_or_trim_target_time_) {
-        break;
-      }
-      wait_time = heap_transition_or_trim_target_time_ - current_time;
-    }
-    ScopedThreadStateChange tsc(self, kSleeping);
-    usleep(wait_time / 1000);  // Usleep takes microseconds.
-  }
-  // Launch homogeneous space compaction if it is desired.
-  if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
-    if (!CareAboutPauseTimes()) {
-      PerformHomogeneousSpaceCompact();
-    }
-    // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
-    desired_collector_type = collector_type_;
-    return;
-  }
-  // Transition the collector if the desired collector type is not the same as the current
-  // collector type.
-  TransitionCollector(desired_collector_type);
   if (!CareAboutPauseTimes()) {
     // Deflate the monitors; this can cause a pause but shouldn't matter since we don't care
     // about pauses.
@@ -971,7 +942,23 @@
         << PrettyDuration(NanoTime() - start_time);
     runtime->GetThreadList()->ResumeAll();
   }
-  // Do a heap trim if it is needed.
+  if (NanoTime() - last_heap_transition_time_ > kCollectorTransitionWait) {
+    // Launch homogeneous space compaction if it is desired.
+    if (desired_collector_type_ == kCollectorTypeHomogeneousSpaceCompact) {
+      if (!CareAboutPauseTimes()) {
+        PerformHomogeneousSpaceCompact();
+        last_heap_transition_time_ = NanoTime();
+      }
+      desired_collector_type_ = collector_type_;
+    } else {
+      // Transition the collector if the desired collector type is not the same as the current
+      // collector type.
+      TransitionCollector(desired_collector_type_);
+      last_heap_transition_time_ = NanoTime();
+    }
+  }
+  // Do a heap trim if it is needed. This is good to do even with hspace compaction since it may
+  // trim the native heap and dlmalloc spaces.
   Trim();
 }
 
@@ -2994,8 +2981,6 @@
     if (desired_collector_type_ == desired_collector_type) {
       return;
     }
-    heap_transition_or_trim_target_time_ =
-        std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
     desired_collector_type_ = desired_collector_type;
   }
   SignalHeapTrimDaemon(self);
@@ -3031,10 +3016,6 @@
       return;
     }
     heap_trim_request_pending_ = true;
-    uint64_t current_time = NanoTime();
-    if (heap_transition_or_trim_target_time_ < current_time) {
-      heap_transition_or_trim_target_time_ = current_time + kHeapTrimWait;
-    }
   }
   // Notify the daemon thread which will actually do the heap trim.
   SignalHeapTrimDaemon(self);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d3d613f..2fe63e2 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -824,8 +824,8 @@
   Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   // When we want to perform the next heap trim (nanoseconds).
   uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
-  // When we want to perform the next heap transition (nano seconds) or heap trim.
-  uint64_t heap_transition_or_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
+  // When we last performed a heap transition or hspace compact.
+  uint64_t last_heap_transition_time_;
   // If we have a heap trim request pending.
   bool heap_trim_request_pending_ GUARDED_BY(heap_trim_request_lock_);
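
What follows is a minimal standalone sketch, not part of the patch above, illustrating the rate-limiting pattern the new DoPendingTransitionOrTrim() uses: instead of sleeping until a precomputed heap_transition_or_trim_target_time_, the daemon records when it last transitioned and only transitions again once kCollectorTransitionWait has elapsed, while trimming on every request. The names below (TrimDaemon, PerformTransition, Trim) and the five-second wait value are illustrative stand-ins, not ART's actual definitions.

#include <chrono>
#include <cstdio>

using Clock = std::chrono::steady_clock;

// Illustrative value only; ART defines its own kCollectorTransitionWait.
constexpr std::chrono::seconds kCollectorTransitionWait(5);

class TrimDaemon {
 public:
  // Called whenever the daemon wakes up on a trim/transition request.
  void DoPendingTransitionOrTrim() {
    const Clock::time_point now = Clock::now();
    // Rate-limit transitions by elapsed time instead of sleeping until a
    // target time: transition only if the wait interval has passed.
    if (now - last_transition_time_ > kCollectorTransitionWait) {
      PerformTransition();
      last_transition_time_ = now;
    }
    // Trim on every request, even when the transition above was skipped.
    Trim();
  }

 private:
  void PerformTransition() { std::puts("transition"); }
  void Trim() { std::puts("trim"); }

  // Value-initialized to the clock epoch, so the very first call transitions.
  Clock::time_point last_transition_time_{};
};

int main() {
  TrimDaemon daemon;
  daemon.DoPendingTransitionOrTrim();  // Transitions and trims.
  daemon.DoPendingTransitionOrTrim();  // Inside the wait window: trims only.
  return 0;
}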