Trim the native heap too, like Dalvik did.

Change-Id: I240f33937ffe7a3d3df2a66cf5b4ee5296bce16d
diff --git a/src/debugger.cc b/src/debugger.cc
index 581645d..c70ca94 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -36,13 +36,6 @@
 #include "thread_list.h"
 #include "well_known_classes.h"
 
-extern "C" void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*), void*);
-#ifndef HAVE_ANDROID_OS
-void dlmalloc_walk_heap(void(*)(const void*, size_t, const void*, size_t, void*), void*) {
-  // No-op for glibc.
-}
-#endif
-
 namespace art {
 
 static const size_t kMaxAllocRecordStackDepth = 16; // Max 255.
diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc
index fbc2a01..a500f6a 100644
--- a/src/native/dalvik_system_VMRuntime.cc
+++ b/src/native/dalvik_system_VMRuntime.cc
@@ -28,6 +28,9 @@
 #include "thread_list.h"
 #include "toStringArray.h"
 
+extern "C" int dlmalloc_trim(size_t);
+extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);
+
 namespace art {
 
 static jfloat VMRuntime_getTargetHeapUtilization(JNIEnv*, jobject) {
@@ -157,11 +160,22 @@
 
 static void VMRuntime_trimHeap(JNIEnv*, jobject) {
   ScopedHeapLock heap_lock;
+
+  // Trim the managed heap.
   Heap* heap = Runtime::Current()->GetHeap();
   size_t alloc_space_size = heap->GetAllocSpace()->Size();
   float utilization = static_cast<float>(heap->GetBytesAllocated()) / alloc_space_size;
   uint64_t start_ns = NanoTime();
   heap->GetAllocSpace()->Trim();
+
+  // Trim the native heap.
+  dlmalloc_trim(0);
+#if 0 // TODO: switch over to this when bionic has moved to dlmalloc 2.8.5
+  dlmalloc_inspect_all(MspaceMadviseCallback, NULL);
+#else
+  dlmalloc_walk_free_pages(MspaceMadviseCallback, NULL);
+#endif
+
   LOG(INFO) << "Parallel heap trimming took " << PrettyDuration(NanoTime() - start_ns)
             << " on a " << PrettySize(alloc_space_size)
             << " heap with " << static_cast<int>(100 * utilization) << "% utilization";
diff --git a/src/space.cc b/src/space.cc
index 15b7caf..5aceed7 100644
--- a/src/space.cc
+++ b/src/space.cc
@@ -234,29 +234,30 @@
   return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) + kChunkOverhead;
 }
 
-// Call back from mspace_inspect_all returning the start and end of chunks and the bytes used,
-// if used_bytes is 0 then it indicates the range isn't in use and we madvise to the system that
-// we don't need it
-static void DontNeed(void* start, void* end, size_t used_bytes, void* /*num_bytes*/) {
-  if (used_bytes == 0) {
-    start = reinterpret_cast<void*>(RoundUp((uintptr_t)start, kPageSize));
-    end = reinterpret_cast<void*>(RoundDown((uintptr_t)end, kPageSize));
-    if (end > start) {
-      // We have a page aligned region to madvise on
-      size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
-      CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
-    }
+void MspaceMadviseCallback(void* start, void* end, void* /*arg*/) {
+  // Do we have any whole pages to give back?
+  start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
+  end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
+  if (end > start) {
+    size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
+    CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
   }
 }
 
-void AllocSpace::Trim() {
-  // Trim to release memory at the end of the space
-  mspace_trim(mspace_, 0);
-  // Visit space looking for page size holes to advise we don't need
-  size_t num_bytes_released = 0;
-  mspace_inspect_all(mspace_, DontNeed, &num_bytes_released);
+void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
+  // Is this chunk in use?
+  if (used_bytes != 0) {
+    return;
+  }
+  return MspaceMadviseCallback(start, end, arg);
 }
 
+void AllocSpace::Trim() {
+  // Trim to release memory at the end of the space.
+  mspace_trim(mspace_, 0);
+  // Visit space looking for page-sized holes to advise the kernel we don't need.
+  mspace_inspect_all(mspace_, MspaceMadviseCallback, NULL);
+}
 
 void AllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                       void* arg) {
diff --git a/src/space.h b/src/space.h
index b1fb55d..ca7f1f1 100644
--- a/src/space.h
+++ b/src/space.h
@@ -138,7 +138,7 @@
     return mspace_;
   }
 
-  // Hand unused pages back to the system.
+  // Hands unused pages back to the system.
   void Trim();
 
   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
@@ -240,6 +240,12 @@
   DISALLOW_COPY_AND_ASSIGN(ImageSpace);
 };
 
+// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
+// pages back to the kernel.
+void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /*arg*/);
+// Callback for the obsolete dlmalloc_walk_free_pages.
+void MspaceMadviseCallback(void* start, void* end, void* /*arg*/);
+
 }  // namespace art
 
 #endif  // ART_SRC_SPACE_H_