Space trim and other unit tests. General space cleanup.

The space unit tests now include checks on space invariants, in
particular those relating to footprint and size.
Out-of-date comments have been removed.
This patch also adds PrettySize and PrettyDuration helper functions to
make size and duration strings in log output more human readable.
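
For illustration, a log statement built with the new helpers might look
like the following minimal sketch (bytes_freed and start_ns are
hypothetical names; the output shown is an example only):

    // Hypothetical call site using the helpers added by this patch.
    uint64_t duration_ns = NanoTime() - start_ns;
    LOG(INFO) << "GC freed " << PrettySize(bytes_freed)
              << ", paused " << PrettyDuration(duration_ns);
    // e.g. "GC freed 1MB, paused 2.327ms"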

Change-Id: I6bc05b2db0d0115b97d666b832fce57bcdd2e091
diff --git a/src/compiler.cc b/src/compiler.cc
index f7c0297..ed5f56c 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -62,23 +62,24 @@
   STLDeleteValues(&compiled_invoke_stubs_);
   if (dex_file_count_ > 0) {
     uint64_t duration_ns = NanoTime() - start_ns_;
-    uint64_t duration_ms = NsToMs(duration_ns);
     std::string stats(StringPrintf("Compiled files:%zd"
                                    " classes:%zd"
                                    " methods:(abstract:%zd"
                                    " native:%zd"
                                    " regular:%zd)"
-                                   " instructions:%zd"
-                                   " (took %llums",
+                                   " instructions:%zd",
                                    dex_file_count_,
                                    class_count_,
                                    abstract_method_count_,
                                    native_method_count_,
                                    regular_method_count_,
-                                   instruction_count_,
-                                   duration_ms));
+                                   instruction_count_));
+    stats += " (took ",
+    stats += PrettyDuration(duration_ns);
     if (instruction_count_ != 0) {
-        stats += StringPrintf(", %llu ns/instruction", duration_ns/instruction_count_);
+        stats += ", ";
+        stats += PrettyDuration(duration_ns / instruction_count_);
+        stats += "/instruction";
     }
     stats += ")";
     LOG(INFO) << stats;
@@ -450,10 +451,10 @@
                                        dex_file, kThumb2);
     CHECK(compiled_method != NULL) << PrettyMethod(method_idx, dex_file);
   }
-  uint64_t duration_ms = NsToMs(NanoTime() - start_ns);
-  if (duration_ms > 10) {
+  uint64_t duration_ns = NanoTime() - start_ns;
+  if (duration_ns > MsToNs(10)) {
     LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file)
-                 << " took " << duration_ms << "ms";
+                 << " took " << PrettyDuration(duration_ns);
   }
 
   if (compiled_method != NULL) {
diff --git a/src/dex2oat.cc b/src/dex2oat.cc
index cd977b5..171ae3a 100644
--- a/src/dex2oat.cc
+++ b/src/dex2oat.cc
@@ -96,7 +96,7 @@
 
   ~Dex2Oat() {
     delete runtime_;
-    LOG(INFO) << "dex2oat took " << NsToMs(NanoTime() - start_ns_) << "ms";
+    LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_);
   }
 
   // Make a list of descriptors for classes to include in the image
diff --git a/src/heap.cc b/src/heap.cc
index 858b9b2..6272bff 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -105,7 +105,7 @@
   AddSpace(alloc_space_);
   UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
   byte* heap_begin = first_space->Begin();
-  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->UnimpededCapacity();
+  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->NonGrowthLimitCapacity();
 
   // Allocate the initial live bitmap.
   UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create("dalvik-bitmap-1", heap_begin, heap_capacity));
@@ -366,8 +366,8 @@
     // OLD-TODO: may want to grow a little bit more so that the amount of
     //       free space is equal to the old free space + the
     //       utilization slop for the new allocation.
-    VLOG(gc) << "Grow heap (frag case) to " << (new_footprint/KB) << "KiB "
-             << "for a " << alloc_size << "-byte allocation";
+    VLOG(gc) << "Grow heap (frag case) to " << PrettySize(new_footprint)
+             << "for a " << PrettySize(alloc_size) << " allocation";
     return ptr;
   }
 
@@ -378,14 +378,14 @@
   // cleared before throwing an OOME.
 
   // OLD-TODO: wait for the finalizers from the previous GC to finish
-  VLOG(gc) << "Forcing collection of SoftReferences for " << alloc_size << "-byte allocation";
+  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) << " allocation";
   CollectGarbageInternal(true);
   ptr = space->AllocWithGrowth(alloc_size);
   if (ptr != NULL) {
     return ptr;
   }
 
-  LOG(ERROR) << "Out of memory on a " << alloc_size << "-byte allocation";
+  LOG(ERROR) << "Out of memory on a " << PrettySize(alloc_size) << " allocation";
 
   // TODO: tell the HeapSource to dump its state
   // TODO: dump stack traces for all threads
@@ -509,21 +509,22 @@
 
   EnqueueClearedReferences(&cleared_references);
 
-  // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
-  size_t bytes_freed = initial_size - num_bytes_allocated_;
-  bool is_small = (bytes_freed > 0 && bytes_freed < 1024);
-  size_t kib_freed = (bytes_freed > 0 ? std::max(bytes_freed/KB, size_t(1U)) : 0);
-
-  size_t total = GetTotalMemory();
-  size_t percentFree = 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
-
-  uint32_t duration = (t1 - t0)/1000/1000;
-  bool gc_was_particularly_slow = (duration > 100); // TODO: crank this down for concurrent.
+  uint64_t duration_ns = t1 - t0;
+  bool gc_was_particularly_slow = duration_ns > MsToNs(100); // TODO: crank this down for concurrent.
   if (VLOG_IS_ON(gc) || gc_was_particularly_slow) {
-    LOG(INFO) << "GC freed " << (is_small ? "<" : "") << kib_freed << "KiB, "
-              << percentFree << "% free "
-              << (num_bytes_allocated_/KB) << "KiB/" << (total/KB) << "KiB, "
-              << "paused " << duration << "ms";
+    // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
+    size_t bytes_freed = initial_size - num_bytes_allocated_;
+    if (bytes_freed > KB) {  // round down to a whole KiB in the output if more than 1KiB was freed
+      bytes_freed = RoundDown(bytes_freed, KB);
+    }
+    size_t bytes_allocated = RoundUp(num_bytes_allocated_, KB);
+    // lose low nanoseconds in duration. TODO: make this part of PrettyDuration
+    duration_ns = (duration_ns / 1000) * 1000;
+    size_t total = GetTotalMemory();
+    size_t percentFree = 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
+    LOG(INFO) << "GC freed " << PrettySize(bytes_freed) << ", " << percentFree << "% free, "
+              << PrettySize(bytes_allocated) << "/" << PrettySize(total) << ", "
+              << "paused " << PrettyDuration(duration_ns);
   }
   Dbg::GcDidFinish();
   if (VLOG_IS_ON(heap)) {
@@ -535,49 +536,22 @@
   lock_->AssertHeld();
 }
 
-/* Terminology:
- *  1. Footprint: Capacity we allocate from system.
- *  2. Active space: a.k.a. alloc_space_.
- *  3. Soft footprint: external allocation + spaces footprint + active space footprint
- *  4. Overhead: soft footprint excluding active.
- *
- * Layout: (The spaces below might not be contiguous, but are lumped together to depict size.)
- * |----------------------spaces footprint--------- --------------|----active space footprint----|
- *                                                                |--active space allocated--|
- * |--------------------soft footprint (include active)--------------------------------------|
- * |----------------soft footprint excluding active---------------|
- *                                                                |------------soft limit-------...|
- * |------------------------------------ideal footprint-----------------------------------------...|
- *
- */
-
-// Sets the maximum number of bytes that the heap is allowed to
-// allocate from the system.  Clamps to the appropriate maximum
-// value.
-// Old spaces will count against the ideal size.
-//
 void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
   size_t alloc_space_capacity = alloc_space_->Capacity();
   if (max_allowed_footprint > alloc_space_capacity) {
-    VLOG(gc) << "Clamp target GC heap from " << (max_allowed_footprint/KB) << "KiB"
-             << " to " << (alloc_space_capacity/KB) << "KiB";
+    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint)
+             << " to " << PrettySize(alloc_space_capacity);
     max_allowed_footprint = alloc_space_capacity;
   }
-
   alloc_space_->SetFootprintLimit(max_allowed_footprint);
 }
 
-// kHeapIdealFree is the ideal maximum free size, when we grow the heap for
-// utilization.
+// kHeapIdealFree is the ideal maximum free size, when we grow the heap for utilization.
 static const size_t kHeapIdealFree = 2 * MB;
-// kHeapMinFree guarantees that you always have at least 512 KB free, when
-// you grow for utilization, regardless of target utilization ratio.
+// kHeapMinFree guarantees that you always have at least 512 KB free, when you grow for utilization,
+// regardless of target utilization ratio.
 static const size_t kHeapMinFree = kHeapIdealFree / 4;
 
-// Given the current contents of the active space, increase the allowed
-// heap footprint to match the target utilization ratio.  This should
-// only be called immediately after a full garbage collection.
-//
 void Heap::GrowForUtilization() {
   lock_->AssertHeld();
 
diff --git a/src/heap.h b/src/heap.h
index 678f876..ebb1c85 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -89,7 +89,8 @@
   // Implements VMDebug.countInstancesOfClass.
   static int64_t CountInstances(Class* c, bool count_assignable);
 
-  // Implements dalvik.system.VMRuntime.clearGrowthLimit.
+  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
+  // implement dalvik.system.VMRuntime.clearGrowthLimit.
   static void ClearGrowthLimit();
 
   // Target ideal heap utilization ratio, implements
@@ -104,8 +105,9 @@
     DCHECK_LT(target, 1.0f);
     target_utilization_ = target;
   }
-  // Sets the maximum number of bytes that the heap is allowed to allocate
-  // from the system.  Clamps to the appropriate maximum value.
+
+  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
+  // from the system. Doesn't allow the space to exceed its growth limit.
   static void SetIdealFootprint(size_t max_allowed_footprint);
 
   // Blocks the caller until the garbage collector becomes idle.
@@ -221,6 +223,9 @@
 
   static void CollectGarbageInternal(bool clear_soft_references);
 
+  // Given the current contents of the alloc space, increase the allowed heap footprint to match
+  // the target utilization ratio.  This should only be called immediately after a full garbage
+  // collection.
   static void GrowForUtilization();
 
   static void AddSpace(Space* space) {
@@ -283,6 +288,8 @@
   static bool verify_objects_;
 
   FRIEND_TEST(SpaceTest, AllocAndFree);
+  FRIEND_TEST(SpaceTest, AllocAndFreeList);
+  friend class SpaceTest;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
 };
diff --git a/src/space.cc b/src/space.cc
index 5cdeeeb..6ecca14 100644
--- a/src/space.cc
+++ b/src/space.cc
@@ -30,25 +30,35 @@
 AllocSpace* Space::CreateAllocSpace(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity,
                                     byte* requested_begin) {
+  // Memory we promise to dlmalloc before it asks for morecore.
+  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
+  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
+  // size of the large allocation) will be greater than the footprint limit.
+  size_t starting_size = kPageSize;
   uint64_t start_time = 0;
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     start_time = NanoTime();
     VLOG(startup) << "Space::CreateAllocSpace entering " << name
-                  << " initial_size=" << (initial_size / KB) << "KiB"
-                  << " growth_limit=" << (growth_limit / KB) << "KiB"
-                  << " capacity=" << (capacity / KB) << "KiB"
+                  << " initial_size=" << PrettySize(initial_size)
+                  << " growth_limit=" << PrettySize(growth_limit)
+                  << " capacity=" << PrettySize(capacity)
                   << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
   }
 
   // Sanity check arguments
+  if (starting_size > initial_size) {
+    initial_size = starting_size;
+  }
   if (initial_size > growth_limit) {
     LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
-        << initial_size << ") is larger than its capacity (" << growth_limit << ")";
+        << PrettySize(initial_size) << ") is larger than its capacity ("
+        << PrettySize(growth_limit) << ")";
     return NULL;
   }
   if (growth_limit > capacity) {
-    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity"
-        " (" << growth_limit << ") is larger than the capacity (" << capacity << ")";
+    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
+        << PrettySize(growth_limit) << ") is larger than the capacity ("
+        << PrettySize(capacity) << ")";
     return NULL;
   }
 
@@ -60,18 +70,18 @@
                                                  capacity, PROT_READ | PROT_WRITE));
   if (mem_map.get() == NULL) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
-        << capacity << " bytes";
+        << PrettySize(capacity);
     return NULL;
   }
 
-  void* mspace = AllocSpace::CreateMallocSpace(mem_map->Begin(), initial_size, capacity);
+  void* mspace = AllocSpace::CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
   if (mspace == NULL) {
     LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
     return NULL;
   }
 
-  // Protect memory beyond the initial size
-  byte* end = mem_map->Begin() + initial_size;
+  // Protect memory beyond the starting size.
+  byte* end = mem_map->Begin() + starting_size;
   if (capacity - initial_size > 0) {
     CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
   }
@@ -79,22 +89,22 @@
   // Everything is set so record in immutable structure and leave
   AllocSpace* space = new AllocSpace(name, mem_map.release(), mspace, end, growth_limit);
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
-    uint64_t duration_ms = (NanoTime() - start_time)/1000/1000;
-    LOG(INFO) << "Space::CreateAllocSpace exiting (" << duration_ms << " ms) " << *space;
+    LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
+        << " ) " << *space;
   }
   return space;
 }
 
-void* AllocSpace::CreateMallocSpace(void* begin, size_t size, size_t capacity) {
+void* AllocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
   // clear errno to allow PLOG on error
   errno = 0;
-  // create mspace using our backing storage starting at begin and of half the specified size.
-  // Don't use an internal dlmalloc lock (as we already hold heap lock). When size is exhaused
-  // morecore will be called.
-  void* msp = create_mspace_with_base(begin, size, false /*locked*/);
+  // create mspace using our backing storage starting at begin and with a footprint of
+  // morecore_start. Don't use an internal dlmalloc lock (as we already hold heap lock). When
+  // morecore_start bytes of memory are exhausted, morecore will be called.
+  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
   if (msp != NULL) {
     // Do not allow morecore requests to succeed beyond the initial size of the heap
-    mspace_set_footprint_limit(msp, size);
+    mspace_set_footprint_limit(msp, initial_size);
   } else {
     PLOG(ERROR) << "create_mspace_with_base failed";
   }
@@ -175,7 +185,7 @@
 void* AllocSpace::MoreCore(intptr_t increment) {
   byte* original_end = end_;
   if (increment != 0) {
-    VLOG(heap) << "AllocSpace::MoreCore " << (increment/KB) << "KiB";
+    VLOG(heap) << "AllocSpace::MoreCore " << PrettySize(increment);
     byte* new_end = original_end + increment;
     if (increment > 0) {
 #if DEBUG_SPACES
@@ -190,6 +200,11 @@
       CHECK_GT(original_end + increment, Begin());
 #endif
       // Advise we don't need the pages and protect them
+      // TODO: by removing permissions from the pages we may be causing TLB shoot-down which can be
+      // expensive (note the same isn't true for giving permissions to a page as the protected
+      // page shouldn't be in a TLB). We should investigate the performance impact of simply
+      // ignoring the memory protection change here and in Space::CreateAllocSpace. It's
+      // likely just a useful debug feature.
       size_t size = -increment;
       CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetSpaceName());
       CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetSpaceName());
@@ -238,10 +253,9 @@
 }
 
 void AllocSpace::SetFootprintLimit(size_t new_size) {
-  VLOG(heap) << "AllocSpace::SetFootprintLimit " << (new_size/KB) << "KiB";
+  VLOG(heap) << "AllocSpace::SetFootprintLimit " << PrettySize(new_size);
   // Compare against the actual footprint, rather than the Size(), because the heap may not have
   // grown all the way to the allowed size yet.
-  //
   size_t current_space_size = mspace_footprint(mspace_);
   if (new_size < current_space_size) {
     // Don't let the space grow any more.
@@ -310,8 +324,8 @@
 
   ImageSpace* space = new ImageSpace(image_file_name, map.release());
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
-    uint64_t duration_ms = (NanoTime() - start_time)/1000/1000;
-    LOG(INFO) << "Space::CreateImageSpace exiting (" << duration_ms << " ms) " << *space;
+    LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
+        << ") " << *space;
   }
   return space;
 }
@@ -333,8 +347,8 @@
     current += RoundUp(obj->SizeOf(), kObjectAlignment);
   }
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
-    uint64_t duration_ms = (NanoTime() - start_time)/1000/1000;
-    LOG(INFO) << "ImageSpace::RecordImageAllocations exiting (" << duration_ms << " ms)";
+    LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
+        << PrettyDuration(NanoTime() - start_time) << ")";
   }
 }
 
@@ -342,8 +356,7 @@
   os << (space.IsImageSpace() ? "Image" : "Alloc") << "Space["
       << "begin=" << reinterpret_cast<void*>(space.Begin())
       << ",end=" << reinterpret_cast<void*>(space.End())
-      << ",size=" << (space.Size()/KB) << "KiB"
-      << ",capacity=" << (space.Capacity()/KB) << "KiB"
+      << ",size=" << PrettySize(space.Size()) << ",capacity=" << PrettySize(space.Capacity())
       << ",name=\"" << space.GetSpaceName() << "\"]";
   return os;
 }
diff --git a/src/space.h b/src/space.h
index a932d7b..b1fb55d 100644
--- a/src/space.h
+++ b/src/space.h
@@ -78,8 +78,9 @@
     return mem_map_->Size();
   }
 
-  // Support for having an impediment (GrowthLimit) removed from the space
-  virtual size_t UnimpededCapacity() const {
+  // Size of the space without a limit on its growth. By default this is just the Capacity, but
+  // for the allocation space we support starting with a small heap and then extending it.
+  virtual size_t NonGrowthLimitCapacity() const {
     return Capacity();
   }
 
@@ -118,13 +119,13 @@
 // An alloc space is a space where objects may be allocated and garbage collected.
 class AllocSpace : public Space {
  public:
-  // Allocate num_bytes without allowing the underlying  mspace to grow
+  // Allocate num_bytes, allowing the underlying mspace to grow.
   Object* AllocWithGrowth(size_t num_bytes);
 
-  // Allocate num_bytes allowing the underlying mspace to grow
+  // Allocate num_bytes without allowing the underlying mspace to grow.
   Object* AllocWithoutGrowth(size_t num_bytes);
 
-  // Return the storage space required by obj
+  // Return the storage space required by obj.
   size_t AllocationSize(const Object* obj);
 
   void Free(Object* ptr);
@@ -137,24 +138,26 @@
     return mspace_;
   }
 
-  // Hand unused pages back to the system
+  // Hand unused pages back to the system.
   void Trim();
 
   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
-  // in use, indicated by num_bytes equaling zero
+  // in use, indicated by num_bytes equaling zero.
   void Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
             void* arg);
 
-  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore
+  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
   size_t GetFootprintLimit();
 
-  // Set the maximum number of bytes that the heap is allowed to obtain from the system via MoreCore
+  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
+  // MoreCore. Note this is used to stop the mspace from growing all the way to Capacity. When
+  // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
   void SetFootprintLimit(size_t limit);
 
-  // Removes the fork time growth limit (fence on capacity), allowing the application to allocate
-  // up to the maximum heap size.
+  // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
+  // maximum reserved size of the heap.
   void ClearGrowthLimit() {
-    growth_limit_ = UnimpededCapacity();
+    growth_limit_ = NonGrowthLimitCapacity();
   }
 
   // Override capacity so that we only return the possibly limited capacity
@@ -162,7 +165,8 @@
     return growth_limit_;
   }
 
-  virtual size_t UnimpededCapacity() const {
+  // The total amount of memory reserved for the alloc space.
+  virtual size_t NonGrowthLimitCapacity() const {
     return mem_map_->End() - mem_map_->Begin();
   }
 
@@ -185,7 +189,7 @@
 
   bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
 
-  static void* CreateMallocSpace(void* base, size_t initial_size, size_t maximum_size);
+  static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
 
   // The boundary tag overhead.
   static const size_t kChunkOverhead = kWordSize;
diff --git a/src/space_test.cc b/src/space_test.cc
index f6d1191..4025805 100644
--- a/src/space_test.cc
+++ b/src/space_test.cc
@@ -3,12 +3,20 @@
 #include "space.h"
 
 #include "common_test.h"
+#include "dlmalloc.h"
 #include "globals.h"
 #include "UniquePtr.h"
 
+#include <stdint.h>
+
 namespace art {
 
-class SpaceTest : public CommonTest {};
+class SpaceTest : public CommonTest {
+ public:
+  void SizeFootPrintGrowthLimitAndTrimBody(AllocSpace* space, intptr_t object_size,
+                                           int round, size_t growth_limit);
+  void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
+};
 
 TEST_F(SpaceTest, Init) {
   {
@@ -52,14 +60,14 @@
   AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
   ASSERT_TRUE(space != NULL);
 
-  // Make space findable to the heap, will also delete class when runtime is cleaned up
+  // Make space findable to the heap, will also delete space when runtime is cleaned up
   Heap::AddSpace(space);
 
-  // Succeeds, fits without adjusting the max allowed footprint.
+  // Succeeds, fits without adjusting the footprint limit.
   Object* ptr1 = space->AllocWithoutGrowth(1 * MB);
   EXPECT_TRUE(ptr1 != NULL);
 
-  // Fails, requires a higher allowed footprint.
+  // Fails, requires a higher footprint limit.
   Object* ptr2 = space->AllocWithoutGrowth(8 * MB);
   EXPECT_TRUE(ptr2 == NULL);
 
@@ -67,13 +75,13 @@
   Object* ptr3 = space->AllocWithGrowth(8 * MB);
   EXPECT_TRUE(ptr3 != NULL);
 
-  // Fails, requires a higher allowed footprint.
+  // Fails, requires a higher footprint limit.
   Object* ptr4 = space->AllocWithoutGrowth(8 * MB);
-  EXPECT_FALSE(ptr4 != NULL);
+  EXPECT_TRUE(ptr4 == NULL);
 
   // Also fails, requires a higher allowed footprint.
   Object* ptr5 = space->AllocWithGrowth(8 * MB);
-  EXPECT_FALSE(ptr5 != NULL);
+  EXPECT_TRUE(ptr5 == NULL);
 
   // Release some memory.
   size_t free3 = space->AllocationSize(ptr3);
@@ -90,4 +98,236 @@
   EXPECT_LE(1U * MB, free1);
 }
 
+TEST_F(SpaceTest, AllocAndFreeList) {
+  AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+  ASSERT_TRUE(space != NULL);
+
+  // Make space findable to the heap, will also delete space when runtime is cleaned up
+  Heap::AddSpace(space);
+
+  // Succeeds, fits without adjusting the max allowed footprint.
+  Object* lots_of_objects[1024];
+  for(size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    lots_of_objects[i] = space->AllocWithoutGrowth(16);
+    EXPECT_TRUE(lots_of_objects[i] != NULL);
+  }
+
+  // Release memory and check pointers are NULL
+  space->FreeList(arraysize(lots_of_objects), lots_of_objects);
+  for(size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    EXPECT_TRUE(lots_of_objects[i] == NULL);
+  }
+
+  // Succeeds, fits by adjusting the max allowed footprint.
+  for(size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    lots_of_objects[i] = space->AllocWithGrowth(1024);
+    EXPECT_TRUE(lots_of_objects[i] != NULL);
+  }
+
+  // Release memory and check pointers are NULL
+  space->FreeList(arraysize(lots_of_objects), lots_of_objects);
+  for(size_t i = 0; i < arraysize(lots_of_objects); i++) {
+    EXPECT_TRUE(lots_of_objects[i] == NULL);
+  }
+}
+
+static size_t test_rand() {
+  // TODO: replace this with something random yet deterministic
+  return rand();
+}
+
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(AllocSpace* space, intptr_t object_size,
+                                                    int round, size_t growth_limit) {
+  if ((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit)) ||
+      (object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit))) {
+    // No allocation can succeed
+    return;
+  }
+  // Mspace for raw dlmalloc operations
+  void* mspace = space->GetMspace();
+
+  // The mspace's footprint equals the amount of memory requested from the system
+  size_t footprint = mspace_footprint(mspace);
+
+  // The mspace must at least have its bookkeeping allocated
+  EXPECT_GT(footprint, 0u);
+
+  // The footprint shouldn't exceed the growth limit
+  EXPECT_LE(footprint, growth_limit);
+
+  // The space's size shouldn't exceed the growth limit
+  EXPECT_LE(space->Size(), growth_limit);
+
+  // this invariant should always hold or else the mspace has grown to be larger than what the
+  // space believes its size is (which will break invariants)
+  EXPECT_GE(space->Size(), footprint);
+
+  // Fill the space with lots of small objects up to the growth limit
+  size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
+  UniquePtr<Object*> lots_of_objects(new Object*[max_objects]);
+  size_t last_object = 0;  // last object for which allocation succeeded
+  size_t amount_allocated = 0;  // amount of space allocated
+  for(size_t i = 0; i < max_objects; i++) {
+    size_t alloc_fails = 0;  // number of failed allocations
+    size_t max_fails = 30;  // number of times we fail allocation before giving up
+    for (; alloc_fails < max_fails; alloc_fails++) {
+      size_t alloc_size;
+      if (object_size > 0) {
+        alloc_size = object_size;
+      } else {
+        alloc_size = test_rand() % static_cast<size_t>(-object_size);
+        if (alloc_size < 8) {
+          alloc_size = 8;
+        }
+      }
+      Object* object;
+      if (round <= 1) {
+        object = space->AllocWithoutGrowth(alloc_size);
+      } else {
+        object = space->AllocWithGrowth(alloc_size);
+      }
+      footprint = mspace_footprint(mspace);
+      EXPECT_GE(space->Size(), footprint);  // invariant
+      if (object != NULL) {  // allocation succeeded
+        lots_of_objects.get()[i] = object;
+        size_t allocation_size = space->AllocationSize(object);
+        if (object_size > 0) {
+          EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+        } else {
+          EXPECT_GE(allocation_size, 8u);
+        }
+        amount_allocated += allocation_size;
+        break;
+      }
+    }
+    if (alloc_fails == max_fails) {
+      last_object = i;
+      break;
+    }
+  }
+  CHECK_NE(last_object, 0u);  // we should have filled the space
+  EXPECT_GT(amount_allocated, 0u);
+
+  // We shouldn't have gone past the growth_limit
+  EXPECT_LE(amount_allocated, growth_limit);
+  EXPECT_LE(footprint, growth_limit);
+  EXPECT_LE(space->Size(), growth_limit);
+
+  // footprint and size should agree with amount allocated
+  EXPECT_GE(footprint, amount_allocated);
+  EXPECT_GE(space->Size(), amount_allocated);
+
+  // Release storage in a somewhat ad hoc manner
+  size_t free_increment = 96;
+  while (true) {
+    // Give the space a haircut
+    space->Trim();
+
+    // Bounds sanity
+    footprint = mspace_footprint(mspace);
+    EXPECT_LE(amount_allocated, growth_limit);
+    EXPECT_GE(footprint, amount_allocated);
+    EXPECT_LE(footprint, growth_limit);
+    EXPECT_GE(space->Size(), amount_allocated);
+    EXPECT_LE(space->Size(), growth_limit);
+
+    if (free_increment == 0) {
+      break;
+    }
+
+    // Free some objects
+    for(size_t i = 0; i < last_object; i += free_increment) {
+      Object* object = lots_of_objects.get()[i];
+      if (object == NULL) {
+        continue;
+      }
+      size_t allocation_size = space->AllocationSize(object);
+      if (object_size > 0) {
+        EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+      } else {
+        EXPECT_GE(allocation_size, 8u);
+      }
+      space->Free(object);
+      lots_of_objects.get()[i] = NULL;
+      amount_allocated -= allocation_size;
+      footprint = mspace_footprint(mspace);
+      EXPECT_GE(space->Size(), footprint);  // invariant
+    }
+
+    free_increment >>= 1;
+  }
+
+  // All memory was released, try a large allocation to check freed memory is being coalesced
+  Object* large_object;
+  size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
+  if (round <= 1) {
+    large_object = space->AllocWithoutGrowth(three_quarters_space);
+  } else {
+    large_object = space->AllocWithGrowth(three_quarters_space);
+  }
+  EXPECT_TRUE(large_object != NULL);
+
+  // Sanity check footprint
+  footprint = mspace_footprint(mspace);
+  EXPECT_LE(footprint, growth_limit);
+  EXPECT_GE(space->Size(), footprint);
+  EXPECT_LE(space->Size(), growth_limit);
+
+  // Clean up
+  space->Free(large_object);
+
+  // Sanity check footprint
+  footprint = mspace_footprint(mspace);
+  EXPECT_LE(footprint, growth_limit);
+  EXPECT_GE(space->Size(), footprint);
+  EXPECT_LE(space->Size(), growth_limit);
+}
+
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size) {
+  size_t initial_size = 4 * MB;
+  size_t growth_limit = 8 * MB;
+  size_t capacity = 16 * MB;
+  AllocSpace* space(Space::CreateAllocSpace("test", initial_size, growth_limit, capacity, NULL));
+  ASSERT_TRUE(space != NULL);
+
+  // Basic sanity
+  EXPECT_EQ(space->Capacity(), growth_limit);
+  EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
+
+  // Make space findable to the heap, will also delete space when runtime is cleaned up
+  Heap::AddSpace(space);
+
+  // In this round we don't allocate with growth and therefore can't grow past the initial size.
+  // This effectively makes the growth_limit the initial_size, so pass initial_size as the limit.
+  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 1, initial_size);
+  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 2, growth_limit);
+  // Remove growth limit
+  space->ClearGrowthLimit();
+  EXPECT_EQ(space->Capacity(), capacity);
+  SizeFootPrintGrowthLimitAndTrimBody(space, object_size, 3, capacity);
+}
+
+#define TEST_SizeFootPrintGrowthLimitAndTrim(name, size) \
+  TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_##name) { \
+    SizeFootPrintGrowthLimitAndTrimDriver(size); \
+  } \
+  TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_RandomAllocationsWithMax_##name) { \
+    SizeFootPrintGrowthLimitAndTrimDriver(-size); \
+  }
+
+// Each size test is its own test so that we get a fresh heap each time
+TEST_F(SpaceTest, SizeFootPrintGrowthLimitAndTrim_AllocationsOf_8B) {
+  SizeFootPrintGrowthLimitAndTrimDriver(8);
+}
+TEST_SizeFootPrintGrowthLimitAndTrim(16B, 16)
+TEST_SizeFootPrintGrowthLimitAndTrim(24B, 24)
+TEST_SizeFootPrintGrowthLimitAndTrim(32B, 32)
+TEST_SizeFootPrintGrowthLimitAndTrim(64B, 64)
+TEST_SizeFootPrintGrowthLimitAndTrim(128B, 128)
+TEST_SizeFootPrintGrowthLimitAndTrim(1KB, 1 * KB)
+TEST_SizeFootPrintGrowthLimitAndTrim(4KB, 4 * KB)
+TEST_SizeFootPrintGrowthLimitAndTrim(1MB, 1 * MB)
+TEST_SizeFootPrintGrowthLimitAndTrim(4MB, 4 * MB)
+TEST_SizeFootPrintGrowthLimitAndTrim(8MB, 8 * MB)
+
 }  // namespace art
diff --git a/src/timing_logger.h b/src/timing_logger.h
index 78b168b..b8e545e 100644
--- a/src/timing_logger.h
+++ b/src/timing_logger.h
@@ -41,9 +41,9 @@
   void Dump() {
     LOG(INFO) << name_ << ": begin";
     for (size_t i = 1; i < times_.size(); ++i) {
-      LOG(INFO) << name_ << StringPrintf(": %8lld ms, ", NsToMs(times_[i] - times_[i-1])) << labels_[i];
+      LOG(INFO) << name_ << ": " << PrettyDuration(times_[i] - times_[i-1]) << labels_[i];
     }
-    LOG(INFO) << name_ << ": end, " << NsToMs(times_.back() - times_.front()) << " ms";
+    LOG(INFO) << name_ << ": end, " << PrettyDuration(times_.back() - times_.front());
   }
 
  private:
diff --git a/src/utils.cc b/src/utils.cc
index 531424e..4f4fa87 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -115,10 +115,6 @@
 #endif
 }
 
-uint64_t NsToMs(uint64_t ns) {
-  return ns/1000/1000;
-}
-
 std::string PrettyDescriptor(const String* java_descriptor) {
   if (java_descriptor == NULL) {
     return "null";
@@ -272,6 +268,65 @@
   return result;
 }
 
+std::string PrettySize(size_t size_in_bytes) {
+  if ((size_in_bytes / GB) * GB == size_in_bytes) {
+    return StringPrintf("%zdGB", size_in_bytes / GB);
+  } else if ((size_in_bytes / MB) * MB == size_in_bytes) {
+    return StringPrintf("%zdMB", size_in_bytes / MB);
+  } else if ((size_in_bytes / KB) * KB == size_in_bytes) {
+    return StringPrintf("%zdKiB", size_in_bytes / KB);
+  } else {
+    return StringPrintf("%zdB", size_in_bytes);
+  }
+}
+
+std::string PrettyDuration(uint64_t nano_duration) {
+  if (nano_duration == 0) {
+    return "0";
+  } else {
+    const uint64_t one_sec = 1000 * 1000 * 1000;
+    const uint64_t one_ms  = 1000 * 1000;
+    const uint64_t one_us  = 1000;
+    const char* unit;
+    uint64_t divisor;
+    uint32_t zero_fill;
+    if (nano_duration >= one_sec) {
+      unit = "s";
+      divisor = one_sec;
+      zero_fill = 9;
+    } else if (nano_duration >= one_ms) {
+      unit = "ms";
+      divisor = one_ms;
+      zero_fill = 6;
+    } else if (nano_duration >= one_us) {
+      unit = "us";
+      divisor = one_us;
+      zero_fill = 3;
+    } else {
+      unit = "ns";
+      divisor = 1;
+      zero_fill = 0;
+    }
+    uint64_t whole_part = nano_duration / divisor;
+    uint64_t fractional_part = nano_duration % divisor;
+    if (fractional_part == 0) {
+      return StringPrintf("%llu%s", whole_part, unit);
+    } else {
+      while ((fractional_part % 1000) == 0) {
+        zero_fill -= 3;
+        fractional_part /= 1000;
+      }
+      if (zero_fill == 3) {
+        return StringPrintf("%llu.%03llu%s", whole_part, fractional_part, unit);
+      } else if (zero_fill == 6) {
+        return StringPrintf("%llu.%06llu%s", whole_part, fractional_part, unit);
+      } else {
+        return StringPrintf("%llu.%09llu%s", whole_part, fractional_part, unit);
+      }
+    }
+  }
+}
+
 std::string MangleForJni(const std::string& s) {
   std::string result;
   size_t char_count = CountModifiedUtf8Chars(s.c_str());
diff --git a/src/utils.h b/src/utils.h
index 8b22103..be7d8ba 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -183,6 +183,14 @@
 // Returns a human-readable form of the name of the given class with its class loader.
 std::string PrettyClassAndClassLoader(const Class* c);
 
+// Returns a human-readable size string, e.g. "1MB".
+std::string PrettySize(size_t size_in_bytes);
+
+// Returns a human-readable time string with nanosecond precision while trying to limit the
+// number of trailing zeros. Prints using the largest human-readable unit up to a second,
+// e.g. "1ms", "1.000000001s", "1.001us".
+std::string PrettyDuration(uint64_t nano_duration);
+
 // Performs JNI name mangling as described in section 11.3 "Linking Native Methods"
 // of the JNI spec.
 std::string MangleForJni(const std::string& s);
@@ -225,7 +233,15 @@
 uint64_t ThreadCpuMicroTime();
 
 // Converts the given number of nanoseconds to milliseconds.
-uint64_t NsToMs(uint64_t ns);
+static inline uint64_t NsToMs(uint64_t ns) {
+  return ns / 1000 / 1000;
+}
+
+// Converts the given number of milliseconds to nanoseconds.
+static inline uint64_t MsToNs(uint64_t ms) {
+  return ms * 1000 * 1000;
+}
+
 
 // Splits a string using the given delimiter character into a vector of
 // strings. Empty strings will be omitted.
diff --git a/src/utils_test.cc b/src/utils_test.cc
index df47d79..1c3fe79 100644
--- a/src/utils_test.cc
+++ b/src/utils_test.cc
@@ -102,6 +102,51 @@
   EXPECT_EQ("java.lang.String.value", PrettyField(f, false));
 }
 
+TEST_F(UtilsTest, PrettySize) {
+  EXPECT_EQ("1GB", PrettySize(1 * GB));
+  EXPECT_EQ("2GB", PrettySize(2 * GB));
+  if (sizeof(size_t) > sizeof(uint32_t)) {
+    EXPECT_EQ("100GB", PrettySize(100 * GB));
+  }
+  EXPECT_EQ("1MB", PrettySize(1 * MB));
+  EXPECT_EQ("10MB", PrettySize(10 * MB));
+  EXPECT_EQ("100MB", PrettySize(100 * MB));
+  EXPECT_EQ("1KiB", PrettySize(1 * KB));
+  EXPECT_EQ("10KiB", PrettySize(10 * KB));
+  EXPECT_EQ("100KiB", PrettySize(100 * KB));
+  EXPECT_EQ("1B", PrettySize(1));
+  EXPECT_EQ("10B", PrettySize(10));
+  EXPECT_EQ("100B", PrettySize(100));
+}
+
+TEST_F(UtilsTest, PrettyDuration) {
+  const uint64_t one_sec = 1000000000;
+  const uint64_t one_ms  = 1000000;
+  const uint64_t one_us  = 1000;
+
+  EXPECT_EQ("1s", PrettyDuration(1 * one_sec));
+  EXPECT_EQ("10s", PrettyDuration(10 * one_sec));
+  EXPECT_EQ("100s", PrettyDuration(100 * one_sec));
+  EXPECT_EQ("1.001s", PrettyDuration(1 * one_sec + one_ms));
+  EXPECT_EQ("1.000001s", PrettyDuration(1 * one_sec + one_us));
+  EXPECT_EQ("1.000000001s", PrettyDuration(1 * one_sec + 1));
+
+  EXPECT_EQ("1ms", PrettyDuration(1 * one_ms));
+  EXPECT_EQ("10ms", PrettyDuration(10 * one_ms));
+  EXPECT_EQ("100ms", PrettyDuration(100 * one_ms));
+  EXPECT_EQ("1.001ms", PrettyDuration(1 * one_ms + one_us));
+  EXPECT_EQ("1.000001ms", PrettyDuration(1 * one_ms + 1));
+
+  EXPECT_EQ("1us", PrettyDuration(1 * one_us));
+  EXPECT_EQ("10us", PrettyDuration(10 * one_us));
+  EXPECT_EQ("100us", PrettyDuration(100 * one_us));
+  EXPECT_EQ("1.001us", PrettyDuration(1 * one_us + 1));
+
+  EXPECT_EQ("1ns", PrettyDuration(1));
+  EXPECT_EQ("10ns", PrettyDuration(10));
+  EXPECT_EQ("100ns", PrettyDuration(100));
+}
+
 TEST_F(UtilsTest, MangleForJni) {
   EXPECT_EQ("hello_00024world", MangleForJni("hello$world"));
   EXPECT_EQ("hello_000a9world", MangleForJni("hello\xc2\xa9world"));