Add optimization for LOW_4G allocator

The optimization uses the maps_ field to skip over regions which we
know won't be large enough for the request (see the sketch after the
commit metadata below).

Reduces the time to create the heap from 500-700ms to 100ms on N9.

(cherry picked from commit 1c8bef4a71612f10b89b102434f70de5a127cc74)

Bug: 20727525

Change-Id: I4fa92d58c2e135ba31a03ababff46669089bb542
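
For illustration, the following is a minimal standalone sketch of the skip
logic described above, not part of the patch itself. The map layout, the
names (regions, SkipMappedRegions, kPage), and the example addresses are
hypothetical; the real bookkeeping lives in maps_ under mem_maps_lock_, as
shown in the diff below.

// Standalone sketch of the "skip over known-too-small gaps" idea. All names
// here are hypothetical and for illustration only.
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

constexpr uintptr_t kPage = 4096;

// Starting at `ptr`, skip past every existing region that does not leave a
// gap of at least `bytes` before the next mapping, and return the first
// candidate address worth probing.
uintptr_t SkipMappedRegions(const std::map<uintptr_t, uintptr_t>& regions,
                            uintptr_t ptr, size_t bytes) {
  // Find the first region whose base address is above ptr.
  auto it = regions.upper_bound(ptr);
  if (it != regions.begin()) {
    auto before = std::prev(it);
    // If ptr lies inside the previous region, start at that region's end.
    if (before->second > ptr) {
      ptr = before->second;
    }
  }
  while (it != regions.end()) {
    // How much free space is there until the next region?
    if (it->first - ptr >= bytes) {
      break;  // The gap may be large enough; stop skipping.
    }
    // Gap too small: jump to the end of this region and try again.
    ptr = it->second;
    ++it;
  }
  return ptr;
}

int main() {
  // Two mappings: [0x10000, 0x20000) and [0x21000, 0x80000). The 0x1000-byte
  // gap between them is too small for a 16-page request, so the search jumps
  // straight to 0x80000 instead of probing page by page.
  std::map<uintptr_t, uintptr_t> regions = {{0x10000, 0x20000},
                                            {0x21000, 0x80000}};
  uintptr_t next = SkipMappedRegions(regions, 0x11000, 16 * kPage);
  printf("next candidate = 0x%" PRIxPTR "\n", next);
  return 0;
}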
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index d8c1ec1..6b9b5d1 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -314,7 +314,31 @@
   if (low_4gb && expected_ptr == nullptr) {
     bool first_run = true;
 
+    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+      // Use maps_ as an optimization to skip over large maps.
+      // Find the first map whose base address is > ptr.
+      auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
+      if (it != maps_->begin()) {
+        auto before_it = it;
+        --before_it;
+        // Start at the end of the map before the upper bound.
+        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
+        CHECK_ALIGNED(ptr, kPageSize);
+      }
+      while (it != maps_->end()) {
+        // How much space do we have until the next map?
+        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
+        // If the space may be sufficient, break out of the loop.
+        if (delta >= page_aligned_byte_count) {
+          break;
+        }
+        // Otherwise, skip to the end of the map.
+        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
+        CHECK_ALIGNED(ptr, kPageSize);
+        ++it;
+      }
+
       if (4U * GB - ptr < page_aligned_byte_count) {
         // Not enough memory until 4GB.
         if (first_run) {