Fix race condition in MemMap::MapAnonymous.

Previously we used MAP_FIXED, which introduced a serious race condition
when MAP_32BIT was set: mmap could silently overwrite an existing
mapping at an address we had determined was free via msync. There was a
window of time after we had msynced a page during which another thread
could map something at that page. The new method avoids MAP_FIXED,
passing the candidate address only as a hint, and unmaps allocations
that succeed but don't land in the low 4GB when MAP_32BIT is set.
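
For context, here is a minimal standalone sketch of the hint-then-verify
approach (an illustration only, not the actual runtime code; the function
name MapLow4GB and the constants are hypothetical):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    static constexpr uint64_t k4GB = UINT64_C(4) * 1024 * 1024 * 1024;
    static constexpr size_t kPageSize = 4096;

    // Try to map byte_count bytes below 4GB by passing each candidate
    // address as a hint. Without MAP_FIXED the kernel treats the address
    // as advisory and never clobbers an existing mapping.
    void* MapLow4GB(size_t byte_count) {
      for (uintptr_t ptr = kPageSize; ptr + byte_count < k4GB; ptr += kPageSize) {
        void* actual = mmap(reinterpret_cast<void*>(ptr), byte_count,
                            PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (actual == MAP_FAILED) {
          continue;
        }
        // The hint is only advisory: verify the result really lies in
        // the low 4GB before accepting it.
        if (reinterpret_cast<uintptr_t>(actual) + byte_count < k4GB) {
          return actual;
        }
        // The kernel placed it elsewhere; release it and try the next
        // candidate address.
        munmap(actual, byte_count);
      }
      return MAP_FAILED;
    }

Because the address is never forced, losing the race to another thread
can only cost a retry, never a clobbered mapping.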

Bug: 15338094
Bug: 14974497

(cherry picked from commit c355a2a78d6ebdfdb645221275affb9136b4c667)

Change-Id: I292a74dbf2ef5ddfb8d0524ae8bc1efbcbd106c8

diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 49e0b54..892e7f4 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -206,8 +206,6 @@
   // MAP_32BIT only available on x86_64.
   void* actual = MAP_FAILED;
   if (low_4gb && expected == nullptr) {
-    flags |= MAP_FIXED;
-
     bool first_run = true;
 
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
@@ -243,7 +241,14 @@
         actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
                       0);
         if (actual != MAP_FAILED) {
-          break;
+          // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
+          // 4GB. If this is the case, unmap and retry.
+          if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < 4 * GB) {
+            break;
+          } else {
+            munmap(actual, page_aligned_byte_count);
+            actual = MAP_FAILED;
+          }
         }
       } else {
         // Skip over last page.