ART: Fix memory unmapped twice issue in ElfFile::Load(bool)

Root Cause:
  The overlapped memory region will be unmapped by
  (1) ~MemMap() of reservation MemMap (reserve) and
  (2) ~MemMap() of "reuse" MemMap (segment).
  Something else may map the memory region after (1), and it will then be unmapped by (2).
  So, SIGSEGV occurs when using the unmapped memory region.

Solution:
  Fixes this issue by skipping the unmap of the "reuse" MemMap in its destructor,
  and by always creating the reservation MemMap before the "reuse" MemMap. (This
  also fixes the fixup-ELF case, which did not reserve the whole needed memory region.)

Bug: 16486685

(cherry picked from commit a62a588a9202f69e53fbeb3045ea8ea5ec2587f8)

Change-Id: Icb83c8e87fa168027d9d8adb34925000399d3d2a
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index e5402e1..3e6d644 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -837,6 +837,7 @@
     }
   }
 
+  bool reserved = false;
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
     Elf32_Phdr& program_header = GetProgramHeader(i);
 
@@ -853,10 +854,8 @@
 
     // Found something to load.
 
-    // If p_vaddr is zero, it must be the first loadable segment,
-    // since they must be in order.  Since it is zero, there isn't a
-    // specific address requested, so first request a contiguous chunk
-    // of required size for all segments, but with no
+    // Before loading the actual segments, reserve a contiguous chunk
+    // of required size and address for all segments, but with no
     // permissions. We'll then carve that up with the proper
     // permissions as we load the actual segments. If p_vaddr is
     // non-zero, the segments require the specific address specified,
@@ -870,18 +869,24 @@
       return false;
     }
     size_t file_length = static_cast<size_t>(temp_file_length);
-    if (program_header.p_vaddr == 0) {
+    if (!reserved) {
+      byte* reserve_base = ((program_header.p_vaddr != 0) ?
+                            reinterpret_cast<byte*>(program_header.p_vaddr) : nullptr);
       std::string reservation_name("ElfFile reservation for ");
       reservation_name += file_->GetPath();
       std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
-                                                     nullptr, GetLoadedSize(), PROT_NONE, false,
-                                                     error_msg));
+                                                           reserve_base,
+                                                           GetLoadedSize(), PROT_NONE, false,
+                                                           error_msg));
       if (reserve.get() == nullptr) {
         *error_msg = StringPrintf("Failed to allocate %s: %s",
                                   reservation_name.c_str(), error_msg->c_str());
         return false;
       }
-      base_address_ = reserve->Begin();
+      reserved = true;
+      if (reserve_base == nullptr) {
+        base_address_ = reserve->Begin();
+      }
       segments_.push_back(reserve.release());
     }
     // empty segment, nothing to map
@@ -1340,7 +1345,8 @@
   const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
   Elf32_Shdr* text_sec = all.FindSectionByName(".text");
   if (debug_info == nullptr || debug_abbrev == nullptr || debug_frame == nullptr ||
-      debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
+      debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr ||
+      symtab_sec == nullptr) {
     return;
   }
 #ifdef __LP64__
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 438bee9..6c7ee5b 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -130,8 +130,67 @@
 uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
 #endif
 
+// Return true if the address range is contained in a single /proc/self/maps entry.
+static bool CheckOverlapping(uintptr_t begin,
+                             uintptr_t end,
+                             std::string* error_msg) {
+  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+  if (!map->Build()) {
+    *error_msg = StringPrintf("Failed to build process map");
+    return false;
+  }
+  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+    if ((begin >= it->start && begin < it->end)  // start of new within old
+        && (end > it->start && end <= it->end)) {  // end of new within old
+      return true;
+    }
+  }
+  std::string maps;
+  ReadFileToString("/proc/self/maps", &maps);
+  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
+                            "any existing map:\n%s\n",
+                            begin, end, maps.c_str());
+  return false;
+}
+
+// Return true if the address range does not conflict with any /proc/self/maps entry.
+static bool CheckNonOverlapping(uintptr_t begin,
+                                uintptr_t end,
+                                std::string* error_msg) {
+  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+  if (!map->Build()) {
+    *error_msg = StringPrintf("Failed to build process map");
+    return false;
+  }
+  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+    if ((begin >= it->start && begin < it->end)      // start of new within old
+        || (end > it->start && end < it->end)        // end of new within old
+        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
+      std::ostringstream map_info;
+      map_info << std::make_pair(it, map->end());
+      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
+                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
+                                begin, end,
+                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
+                                it->name.c_str(),
+                                map_info.str().c_str());
+      return false;
+    }
+  }
+  return true;
+}
+
+// CheckMapRequest to validate a non-MAP_FAILED mmap result based on
+// the expected value, calling munmap if validation fails, giving the
+// reason in error_msg.
+//
+// If the expected_ptr is nullptr, nothing is checked beyond the fact
+// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
+// non-null, we check that actual_ptr == expected_ptr,
+// and if not, report in error_msg what the conflict mapping was if
+// found, or a generic error in other cases.
 static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
-                            std::ostringstream* error_msg) {
+                            std::string* error_msg) {
   // Handled first by caller for more specific error messages.
   CHECK(actual_ptr != MAP_FAILED);
 
@@ -139,6 +198,10 @@
     return true;
   }
 
+  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
+  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+  uintptr_t limit = expected + byte_count;
+
   if (expected_ptr == actual_ptr) {
     return true;
   }
@@ -149,40 +212,19 @@
     PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
   }
 
-  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
-  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
-  uintptr_t limit = expected + byte_count;
-
-  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
-  if (map.get() == NULL) {
-    *error_msg << StringPrintf("Failed to create process map to determine why mmap returned "
-                               "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
-
+  if (!CheckNonOverlapping(expected, limit, error_msg)) {
     return false;
   }
-  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
-    if ((expected >= it->start && expected < it->end)  // start of new within old
-        || (limit > it->start && limit < it->end)      // end of new within old
-        || (expected <= it->start && limit > it->end)) {  // start/end of new includes all of old
-      *error_msg
-          << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
-                          "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
-                          expected, limit,
-                          static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
-                          it->name.c_str())
-          << std::make_pair(it, map->end());
-      return false;
-    }
-  }
-  *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
-                             "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
+
+  *error_msg = StringPrintf("Failed to mmap at expected address, mapped at "
+                            "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
   return false;
 }
 
-MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
+MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
                              bool low_4gb, std::string* error_msg) {
   if (byte_count == 0) {
-    return new MemMap(name, nullptr, 0, nullptr, 0, prot);
+    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
   }
   size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
 
@@ -222,11 +264,11 @@
   // 4GB.
   if (low_4gb && (
       // Start out of bounds.
-      (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
+      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
       // End out of bounds. For simplicity, this will fail for the last page of memory.
-      (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
+      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
     *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
-                              expected, expected + page_aligned_byte_count);
+                              expected_ptr, expected_ptr + page_aligned_byte_count);
     return nullptr;
   }
 #endif
@@ -238,7 +280,7 @@
 #if USE_ART_LOW_4G_ALLOCATOR
   // MAP_32BIT only available on x86_64.
   void* actual = MAP_FAILED;
-  if (low_4gb && expected == nullptr) {
+  if (low_4gb && expected_ptr == nullptr) {
     bool first_run = true;
 
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
@@ -294,18 +336,18 @@
       saved_errno = ENOMEM;
     }
   } else {
-    actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
     saved_errno = errno;
   }
 
 #else
 #if defined(__LP64__)
-  if (low_4gb && expected == nullptr) {
+  if (low_4gb && expected_ptr == nullptr) {
     flags |= MAP_32BIT;
   }
 #endif
 
-  void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
   saved_errno = errno;
 #endif
 
@@ -314,44 +356,51 @@
     ReadFileToString("/proc/self/maps", &maps);
 
     *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
-                              expected, page_aligned_byte_count, prot, flags, fd.get(),
+                              expected_ptr, page_aligned_byte_count, prot, flags, fd.get(),
                               strerror(saved_errno), maps.c_str());
     return nullptr;
   }
   std::ostringstream check_map_request_error_msg;
-  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
-    *error_msg = check_map_request_error_msg.str();
+  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
     return nullptr;
   }
   return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
-                    page_aligned_byte_count, prot);
+                    page_aligned_byte_count, prot, false);
 }
 
-MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
+MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
                                  off_t start, bool reuse, const char* filename,
                                  std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
+  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+  uintptr_t limit = expected + byte_count;
   if (reuse) {
     // reuse means it is okay that it overlaps an existing page mapping.
     // Only use this if you actually made the page reservation yourself.
-    CHECK(expected != nullptr);
+    CHECK(expected_ptr != nullptr);
+    if (!CheckOverlapping(expected, limit, error_msg)) {
+      return nullptr;
+    }
     flags |= MAP_FIXED;
   } else {
     CHECK_EQ(0, flags & MAP_FIXED);
+    if (expected_ptr != nullptr && !CheckNonOverlapping(expected, limit, error_msg)) {
+      return nullptr;
+    }
   }
 
   if (byte_count == 0) {
-    return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
+    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
   }
   // Adjust 'offset' to be page-aligned as required by mmap.
   int page_offset = start % kPageSize;
   off_t page_aligned_offset = start - page_offset;
   // Adjust 'byte_count' to be page-aligned as we will map this anyway.
   size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
-  // The 'expected' is modified (if specified, ie non-null) to be page aligned to the file but not
-  // necessarily to virtual memory. mmap will page align 'expected' for us.
-  byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);
+  // The 'expected_ptr' is modified (if specified, ie non-null) to be page aligned to the file but
+  // not necessarily to virtual memory. mmap will page align 'expected' for us.
+  byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
 
   byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                               page_aligned_byte_count,
@@ -373,21 +422,22 @@
     return nullptr;
   }
   std::ostringstream check_map_request_error_msg;
-  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
-    *error_msg = check_map_request_error_msg.str();
+  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
     return nullptr;
   }
   return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
-                    prot);
+                    prot, reuse);
 }
 
 MemMap::~MemMap() {
   if (base_begin_ == nullptr && base_size_ == 0) {
     return;
   }
-  int result = munmap(base_begin_, base_size_);
-  if (result == -1) {
-    PLOG(FATAL) << "munmap failed";
+  if (!reuse_) {
+    int result = munmap(base_begin_, base_size_);
+    if (result == -1) {
+      PLOG(FATAL) << "munmap failed";
+    }
   }
 
   // Remove it from maps_.
@@ -405,9 +455,9 @@
 }
 
 MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
-               size_t base_size, int prot)
+               size_t base_size, int prot, bool reuse)
     : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
-      prot_(prot) {
+      prot_(prot), reuse_(reuse) {
   if (size_ == 0) {
     CHECK(begin_ == nullptr);
     CHECK(base_begin_ == nullptr);
@@ -437,7 +487,7 @@
   byte* new_base_end = new_end;
   DCHECK_LE(new_base_end, old_base_end);
   if (new_base_end == old_base_end) {
-    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
+    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
   }
   size_ = new_end - reinterpret_cast<byte*>(begin_);
   base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
@@ -489,7 +539,7 @@
                               maps.c_str());
     return nullptr;
   }
-  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
+  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
 }
 
 void MemMap::MadviseDontNeedAndZero() {
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index defa6a5..872c63b 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -73,7 +73,9 @@
 
   // Map part of a file, taking care of non-page aligned offsets.  The
   // "start" offset is absolute, not relative. This version allows
-  // requesting a specific address for the base of the mapping.
+  // requesting a specific address for the base of the
+  // mapping. "reuse" allows us to create a view into an existing
+  // mapping where we do not take ownership of the memory.
   //
   // On success, returns returns a MemMap instance.  On failure, returns a NULL;
   static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
@@ -134,7 +136,7 @@
 
  private:
   MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
-         int prot) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+         int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
 
   static void DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
       LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -145,7 +147,7 @@
   static MemMap* GetLargestMemMapAt(void* address)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
 
-  std::string name_;
+  const std::string name_;
   byte* const begin_;  // Start of data.
   size_t size_;  // Length of data.
 
@@ -153,6 +155,11 @@
   size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
   int prot_;  // Protection of the map.
 
+  // When reuse_ is true, this is just a view of an existing mapping
+  // and we do not take ownership and are not responsible for
+  // unmapping.
+  const bool reuse_;
+
 #if USE_ART_LOW_4G_ALLOCATOR
   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
 #endif