Remove unnecessary indirection from MemMap.

Avoid passing around plain MemMap pointers by making MemMap
moveable and returning MemMap objects by value. Previously we
could have a valid zero-size MemMap; this is now forbidden.
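
An illustrative sketch of the new value-based API (the call
shape matches the MapAnonymous() declaration in this change;
"example" and the protection flags are placeholder arguments):

    std::string error_msg;
    MemMap map = MemMap::MapAnonymous("example",
                                      /* addr */ nullptr,
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
                                      /* reuse */ false,
                                      &error_msg);
    // An invalid MemMap replaces the old nullptr return value.
    CHECK(map.IsValid()) << error_msg;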

MemMap::RemapAtEnd() is changed to avoid the explicit call
to munmap(); mmap() with MAP_FIXED automatically removes
old mappings for overlapping regions.
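
A minimal sketch of the tail remapping, assuming a plain mmap()
call (the real RemapAtEnd() goes through TargetMMap() and also
handles ashmem and memory tool annotations; the variable names
mirror the locals in RemapAtEnd()):

    // MAP_FIXED makes mmap() atomically replace any existing
    // mapping in [tail_base_begin, tail_base_begin + tail_base_size).
    int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
    void* actual = mmap(tail_base_begin, tail_base_size, tail_prot, flags, -1, 0);
    CHECK_NE(actual, MAP_FAILED);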

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: m test-art-target-gtest
Test: testrunner.py --target --optimizing
Change-Id: I12bd453c26a396edc20eb141bfd4dad20923f170
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 87197be..2f01766 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -221,7 +221,7 @@
   callbacks_.reset();
   verification_results_.reset();
   compiler_options_.reset();
-  image_reservation_.reset();
+  image_reservation_.Reset();
 
   CommonRuntimeTest::TearDown();
 }
@@ -323,18 +323,18 @@
   // accidentally end up colliding with the fixed memory address when we need to load the image.
   std::string error_msg;
   MemMap::Init();
-  image_reservation_.reset(MemMap::MapAnonymous("image reservation",
-                                                reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
-                                                (size_t)120 * 1024 * 1024,  // 120MB
-                                                PROT_NONE,
-                                                false /* no need for 4gb flag with fixed mmap*/,
-                                                false /* not reusing existing reservation */,
-                                                &error_msg));
-  CHECK(image_reservation_.get() != nullptr) << error_msg;
+  image_reservation_ = MemMap::MapAnonymous("image reservation",
+                                            reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
+                                            static_cast<size_t>(120) * 1024 * 1024,  // 120MB
+                                            PROT_NONE,
+                                            false /* no need for 4gb flag with fixed mmap */,
+                                            false /* not reusing existing reservation */,
+                                            &error_msg);
+  CHECK(image_reservation_.IsValid()) << error_msg;
 }
 
 void CommonCompilerTest::UnreserveImageSpace() {
-  image_reservation_.reset();
+  image_reservation_.Reset();
 }
 
 void CommonCompilerTest::SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index db38110..366489c 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -115,7 +115,7 @@
   std::unique_ptr<CompilerDriver> compiler_driver_;
 
  private:
-  std::unique_ptr<MemMap> image_reservation_;
+  MemMap image_reservation_;
 
   // Chunks must not move their storage after being created - use the node-based std::list.
   std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc
index 1f9ad42..dee83d1 100644
--- a/compiler/utils/swap_space.cc
+++ b/compiler/utils/swap_space.cc
@@ -141,6 +141,7 @@
           it->size -= size;
         } else {
           // Changing in place would break the std::set<> ordering, we need to remove and insert.
+          // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
           free_by_size_.erase(it);
           free_by_size_.insert(new_value);
         }
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0b68620..29df067 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -669,9 +669,7 @@
       for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
         dex_file.release();
       }
-      for (std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
-        map.release();
-      }
+      new std::vector<MemMap>(std::move(opened_dex_files_maps_));  // Leak MemMaps.
       for (std::unique_ptr<File>& vdex_file : vdex_files_) {
         vdex_file.release();
       }
@@ -1449,14 +1447,14 @@
         LOG(INFO) << "No " << VdexFile::kVdexNameInDmFile << " file in DexMetadata archive. "
                   << "Not doing fast verification.";
       } else {
-        std::unique_ptr<MemMap> input_file(zip_entry->MapDirectlyOrExtract(
+        MemMap input_file = zip_entry->MapDirectlyOrExtract(
             VdexFile::kVdexNameInDmFile,
             kDexMetadata,
-            &error_msg));
-        if (input_file == nullptr) {
+            &error_msg);
+        if (!input_file.IsValid()) {
           LOG(WARNING) << "Could not open vdex file in DexMetadata archive: " << error_msg;
         } else {
-          input_vdex_file_ = std::make_unique<VdexFile>(input_file.release());
+          input_vdex_file_ = std::make_unique<VdexFile>(std::move(input_file));
         }
       }
     }
@@ -1631,7 +1629,7 @@
       for (size_t i = 0, size = oat_writers_.size(); i != size; ++i) {
         rodata_.push_back(elf_writers_[i]->StartRoData());
         // Unzip or copy dex files straight to the oat file.
-        std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
+        std::vector<MemMap> opened_dex_files_map;
         std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
         // No need to verify the dex file when we have a vdex file, which means it was already
         // verified.
@@ -1651,7 +1649,7 @@
         if (opened_dex_files_map.empty()) {
           DCHECK(opened_dex_files.empty());
         } else {
-          for (std::unique_ptr<MemMap>& map : opened_dex_files_map) {
+          for (MemMap& map : opened_dex_files_map) {
             opened_dex_files_maps_.push_back(std::move(map));
           }
           for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
@@ -1732,8 +1730,8 @@
     }
 
     // Ensure opened dex files are writable for dex-to-dex transformations.
-    for (const std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
-      if (!map->Protect(PROT_READ | PROT_WRITE)) {
+    for (MemMap& map : opened_dex_files_maps_) {
+      if (!map.Protect(PROT_READ | PROT_WRITE)) {
         PLOG(ERROR) << "Failed to make .dex files writeable.";
         return dex2oat::ReturnCode::kOther;
       }
@@ -2002,9 +2000,9 @@
     TimingLogger::ScopedTiming t("dex2oat Oat", timings_);
 
     // Sync the data to the file, in case we did dex2dex transformations.
-    for (const std::unique_ptr<MemMap>& map : opened_dex_files_maps_) {
-      if (!map->Sync()) {
-        PLOG(ERROR) << "Failed to Sync() dex2dex output. Map: " << map->GetName();
+    for (MemMap& map : opened_dex_files_maps_) {
+      if (!map.Sync()) {
+        PLOG(ERROR) << "Failed to Sync() dex2dex output. Map: " << map.GetName();
         return false;
       }
     }
@@ -2737,16 +2735,13 @@
                                 zip_filename, error_msg->c_str());
       return nullptr;
     }
-    std::unique_ptr<MemMap> input_file(zip_entry->ExtractToMemMap(zip_filename,
-                                                                  input_filename,
-                                                                  error_msg));
-    if (input_file.get() == nullptr) {
+    MemMap input_file = zip_entry->ExtractToMemMap(zip_filename, input_filename, error_msg);
+    if (!input_file.IsValid()) {
       *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", input_filename,
                                 zip_filename, error_msg->c_str());
       return nullptr;
     }
-    const std::string input_string(reinterpret_cast<char*>(input_file->Begin()),
-                                   input_file->Size());
+    const std::string input_string(reinterpret_cast<char*>(input_file.Begin()), input_file.Size());
     std::istringstream input_stream(input_string);
     return ReadCommentedInputStream<T>(input_stream, process);
   }
@@ -2873,7 +2868,7 @@
   std::unique_ptr<linker::ImageWriter> image_writer_;
   std::unique_ptr<CompilerDriver> driver_;
 
-  std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
+  std::vector<MemMap> opened_dex_files_maps_;
   std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
 
   bool avoid_storing_invocation_;
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index fa8c778..440b3a4 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -252,7 +252,7 @@
       }
 
       std::vector<OutputStream*> rodata;
-      std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps;
+      std::vector<MemMap> opened_dex_files_maps;
       std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
       // Now that we have finalized key_value_store_, start writing the oat file.
       for (size_t i = 0, size = oat_writers.size(); i != size; ++i) {
@@ -265,7 +265,7 @@
                                             dex_file->GetLocation().c_str(),
                                             dex_file->GetLocationChecksum());
 
-        std::vector<std::unique_ptr<MemMap>> cur_opened_dex_files_maps;
+        std::vector<MemMap> cur_opened_dex_files_maps;
         std::vector<std::unique_ptr<const DexFile>> cur_opened_dex_files;
         bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
             out_helper.vdex_files[i].GetFile(),
@@ -279,7 +279,7 @@
         ASSERT_TRUE(dex_files_ok);
 
         if (!cur_opened_dex_files_maps.empty()) {
-          for (std::unique_ptr<MemMap>& cur_map : cur_opened_dex_files_maps) {
+          for (MemMap& cur_map : cur_opened_dex_files_maps) {
             opened_dex_files_maps.push_back(std::move(cur_map));
           }
           for (std::unique_ptr<const DexFile>& cur_dex_file : cur_opened_dex_files) {
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index e10f9b3..27e7974 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -303,8 +303,8 @@
     }
 
     // Image data size excludes the bitmap and the header.
-    ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
-    ArrayRef<const uint8_t> raw_image_data(image_info.image_->Begin() + sizeof(ImageHeader),
+    ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_.Begin());
+    ArrayRef<const uint8_t> raw_image_data(image_info.image_.Begin() + sizeof(ImageHeader),
                                            image_header->GetImageSize() - sizeof(ImageHeader));
 
     CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
@@ -362,7 +362,7 @@
     // We do not want to have a corrupted image with a valid header.
     // The header is uncompressed since it contains whether the image is compressed or not.
     image_header->data_size_ = image_data.size();
-    if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_->Begin()),
+    if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_.Begin()),
                                  sizeof(ImageHeader),
                                  0)) {
       PLOG(ERROR) << "Failed to write image file header " << image_filename;
@@ -730,14 +730,14 @@
         image_info.CreateImageSections(unused_sections), kPageSize);
 
     std::string error_msg;
-    image_info.image_.reset(MemMap::MapAnonymous("image writer image",
-                                                 nullptr,
-                                                 length,
-                                                 PROT_READ | PROT_WRITE,
-                                                 false,
-                                                 false,
-                                                 &error_msg));
-    if (UNLIKELY(image_info.image_.get() == nullptr)) {
+    image_info.image_ = MemMap::MapAnonymous("image writer image",
+                                             /* addr */ nullptr,
+                                             length,
+                                             PROT_READ | PROT_WRITE,
+                                             /* low_4gb */ false,
+                                             /* reuse */ false,
+                                             &error_msg);
+    if (UNLIKELY(!image_info.image_.IsValid())) {
       LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
       return false;
     }
@@ -745,7 +745,7 @@
     // Create the image bitmap, only needs to cover mirror object section which is up to image_end_.
     CHECK_LE(image_info.image_end_, length);
     image_info.image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
-        "image bitmap", image_info.image_->Begin(), RoundUp(image_info.image_end_, kPageSize)));
+        "image bitmap", image_info.image_.Begin(), RoundUp(image_info.image_end_, kPageSize)));
     if (image_info.image_bitmap_.get() == nullptr) {
       LOG(ERROR) << "Failed to allocate memory for image bitmap";
       return false;
@@ -2025,7 +2025,7 @@
 
   // Create the header, leave 0 for data size since we will fill this in as we are writing the
   // image.
-  ImageHeader* header = new (image_info.image_->Begin()) ImageHeader(
+  ImageHeader* header = new (image_info.image_.Begin()) ImageHeader(
       PointerToLowMemUInt32(image_info.image_begin_),
       image_end,
       sections,
@@ -2163,8 +2163,8 @@
     if (relocation.oat_index != oat_index) {
       continue;
     }
-    auto* dest = image_info.image_->Begin() + relocation.offset;
-    DCHECK_GE(dest, image_info.image_->Begin() + image_info.image_end_);
+    auto* dest = image_info.image_.Begin() + relocation.offset;
+    DCHECK_GE(dest, image_info.image_.Begin() + image_info.image_end_);
     DCHECK(!IsInBootImage(pair.first));
     switch (relocation.type) {
       case NativeObjectRelocationType::kArtField: {
@@ -2219,7 +2219,7 @@
     }
   }
   // Fixup the image method roots.
-  auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
+  auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_.Begin());
   for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
     ArtMethod* method = image_methods_[i];
     CHECK(method != nullptr);
@@ -2235,7 +2235,7 @@
     const ImageSection& intern_table_section = image_header->GetInternedStringsSection();
     InternTable* const intern_table = image_info.intern_table_.get();
     uint8_t* const intern_table_memory_ptr =
-        image_info.image_->Begin() + intern_table_section.Offset();
+        image_info.image_.Begin() + intern_table_section.Offset();
     const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
     CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
     // Fixup the pointers in the newly written intern table to contain image addresses.
@@ -2260,7 +2260,7 @@
   if (image_info.class_table_bytes_ > 0u) {
     const ImageSection& class_table_section = image_header->GetClassTableSection();
     uint8_t* const class_table_memory_ptr =
-        image_info.image_->Begin() + class_table_section.Offset();
+        image_info.image_.Begin() + class_table_section.Offset();
     Thread* self = Thread::Current();
     ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
 
@@ -2342,14 +2342,14 @@
   size_t offset = GetImageOffset(obj);
   size_t oat_index = GetOatIndex(obj);
   ImageInfo& image_info = GetImageInfo(oat_index);
-  auto* dst = reinterpret_cast<Object*>(image_info.image_->Begin() + offset);
+  auto* dst = reinterpret_cast<Object*>(image_info.image_.Begin() + offset);
   DCHECK_LT(offset, image_info.image_end_);
   const auto* src = reinterpret_cast<const uint8_t*>(obj);
 
   image_info.image_bitmap_->Set(dst);  // Mark the obj as live.
 
   const size_t n = obj->SizeOf();
-  DCHECK_LE(offset + n, image_info.image_->Size());
+  DCHECK_LE(offset + n, image_info.image_.Size());
   memcpy(dst, src, n);
 
   // Write in a hash code of objects which have inflated monitors or a hash code in their monitor
@@ -2456,7 +2456,7 @@
 T* ImageWriter::NativeCopyLocation(T* obj) {
   const NativeObjectRelocation relocation = GetNativeRelocation(obj);
   const ImageInfo& image_info = GetImageInfo(relocation.oat_index);
-  return reinterpret_cast<T*>(image_info.image_->Begin() + relocation.offset);
+  return reinterpret_cast<T*>(image_info.image_.Begin() + relocation.offset);
 }
 
 class ImageWriter::NativeLocationVisitor {
@@ -3011,12 +3011,12 @@
   }
   // Calculate the offset within the image.
   ImageInfo* image_info = &image_infos_[oat_index];
-  DCHECK(image_info->image_->HasAddress(dest))
-      << "MemMap range " << static_cast<const void*>(image_info->image_->Begin())
-      << "-" << static_cast<const void*>(image_info->image_->End())
+  DCHECK(image_info->image_.HasAddress(dest))
+      << "MemMap range " << static_cast<const void*>(image_info->image_.Begin())
+      << "-" << static_cast<const void*>(image_info->image_.End())
       << " does not contain " << dest;
-  size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_->Begin();
-  ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_->Begin());
+  size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_.Begin();
+  ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_.Begin());
   size_t image_end = image_header->GetClassTableSection().End();
   DCHECK_LT(offset, image_end);
   // Calculate the location index.
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 9ab9c3e..7cf555b 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -307,7 +307,7 @@
     // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
     size_t GetBinSizeSum(Bin up_to) const;
 
-    std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.
+    MemMap image_;  // Memory mapped for generating the image.
 
     // Target begin of this image. Notes: It is not valid to write here, this is the address
     // of the target image, not necessarily where image_ is mapped. The address is only valid
@@ -408,7 +408,7 @@
     size_t offset = GetImageOffset(object);
     size_t oat_index = GetOatIndex(object);
     const ImageInfo& image_info = GetImageInfo(oat_index);
-    uint8_t* dst = image_info.image_->Begin() + offset;
+    uint8_t* dst = image_info.image_.Begin() + offset;
     return reinterpret_cast<mirror::Object*>(dst);
   }
 
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 8bac720..9045c43 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -654,7 +654,7 @@
     bool verify,
     bool update_input_vdex,
     CopyOption copy_dex_files,
-    /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+    /*out*/ std::vector<MemMap>* opened_dex_files_map,
     /*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
   CHECK(write_state_ == WriteState::kAddingDexFileSources);
 
@@ -663,7 +663,7 @@
      return false;
   }
 
-  std::vector<std::unique_ptr<MemMap>> dex_files_map;
+  std::vector<MemMap> dex_files_map;
   std::vector<std::unique_ptr<const DexFile>> dex_files;
 
   // Initialize VDEX and OAT headers.
@@ -3424,12 +3424,12 @@
   const ArtDexFileLoader dex_file_loader;
   if (oat_dex_file->source_.IsZipEntry()) {
     ZipEntry* zip_entry = oat_dex_file->source_.GetZipEntry();
-    std::unique_ptr<MemMap> mem_map;
+    MemMap mem_map;
     {
       TimingLogger::ScopedTiming extract("Unzip", timings_);
-      mem_map.reset(zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg));
+      mem_map = zip_entry->ExtractToMemMap(location.c_str(), "classes.dex", &error_msg);
     }
-    if (mem_map == nullptr) {
+    if (!mem_map.IsValid()) {
       LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
       return false;
     }
@@ -3684,7 +3684,7 @@
 bool OatWriter::OpenDexFiles(
     File* file,
     bool verify,
-    /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+    /*out*/ std::vector<MemMap>* opened_dex_files_map,
     /*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
   TimingLogger::ScopedTiming split("OpenDexFiles", timings_);
 
@@ -3695,16 +3695,16 @@
 
   if (!extract_dex_files_into_vdex_) {
     std::vector<std::unique_ptr<const DexFile>> dex_files;
-    std::vector<std::unique_ptr<MemMap>> maps;
+    std::vector<MemMap> maps;
     for (OatDexFile& oat_dex_file : oat_dex_files_) {
       std::string error_msg;
-      MemMap* map = oat_dex_file.source_.GetZipEntry()->MapDirectlyOrExtract(
-          oat_dex_file.dex_file_location_data_, "zipped dex", &error_msg);
-      if (map == nullptr) {
+      maps.emplace_back(oat_dex_file.source_.GetZipEntry()->MapDirectlyOrExtract(
+          oat_dex_file.dex_file_location_data_, "zipped dex", &error_msg));
+      MemMap* map = &maps.back();
+      if (!map->IsValid()) {
         LOG(ERROR) << error_msg;
         return false;
       }
-      maps.emplace_back(map);
       // Now, open the dex file.
       const ArtDexFileLoader dex_file_loader;
       dex_files.emplace_back(dex_file_loader.Open(map->Begin(),
@@ -3735,7 +3735,7 @@
   size_t length = vdex_size_ - map_offset;
 
   std::string error_msg;
-  std::unique_ptr<MemMap> dex_files_map(MemMap::MapFile(
+  MemMap dex_files_map = MemMap::MapFile(
       length,
       PROT_READ | PROT_WRITE,
       MAP_SHARED,
@@ -3743,8 +3743,8 @@
       map_offset,
       /* low_4gb */ false,
       file->GetPath().c_str(),
-      &error_msg));
-  if (dex_files_map == nullptr) {
+      &error_msg);
+  if (!dex_files_map.IsValid()) {
     LOG(ERROR) << "Failed to mmap() dex files from oat file. File: " << file->GetPath()
                << " error: " << error_msg;
     return false;
@@ -3753,7 +3753,7 @@
   std::vector<std::unique_ptr<const DexFile>> dex_files;
   for (OatDexFile& oat_dex_file : oat_dex_files_) {
     const uint8_t* raw_dex_file =
-        dex_files_map->Begin() + oat_dex_file.dex_file_offset_ - map_offset;
+        dex_files_map.Begin() + oat_dex_file.dex_file_offset_ - map_offset;
 
     if (kIsDebugBuild) {
       // Sanity check our input files.
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 9470f8c..5202d39 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -177,7 +177,7 @@
                             bool verify,
                             bool update_input_vdex,
                             CopyOption copy_dex_files,
-                            /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+                            /*out*/ std::vector<MemMap>* opened_dex_files_map,
                             /*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
   // Initialize the writer with the given parameters.
   void Initialize(const CompilerDriver* compiler_driver,
@@ -315,7 +315,7 @@
                     bool update_input_vdex);
   bool OpenDexFiles(File* file,
                     bool verify,
-                    /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
+                    /*out*/ std::vector<MemMap>* opened_dex_files_map,
                     /*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
 
   size_t InitOatHeader(uint32_t num_dex_files, SafeMap<std::string, std::string>* key_value_store);
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index d73f10a..0264b09 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -169,7 +169,7 @@
         oat_file);
     elf_writer->Start();
     OutputStream* oat_rodata = elf_writer->StartRoData();
-    std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps;
+    std::vector<MemMap> opened_dex_files_maps;
     std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
     if (!oat_writer.WriteAndOpenDexFiles(
         vdex_file,
@@ -246,7 +246,7 @@
       return false;
     }
 
-    for (std::unique_ptr<MemMap>& map : opened_dex_files_maps) {
+    for (MemMap& map : opened_dex_files_maps) {
       opened_dex_files_maps_.emplace_back(std::move(map));
     }
     for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
@@ -261,7 +261,7 @@
 
   std::unique_ptr<QuickCompilerCallbacks> callbacks_;
 
-  std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
+  std::vector<MemMap> opened_dex_files_maps_;
   std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
 };
 
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 5cea869..c417d01 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -61,6 +61,21 @@
 // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
 static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
 
+// Retrieve iterator to a `gMaps` entry that is known to exist.
+Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
+  DCHECK(map.IsValid());
+  DCHECK(gMaps != nullptr);
+  for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
+       it != end && it->first == map.BaseBegin();
+       ++it) {
+    if (it->second == &map) {
+      return it;
+    }
+  }
+  LOG(FATAL) << "MemMap not found";
+  UNREACHABLE();
+}
+
 std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
   os << "MemMap:" << std::endl;
   for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
@@ -231,20 +246,21 @@
 }
 #endif
 
-MemMap* MemMap::MapAnonymous(const char* name,
-                             uint8_t* expected_ptr,
-                             size_t byte_count,
-                             int prot,
-                             bool low_4gb,
-                             bool reuse,
-                             std::string* error_msg,
-                             bool use_ashmem) {
+MemMap MemMap::MapAnonymous(const char* name,
+                            uint8_t* addr,
+                            size_t byte_count,
+                            int prot,
+                            bool low_4gb,
+                            bool reuse,
+                            std::string* error_msg,
+                            bool use_ashmem) {
 #ifndef __LP64__
   UNUSED(low_4gb);
 #endif
   use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
   if (byte_count == 0) {
-    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
+    *error_msg = "Empty MemMap requested.";
+    return Invalid();
   }
   size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
 
@@ -252,9 +268,9 @@
   if (reuse) {
     // reuse means it is okay that it overlaps an existing page mapping.
     // Only use this if you actually made the page reservation yourself.
-    CHECK(expected_ptr != nullptr);
+    CHECK(addr != nullptr);
 
-    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
+    DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
     flags |= MAP_FIXED;
   }
 
@@ -296,7 +312,7 @@
   // We need to store and potentially set an error number for pretty printing of errors
   int saved_errno = 0;
 
-  void* actual = MapInternal(expected_ptr,
+  void* actual = MapInternal(addr,
                              page_aligned_byte_count,
                              prot,
                              flags,
@@ -313,28 +329,33 @@
 
       *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                     "See process maps in the log.",
-                                expected_ptr,
+                                addr,
                                 page_aligned_byte_count,
                                 prot,
                                 flags,
                                 fd.get(),
                                 strerror(saved_errno));
     }
-    return nullptr;
+    return Invalid();
   }
-  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
-    return nullptr;
+  if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
+    return Invalid();
   }
-  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
-                    page_aligned_byte_count, prot, reuse);
+  return MemMap(name,
+                reinterpret_cast<uint8_t*>(actual),
+                byte_count,
+                actual,
+                page_aligned_byte_count,
+                prot,
+                reuse);
 }
 
-MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
+MemMap MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
   if (byte_count == 0) {
-    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
+    return Invalid();
   }
   const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
-  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
+  return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
 }
 
 template<typename A, typename B>
@@ -342,19 +363,18 @@
   return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
 }
 
-bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
 #if !HAVE_MREMAP_SYSCALL
-  UNUSED(source_ptr);
+  UNUSED(source);
   *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
   return false;
 #else  // !HAVE_MREMAP_SYSCALL
-  CHECK(source_ptr != nullptr);
-  CHECK(*source_ptr != nullptr);
+  CHECK(source != nullptr);
+  CHECK(source->IsValid());
   if (!MemMap::kCanReplaceMapping) {
     *error = "Unable to perform atomic replace due to runtime environment!";
     return false;
   }
-  MemMap* source = *source_ptr;
   // neither can be reuse.
   if (source->reuse_ || reuse_) {
     *error = "One or both mappings is not a real mmap!";
@@ -406,12 +426,9 @@
   // them later.
   size_t new_base_size = std::max(source->base_size_, base_size_);
 
-  // Delete the old source, don't unmap it though (set reuse) since it is already gone.
-  *source_ptr = nullptr;
+  // Invalidate *source; don't unmap it though, since the mapping is already gone.
   size_t source_size = source->size_;
-  source->already_unmapped_ = true;
-  delete source;
-  source = nullptr;
+  source->Invalidate();
 
   size_ = source_size;
   base_size_ = new_base_size;
@@ -422,16 +439,16 @@
 #endif  // !HAVE_MREMAP_SYSCALL
 }
 
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
-                                 size_t byte_count,
-                                 int prot,
-                                 int flags,
-                                 int fd,
-                                 off_t start,
-                                 bool low_4gb,
-                                 bool reuse,
-                                 const char* filename,
-                                 std::string* error_msg) {
+MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
+                                size_t byte_count,
+                                int prot,
+                                int flags,
+                                int fd,
+                                off_t start,
+                                bool low_4gb,
+                                bool reuse,
+                                const char* filename,
+                                std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
 
@@ -452,7 +469,7 @@
   }
 
   if (byte_count == 0) {
-    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
+    *error_msg = "Empty MemMap requested.";
+    return Invalid();
   }
   // Adjust 'offset' to be page-aligned as required by mmap.
   int page_offset = start % kPageSize;
@@ -491,10 +508,10 @@
                                 static_cast<int64_t>(page_aligned_offset), filename,
                                 strerror(saved_errno));
     }
-    return nullptr;
+    return Invalid();
   }
   if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
-    return nullptr;
+    return Invalid();
   }
   if (redzone_size != 0) {
     const uint8_t *real_start = actual + page_offset;
@@ -506,14 +523,27 @@
     page_aligned_byte_count -= redzone_size;
   }
 
-  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
-                    prot, reuse, redzone_size);
+  return MemMap(filename,
+                actual + page_offset,
+                byte_count,
+                actual,
+                page_aligned_byte_count,
+                prot,
+                reuse,
+                redzone_size);
+}
+
+MemMap::MemMap(MemMap&& other)
+    : MemMap() {
+  swap(other);
 }
 
 MemMap::~MemMap() {
-  if (base_begin_ == nullptr && base_size_ == 0) {
-    return;
-  }
+  Reset();
+}
+
+void MemMap::DoReset() {
+  DCHECK(IsValid());
 
   // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
   // before it is returned to the system.
@@ -533,19 +563,56 @@
     }
   }
 
+  Invalidate();
+}
+
+void MemMap::Invalidate() {
+  DCHECK(IsValid());
+
   // Remove it from gMaps.
   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
-  bool found = false;
-  DCHECK(gMaps != nullptr);
-  for (auto it = gMaps->lower_bound(base_begin_), end = gMaps->end();
-       it != end && it->first == base_begin_; ++it) {
-    if (it->second == this) {
-      found = true;
-      gMaps->erase(it);
-      break;
+  auto it = GetGMapsEntry(*this);
+  gMaps->erase(it);
+
+  // Mark it as invalid.
+  base_size_ = 0u;
+  DCHECK(!IsValid());
+}
+
+void MemMap::swap(MemMap& other) {
+  if (IsValid() || other.IsValid()) {
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+    DCHECK(gMaps != nullptr);
+    auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
+    auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
+    if (IsValid()) {
+      DCHECK(this_it != gMaps->end());
+      DCHECK_EQ(this_it->second, this);
+      this_it->second = &other;
     }
+    if (other.IsValid()) {
+      DCHECK(other_it != gMaps->end());
+      DCHECK_EQ(other_it->second, &other);
+      other_it->second = this;
+    }
+    // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
+    // with the `gMaps` key when other threads try to use `gMaps`.
+    SwapMembers(other);
+  } else {
+    SwapMembers(other);
   }
-  CHECK(found) << "MemMap not found";
+}
+
+void MemMap::SwapMembers(MemMap& other) {
+  name_.swap(other.name_);
+  std::swap(begin_, other.begin_);
+  std::swap(size_, other.size_);
+  std::swap(base_begin_, other.base_begin_);
+  std::swap(base_size_, other.base_size_);
+  std::swap(prot_, other.prot_);
+  std::swap(reuse_, other.reuse_);
+  std::swap(already_unmapped_, other.already_unmapped_);
+  std::swap(redzone_size_, other.redzone_size_);
 }
 
 MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
@@ -568,8 +635,11 @@
   }
 }
 
-MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
-                           std::string* error_msg, bool use_ashmem) {
+MemMap MemMap::RemapAtEnd(uint8_t* new_end,
+                          const char* tail_name,
+                          int tail_prot,
+                          std::string* error_msg,
+                          bool use_ashmem) {
   use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
@@ -583,11 +653,11 @@
   uint8_t* new_base_end = new_end;
   DCHECK_LE(new_base_end, old_base_end);
   if (new_base_end == old_base_end) {
-    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
+    return Invalid();
   }
-  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
-  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
-  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
+  size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
+  size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
+  DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
   size_t tail_size = old_end - new_end;
   uint8_t* tail_base_begin = new_base_end;
   size_t tail_base_size = old_base_end - new_base_end;
@@ -595,7 +665,7 @@
   DCHECK_ALIGNED(tail_base_size, kPageSize);
 
   unique_fd fd;
-  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+  int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
   if (use_ashmem) {
     // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
     // prefixed "dalvik-".
@@ -606,23 +676,14 @@
     if (fd.get() == -1) {
       *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                 tail_name, strerror(errno));
-      return nullptr;
+      return Invalid();
     }
   }
 
   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
-  // Unmap/map the tail region.
-  int result = TargetMUnmap(tail_base_begin, tail_base_size);
-  if (result == -1) {
-    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
-    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
-                              tail_base_begin, tail_base_size, name_.c_str());
-    return nullptr;
-  }
-  // Don't cause memory allocation between the munmap and the mmap
-  // calls. Otherwise, libc (or something else) might take this memory
-  // region. Note this isn't perfect as there's no way to prevent
-  // other threads to try to take this memory region here.
+  // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
+  // removes old mappings for the overlapping region. This makes the operation atomic
+  // and prevents other threads from racing to allocate memory in the requested region.
   uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
                                                           tail_base_size,
                                                           tail_prot,
@@ -634,9 +695,18 @@
     *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                               "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                               fd.get());
-    return nullptr;
+    return Invalid();
   }
-  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
+  // Update *this.
+  if (new_base_size == 0u) {
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+    auto it = GetGMapsEntry(*this);
+    gMaps->erase(it);
+  }
+  size_ = new_size;
+  base_size_ = new_base_size;
+  // Return the new mapping.
+  return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
 }
 
 void MemMap::MadviseDontNeedAndZero() {
@@ -675,15 +745,15 @@
   return false;
 }
 
-bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
+bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
-  CHECK(begin_map != nullptr);
-  CHECK(end_map != nullptr);
+  CHECK(begin_map.IsValid());
+  CHECK(end_map.IsValid());
   CHECK(HasMemMap(begin_map));
   CHECK(HasMemMap(end_map));
-  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
-  MemMap* map = begin_map;
-  while (map->BaseBegin() != end_map->BaseBegin()) {
+  CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
+  MemMap* map = &begin_map;
+  while (map->BaseBegin() != end_map.BaseBegin()) {
     MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
     if (next_map == nullptr) {
       // Found a gap.
@@ -758,11 +828,11 @@
   }
 }
 
-bool MemMap::HasMemMap(MemMap* map) {
-  void* base_begin = map->BaseBegin();
+bool MemMap::HasMemMap(MemMap& map) {
+  void* base_begin = map.BaseBegin();
   for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
        it != end && it->first == base_begin; ++it) {
-    if (it->second == map) {
+    if (it->second == &map) {
       return true;
     }
   }
@@ -1049,6 +1119,7 @@
   CHECK_EQ(size_, base_size_) << "Unsupported";
   CHECK_GT(size, static_cast<size_t>(kPageSize));
   CHECK_ALIGNED(size, kPageSize);
+  CHECK(!reuse_);
   if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
       IsAlignedParam(base_size_, size)) {
     // Already aligned.
@@ -1079,17 +1150,17 @@
         << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
   }
   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+  if (base_begin < aligned_base_begin) {
+    auto it = GetGMapsEntry(*this);
+    // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
+    gMaps->erase(it);
+    gMaps->insert(std::make_pair(aligned_base_begin, this));
+  }
   base_begin_ = aligned_base_begin;
   base_size_ = aligned_base_size;
   begin_ = aligned_base_begin;
   size_ = aligned_base_size;
   DCHECK(gMaps != nullptr);
-  if (base_begin < aligned_base_begin) {
-    auto it = gMaps->find(base_begin);
-    CHECK(it != gMaps->end()) << "MemMap not found";
-    gMaps->erase(it);
-    gMaps->insert(std::make_pair(base_begin_, this));
-  }
 }
 
 }  // namespace art
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 1979357..525fade 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -60,6 +60,37 @@
  public:
   static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
 
+  // Creates an invalid mapping.
+  MemMap() {}
+
+  // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
+  static MemMap Invalid() {
+    return MemMap();
+  }
+
+  MemMap(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_);
+  MemMap& operator=(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_) {
+    Reset();
+    swap(other);
+    return *this;
+  }
+
+  // Releases the memory mapping.
+  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
+
+  // Swap two MemMaps.
+  void swap(MemMap& other);
+
+  void Reset() {
+    if (IsValid()) {
+      DoReset();
+    }
+  }
+
+  bool IsValid() const {
+    return base_size_ != 0u;
+  }
+
-  // Replace the data in this memmmap with the data in the memmap pointed to by source. The caller
+  // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
   // relinquishes ownership of the source mmap.
   //
@@ -74,15 +105,14 @@
   //   * mremap must succeed when called on the mappings.
   //
   // If this call succeeds it will return true and:
-  //   * Deallocate *source
-  //   * Sets *source to nullptr
+  //   * Invalidate *source
   //   * The protection of this will remain the same.
   //   * The size of this will be the size of the source
   //   * The data in this will be the data from source.
   //
   // If this call fails it will return false and make no changes to *source or this. The ownership
   // of the source mmap is returned to the caller.
-  bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+  bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
 
   // Request an anonymous region of length 'byte_count' and a requested base address.
   // Use null as the requested base address if you don't care.
@@ -92,34 +122,34 @@
   // 'name' will be used -- on systems that support it -- to give the mapping
   // a name.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapAnonymous(const char* name,
-                              uint8_t* addr,
-                              size_t byte_count,
-                              int prot,
-                              bool low_4gb,
-                              bool reuse,
-                              std::string* error_msg,
-                              bool use_ashmem = true);
+  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
+  static MemMap MapAnonymous(const char* name,
+                             uint8_t* addr,
+                             size_t byte_count,
+                             int prot,
+                             bool low_4gb,
+                             bool reuse,
+                             std::string* error_msg,
+                             bool use_ashmem = true);
 
   // Create placeholder for a region allocated by direct call to mmap.
   // This is useful when we do not have control over the code calling mmap,
   // but when we still want to keep track of it in the list.
   // The region is not considered to be owned and will not be unmmaped.
-  static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);
+  static MemMap MapDummy(const char* name, uint8_t* addr, size_t byte_count);
 
   // Map part of a file, taking care of non-page aligned offsets.  The
   // "start" offset is absolute, not relative.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapFile(size_t byte_count,
-                         int prot,
-                         int flags,
-                         int fd,
-                         off_t start,
-                         bool low_4gb,
-                         const char* filename,
-                         std::string* error_msg) {
+  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
+  static MemMap MapFile(size_t byte_count,
+                        int prot,
+                        int flags,
+                        int fd,
+                        off_t start,
+                        bool low_4gb,
+                        const char* filename,
+                        std::string* error_msg) {
     return MapFileAtAddress(nullptr,
                             byte_count,
                             prot,
@@ -139,20 +169,17 @@
   // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
   // printing /proc/maps takes several milliseconds in the worst case.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapFileAtAddress(uint8_t* addr,
-                                  size_t byte_count,
-                                  int prot,
-                                  int flags,
-                                  int fd,
-                                  off_t start,
-                                  bool low_4gb,
-                                  bool reuse,
-                                  const char* filename,
-                                  std::string* error_msg);
-
-  // Releases the memory mapping.
-  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
+  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
+  static MemMap MapFileAtAddress(uint8_t* addr,
+                                 size_t byte_count,
+                                 int prot,
+                                 int flags,
+                                 int fd,
+                                 off_t start,
+                                 bool low_4gb,
+                                 bool reuse,
+                                 const char* filename,
+                                 std::string* error_msg);
 
   const std::string& GetName() const {
     return name_;
@@ -200,13 +227,13 @@
   }
 
   // Unmap the pages at end and remap them to create another memory map.
-  MemMap* RemapAtEnd(uint8_t* new_end,
-                     const char* tail_name,
-                     int tail_prot,
-                     std::string* error_msg,
-                     bool use_ashmem = true);
+  MemMap RemapAtEnd(uint8_t* new_end,
+                    const char* tail_name,
+                    int tail_prot,
+                    std::string* error_msg,
+                    bool use_ashmem = true);
 
-  static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
+  static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
       REQUIRES(!MemMap::mem_maps_lock_);
   static void DumpMaps(std::ostream& os, bool terse = false)
       REQUIRES(!MemMap::mem_maps_lock_);
@@ -240,9 +267,13 @@
          bool reuse,
          size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
 
+  void DoReset();
+  void Invalidate();
+  void SwapMembers(MemMap& other);
+
   static void DumpMapsLocked(std::ostream& os, bool terse)
       REQUIRES(MemMap::mem_maps_lock_);
-  static bool HasMemMap(MemMap* map)
+  static bool HasMemMap(MemMap& map)
       REQUIRES(MemMap::mem_maps_lock_);
   static MemMap* GetLargestMemMapAt(void* address)
       REQUIRES(MemMap::mem_maps_lock_);
@@ -271,23 +302,23 @@
                               size_t byte_count,
                               std::string* error_msg);
 
-  const std::string name_;
-  uint8_t* begin_;  // Start of data. May be changed by AlignBy.
-  size_t size_;  // Length of data.
+  std::string name_;
+  uint8_t* begin_ = nullptr;    // Start of data. May be changed by AlignBy.
+  size_t size_ = 0u;            // Length of data.
 
-  void* base_begin_;  // Page-aligned base address. May be changed by AlignBy.
-  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
-  int prot_;  // Protection of the map.
+  void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
+  size_t base_size_ = 0u;       // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
+  int prot_ = 0;                // Protection of the map.
 
   // When reuse_ is true, this is just a view of an existing mapping
   // and we do not take ownership and are not responsible for
   // unmapping.
-  const bool reuse_;
+  bool reuse_ = false;
 
   // When already_unmapped_ is true the destructor will not call munmap.
-  bool already_unmapped_;
+  bool already_unmapped_ = false;
 
-  const size_t redzone_size_;
+  size_t redzone_size_ = 0u;
 
 #if USE_ART_LOW_4G_ALLOCATOR
   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
@@ -309,6 +340,10 @@
   friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
 };
 
+inline void swap(MemMap& lhs, MemMap& rhs) {
+  lhs.swap(rhs);
+}
+
 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
 
 // Zero and release pages if possible, no requirements on alignments.
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index c575c7a..b2f5c72 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -30,14 +30,6 @@
 
 class MemMapTest : public CommonArtTest {
  public:
-  static uint8_t* BaseBegin(MemMap* mem_map) {
-    return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
-  }
-
-  static size_t BaseSize(MemMap* mem_map) {
-    return mem_map->base_size_;
-  }
-
   static bool IsAddressMapped(void* addr) {
     bool res = msync(addr, 1, MS_SYNC) == 0;
     if (!res && errno != ENOMEM) {
@@ -60,15 +52,15 @@
   static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
     // Find a valid map address and unmap it before returning.
     std::string error_msg;
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
-                                                     nullptr,
-                                                     size,
-                                                     PROT_READ,
-                                                     low_4gb,
-                                                     false,
-                                                     &error_msg));
-    CHECK(map != nullptr);
-    return map->Begin();
+    MemMap map = MemMap::MapAnonymous("temp",
+                                      /* addr */ nullptr,
+                                      size,
+                                      PROT_READ,
+                                      low_4gb,
+                                      /* reuse */ false,
+                                      &error_msg);
+    CHECK(map.IsValid());
+    return map.Begin();
   }
 
   static void RemapAtEndTest(bool low_4gb) {
@@ -76,37 +68,38 @@
     // Cast the page size to size_t.
     const size_t page_size = static_cast<size_t>(kPageSize);
     // Map a two-page memory region.
-    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
-                                      nullptr,
-                                      2 * page_size,
-                                      PROT_READ | PROT_WRITE,
-                                      low_4gb,
-                                      false,
-                                      &error_msg);
+    MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
+                                     /* addr */ nullptr,
+                                     2 * page_size,
+                                     PROT_READ | PROT_WRITE,
+                                     low_4gb,
+                                     /* reuse */ false,
+                                     &error_msg);
     // Check its state and write to it.
-    uint8_t* base0 = m0->Begin();
+    ASSERT_TRUE(m0.IsValid());
+    uint8_t* base0 = m0.Begin();
     ASSERT_TRUE(base0 != nullptr) << error_msg;
-    size_t size0 = m0->Size();
-    EXPECT_EQ(m0->Size(), 2 * page_size);
-    EXPECT_EQ(BaseBegin(m0), base0);
-    EXPECT_EQ(BaseSize(m0), size0);
+    size_t size0 = m0.Size();
+    EXPECT_EQ(m0.Size(), 2 * page_size);
+    EXPECT_EQ(m0.BaseBegin(), base0);
+    EXPECT_EQ(m0.BaseSize(), size0);
     memset(base0, 42, 2 * page_size);
     // Remap the latter half into a second MemMap.
-    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
-                                "MemMapTest_RemapAtEndTest_map1",
-                                PROT_READ | PROT_WRITE,
-                                &error_msg);
+    MemMap m1 = m0.RemapAtEnd(base0 + page_size,
+                              "MemMapTest_RemapAtEndTest_map1",
+                              PROT_READ | PROT_WRITE,
+                              &error_msg);
     // Check the states of the two maps.
-    EXPECT_EQ(m0->Begin(), base0) << error_msg;
-    EXPECT_EQ(m0->Size(), page_size);
-    EXPECT_EQ(BaseBegin(m0), base0);
-    EXPECT_EQ(BaseSize(m0), page_size);
-    uint8_t* base1 = m1->Begin();
-    size_t size1 = m1->Size();
+    EXPECT_EQ(m0.Begin(), base0) << error_msg;
+    EXPECT_EQ(m0.Size(), page_size);
+    EXPECT_EQ(m0.BaseBegin(), base0);
+    EXPECT_EQ(m0.BaseSize(), page_size);
+    uint8_t* base1 = m1.Begin();
+    size_t size1 = m1.Size();
     EXPECT_EQ(base1, base0 + page_size);
     EXPECT_EQ(size1, page_size);
-    EXPECT_EQ(BaseBegin(m1), base1);
-    EXPECT_EQ(BaseSize(m1), size1);
+    EXPECT_EQ(m1.BaseBegin(), base1);
+    EXPECT_EQ(m1.BaseSize(), size1);
     // Write to the second region.
     memset(base1, 43, page_size);
     // Check the contents of the two regions.
@@ -117,13 +110,18 @@
       EXPECT_EQ(base1[i], 43);
     }
     // Unmap the first region.
-    delete m0;
+    m0.Reset();
     // Make sure the second region is still accessible after the first
     // region is unmapped.
     for (size_t i = 0; i < page_size; ++i) {
       EXPECT_EQ(base1[i], 43);
     }
-    delete m1;
+    // Remap the whole region of m1 as a fresh anonymous mapping; m1 becomes invalid.
+    MemMap m2 = m1.RemapAtEnd(m1.Begin(),
+                              "MemMapTest_RemapAtEndTest_map1",
+                              PROT_READ | PROT_WRITE,
+                              &error_msg);
+    ASSERT_TRUE(m2.IsValid()) << error_msg;
+    ASSERT_FALSE(m1.IsValid());
   }
 
   void CommonInit() {
@@ -168,232 +166,241 @@
 #if HAVE_MREMAP_SYSCALL
 TEST_F(MemMapTest, ReplaceMapping_SameSize) {
   std::string error_msg;
-  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                                    nullptr,
-                                                    kPageSize,
-                                                    PROT_READ,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(dest != nullptr);
-  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                        nullptr,
-                                        kPageSize,
-                                        PROT_WRITE | PROT_READ,
-                                        false,
-                                        false,
-                                        &error_msg);
-  ASSERT_TRUE(source != nullptr);
-  void* source_addr = source->Begin();
-  void* dest_addr = dest->Begin();
+  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+                                     /* addr */ nullptr,
+                                     kPageSize,
+                                     PROT_READ,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(dest.IsValid());
+  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+                                       /* addr */ nullptr,
+                                       kPageSize,
+                                       PROT_WRITE | PROT_READ,
+                                       /* low_4gb */ false,
+                                       /* reuse */ false,
+                                       &error_msg);
+  ASSERT_TRUE(source.IsValid());
+  void* source_addr = source.Begin();
+  void* dest_addr = dest.Begin();
   ASSERT_TRUE(IsAddressMapped(source_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
 
   std::vector<uint8_t> data = RandomData(kPageSize);
-  memcpy(source->Begin(), data.data(), data.size());
+  memcpy(source.Begin(), data.data(), data.size());
 
-  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
 
   ASSERT_FALSE(IsAddressMapped(source_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
-  ASSERT_TRUE(source == nullptr);
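+  // On success, ReplaceWith() empties the source map instead of deleting it.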
+  ASSERT_FALSE(source.IsValid());
 
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
 
-  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
 }
 
 TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
   std::string error_msg;
-  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                                    nullptr,
-                                                    5 * kPageSize,  // Need to make it larger
-                                                                    // initially so we know
-                                                                    // there won't be mappings
-                                                                    // in the way we we move
-                                                                    // source.
-                                                    PROT_READ,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(dest != nullptr);
-  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                        nullptr,
-                                        3 * kPageSize,
-                                        PROT_WRITE | PROT_READ,
-                                        false,
-                                        false,
-                                        &error_msg);
-  ASSERT_TRUE(source != nullptr);
-  uint8_t* source_addr = source->Begin();
-  uint8_t* dest_addr = dest->Begin();
+  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+                                     /* addr */ nullptr,
+                                     5 * kPageSize,  // Need to make it larger
+                                                     // initially so we know
+                                                     // there won't be mappings
+                                                     // in the way when we move
+                                                     // source.
+                                     PROT_READ,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(dest.IsValid());
+  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+                                       /* addr */ nullptr,
+                                       3 * kPageSize,
+                                       PROT_WRITE | PROT_READ,
+                                       /* low_4gb */ false,
+                                       /* reuse */ false,
+                                       &error_msg);
+  ASSERT_TRUE(source.IsValid());
+  uint8_t* source_addr = source.Begin();
+  uint8_t* dest_addr = dest.Begin();
   ASSERT_TRUE(IsAddressMapped(source_addr));
 
   // Fill the source with random data.
   std::vector<uint8_t> data = RandomData(3 * kPageSize);
-  memcpy(source->Begin(), data.data(), data.size());
+  memcpy(source.Begin(), data.data(), data.size());
 
   // Make the dest smaller so that we know we'll have space.
-  dest->SetSize(kPageSize);
+  dest.SetSize(kPageSize);
 
   ASSERT_TRUE(IsAddressMapped(dest_addr));
   ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
 
-  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
 
   ASSERT_FALSE(IsAddressMapped(source_addr));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
-  ASSERT_TRUE(source == nullptr);
+  ASSERT_FALSE(source.IsValid());
 
-  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
 }
 
 TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
   std::string error_msg;
-  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                                    nullptr,
-                                                    3 * kPageSize,
-                                                    PROT_READ,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(dest != nullptr);
-  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                        nullptr,
-                                        kPageSize,
-                                        PROT_WRITE | PROT_READ,
-                                        false,
-                                        false,
-                                        &error_msg);
-  ASSERT_TRUE(source != nullptr);
-  uint8_t* source_addr = source->Begin();
-  uint8_t* dest_addr = dest->Begin();
+  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+                                     /* addr */ nullptr,
+                                     3 * kPageSize,
+                                     PROT_READ,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(dest.IsValid());
+  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+                                       /* addr */ nullptr,
+                                       kPageSize,
+                                       PROT_WRITE | PROT_READ,
+                                       /* low_4gb */ false,
+                                       /* reuse */ false,
+                                       &error_msg);
+  ASSERT_TRUE(source.IsValid());
+  uint8_t* source_addr = source.Begin();
+  uint8_t* dest_addr = dest.Begin();
   ASSERT_TRUE(IsAddressMapped(source_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
 
   std::vector<uint8_t> data = RandomData(kPageSize);
-  memcpy(source->Begin(), data.data(), kPageSize);
+  memcpy(source.Begin(), data.data(), kPageSize);
 
-  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
 
   ASSERT_FALSE(IsAddressMapped(source_addr));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
   ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
-  ASSERT_TRUE(source == nullptr);
+  ASSERT_FALSE(source.IsValid());
 
-  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
 }
 
 TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
   std::string error_msg;
-  std::unique_ptr<MemMap> dest(
+  MemMap dest =
       MemMap::MapAnonymous(
           "MapAnonymousEmpty-atomic-replace-dest",
-          nullptr,
+          /* addr */ nullptr,
           3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
-                          // the way we we move source.
+                          // the way when we move source.
           PROT_READ | PROT_WRITE,
-          false,
-          false,
-          &error_msg));
-  ASSERT_TRUE(dest != nullptr);
+          /* low_4gb */ false,
+          /* reuse */ false,
+          &error_msg);
+  ASSERT_TRUE(dest.IsValid());
   // Resize down to 1 page so we can remap the rest.
-  dest->SetSize(kPageSize);
+  dest.SetSize(kPageSize);
   // Create source from the last 2 pages
-  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                        dest->Begin() + kPageSize,
-                                        2 * kPageSize,
-                                        PROT_WRITE | PROT_READ,
-                                        false,
-                                        false,
-                                        &error_msg);
-  ASSERT_TRUE(source != nullptr);
-  MemMap* orig_source = source;
-  ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
-  uint8_t* source_addr = source->Begin();
-  uint8_t* dest_addr = dest->Begin();
+  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+                                       dest.Begin() + kPageSize,
+                                       2 * kPageSize,
+                                       PROT_WRITE | PROT_READ,
+                                       /* low_4gb */ false,
+                                       /* reuse */ false,
+                                       &error_msg);
+  ASSERT_TRUE(source.IsValid());
+  ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
+  uint8_t* source_addr = source.Begin();
+  uint8_t* dest_addr = dest.Begin();
   ASSERT_TRUE(IsAddressMapped(source_addr));
 
   // Fill the source and dest with random data.
   std::vector<uint8_t> data = RandomData(2 * kPageSize);
-  memcpy(source->Begin(), data.data(), data.size());
+  memcpy(source.Begin(), data.data(), data.size());
   std::vector<uint8_t> dest_data = RandomData(kPageSize);
-  memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+  memcpy(dest.Begin(), dest_data.data(), dest_data.size());
 
   ASSERT_TRUE(IsAddressMapped(dest_addr));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
 
-  ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+  ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
 
-  ASSERT_TRUE(source == orig_source);
   ASSERT_TRUE(IsAddressMapped(source_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
-  ASSERT_EQ(source->Size(), data.size());
-  ASSERT_EQ(dest->Size(), dest_data.size());
+  ASSERT_EQ(source.Size(), data.size());
+  ASSERT_EQ(dest.Size(), dest_data.size());
 
-  ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
-  ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
-
-  delete source;
+  ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
+  ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
 }
 #endif  // HAVE_MREMAP_SYSCALL
 
 TEST_F(MemMapTest, MapAnonymousEmpty) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                                   nullptr,
-                                                   0,
-                                                   PROT_READ,
-                                                   false,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
-  ASSERT_TRUE(error_msg.empty());
-  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                 nullptr,
-                                 kPageSize,
-                                 PROT_READ | PROT_WRITE,
-                                 false,
-                                 false,
-                                 &error_msg));
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
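+  // Zero-size mappings are now forbidden, so this request is expected to fail
+  // and set an error message.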
+  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
+                                    /* addr */ nullptr,
+                                    0,
+                                    PROT_READ,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_FALSE(map.IsValid()) << error_msg;
+  ASSERT_FALSE(error_msg.empty());
+
+  error_msg.clear();
+  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
+                             /* addr */ nullptr,
+                             kPageSize,
+                             PROT_READ | PROT_WRITE,
+                             /* low_4gb */ false,
+                             /* reuse */ false,
+                             &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
 }
 
 TEST_F(MemMapTest, MapAnonymousFailNullError) {
   CommonInit();
   // Test that we don't crash with a null error_str when mapping at an invalid location.
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
-                                                    reinterpret_cast<uint8_t*>(kPageSize),
-                                                    0x20000,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    nullptr));
-  ASSERT_EQ(nullptr, map.get());
+  MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
+                                    reinterpret_cast<uint8_t*>(kPageSize),
+                                    0x20000,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    nullptr);
+  ASSERT_FALSE(map.IsValid());
 }
 
 #ifdef __LP64__
 TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                                   nullptr,
-                                                   kPageSize,
-                                                   PROT_READ | PROT_WRITE,
-                                                   true,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
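+  // As above, a zero-size request is expected to fail even with low_4gb set.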
+  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
+                                    /* addr */ nullptr,
+                                    0,
+                                    PROT_READ,
+                                    /* low_4gb */ true,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_FALSE(map.IsValid()) << error_msg;
+  ASSERT_FALSE(error_msg.empty());
+
+  error_msg.clear();
+  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
+                             /* addr */ nullptr,
+                             kPageSize,
+                             PROT_READ | PROT_WRITE,
+                             /* low_4gb */ true,
+                             /* reuse */ false,
+                             &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
 }
 TEST_F(MemMapTest, MapFile32Bit) {
   CommonInit();
@@ -402,18 +409,18 @@
   constexpr size_t kMapSize = kPageSize;
   std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
   ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
-  std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
-                                              PROT_READ,
-                                              MAP_PRIVATE,
-                                              scratch_file.GetFd(),
-                                              /*start*/0,
-                                              /*low_4gb*/true,
-                                              scratch_file.GetFilename().c_str(),
-                                              &error_msg));
-  ASSERT_TRUE(map != nullptr) << error_msg;
+  MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+                               PROT_READ,
+                               MAP_PRIVATE,
+                               scratch_file.GetFd(),
+                               /*start*/0,
+                               /*low_4gb*/true,
+                               scratch_file.GetFilename().c_str(),
+                               &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map->Size(), kMapSize);
-  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+  ASSERT_EQ(map.Size(), kMapSize);
+  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
 }
 #endif
 
@@ -423,36 +430,36 @@
   // Find a valid address.
   uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
   // Map at an address that should work, which should succeed.
-  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
-                                                    valid_address,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
+                                     valid_address,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map0.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_TRUE(map0->BaseBegin() == valid_address);
+  ASSERT_TRUE(map0.BaseBegin() == valid_address);
   // Map at an unspecified address, which should succeed.
-  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
-                                                    nullptr,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
+                                     /* addr */ nullptr,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_TRUE(map1->BaseBegin() != nullptr);
+  ASSERT_TRUE(map1.BaseBegin() != nullptr);
   // Attempt to map at the same address, which should fail.
-  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
-                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
+  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
+                                     reinterpret_cast<uint8_t*>(map1.BaseBegin()),
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_FALSE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(!error_msg.empty());
 }
 
@@ -480,23 +487,23 @@
   // Try all addresses starting from 2GB to 4GB.
   size_t start_addr = 2 * GB;
   std::string error_msg;
-  std::unique_ptr<MemMap> map;
+  MemMap map;
   for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
-    map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
-                                   reinterpret_cast<uint8_t*>(start_addr),
-                                   size,
-                                   PROT_READ | PROT_WRITE,
-                                   /*low_4gb*/true,
-                                   false,
-                                   &error_msg));
-    if (map != nullptr) {
+    map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+                               reinterpret_cast<uint8_t*>(start_addr),
+                               size,
+                               PROT_READ | PROT_WRITE,
+                               /*low_4gb*/ true,
+                               /* reuse */ false,
+                               &error_msg);
+    if (map.IsValid()) {
       break;
     }
   }
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
-  ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
+  ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
+  ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
 }
 
 TEST_F(MemMapTest, MapAnonymousOverflow) {
@@ -504,14 +511,14 @@
   std::string error_msg;
   uintptr_t ptr = 0;
   ptr -= kPageSize;  // Now it's close to the top.
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
-                                                   reinterpret_cast<uint8_t*>(ptr),
-                                                   2 * kPageSize,  // brings it over the top.
-                                                   PROT_READ | PROT_WRITE,
-                                                   false,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_EQ(nullptr, map.get());
+  MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
+                                    reinterpret_cast<uint8_t*>(ptr),
+                                    2 * kPageSize,  // brings it over the top.
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
 }
 
@@ -519,29 +526,29 @@
 TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(
+  MemMap map =
       MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                            reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                            kPageSize,
                            PROT_READ | PROT_WRITE,
-                           true,
-                           false,
-                           &error_msg));
-  ASSERT_EQ(nullptr, map.get());
+                           /* low_4gb */ true,
+                           /* reuse */ false,
+                           &error_msg);
+  ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
 }
 
 TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
-                                                   reinterpret_cast<uint8_t*>(0xF0000000),
-                                                   0x20000000,
-                                                   PROT_READ | PROT_WRITE,
-                                                   true,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_EQ(nullptr, map.get());
+  MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
+                                    reinterpret_cast<uint8_t*>(0xF0000000),
+                                    0x20000000,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ true,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
 }
 #endif
@@ -549,23 +556,23 @@
 TEST_F(MemMapTest, MapAnonymousReuse) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
-                                                   nullptr,
-                                                   0x20000,
-                                                   PROT_READ | PROT_WRITE,
-                                                   false,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_NE(nullptr, map.get());
+  MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
+                                    /* addr */ nullptr,
+                                    0x20000,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_TRUE(map.IsValid());
   ASSERT_TRUE(error_msg.empty());
-  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
-                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
-                                                    0x10000,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    true,
-                                                    &error_msg));
-  ASSERT_NE(nullptr, map2.get());
+  MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
+                                     reinterpret_cast<uint8_t*>(map.BaseBegin()),
+                                     0x10000,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ true,
+                                     &error_msg);
+  ASSERT_TRUE(map2.IsValid());
   ASSERT_TRUE(error_msg.empty());
 }
 
@@ -574,65 +581,65 @@
   std::string error_msg;
   constexpr size_t kNumPages = 3;
   // Map a 3-page mem map.
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
-                                                   nullptr,
-                                                   kPageSize * kNumPages,
-                                                   PROT_READ | PROT_WRITE,
-                                                   false,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  MemMap map = MemMap::MapAnonymous("MapAnonymous0",
+                                    /* addr */ nullptr,
+                                    kPageSize * kNumPages,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   // Record the base address.
-  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
+  uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
   // Unmap it.
-  map.reset();
+  map.Reset();
 
   // Map at the same address, but in page-sized separate mem maps,
   // assuming the space at the address is still available.
-  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
-                                                    map_base,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
+                                     map_base,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map0.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
-                                                    map_base + kPageSize,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
+                                     map_base + kPageSize,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
-                                                    map_base + kPageSize * 2,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
+  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
+                                     map_base + kPageSize * 2,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
 
   // One-map cases.
-  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
-  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
-  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));
 
   // Two or three-map cases.
-  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
-  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
-  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));
 
   // Unmap the middle one.
-  map1.reset();
+  map1.Reset();
 
   // Should return false now that there's a gap in the middle.
-  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+  ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
 }
 
 TEST_F(MemMapTest, AlignBy) {
@@ -641,52 +648,53 @@
   // Cast the page size to size_t.
   const size_t page_size = static_cast<size_t>(kPageSize);
   // Map a region.
-  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
-                                                  nullptr,
-                                                  14 * page_size,
-                                                  PROT_READ | PROT_WRITE,
-                                                  false,
-                                                  false,
-                                                  &error_msg));
-  uint8_t* base0 = m0->Begin();
+  MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
+                                   /* addr */ nullptr,
+                                   14 * page_size,
+                                   PROT_READ | PROT_WRITE,
+                                   /* low_4gb */ false,
+                                   /* reuse */ false,
+                                   &error_msg);
+  ASSERT_TRUE(m0.IsValid());
+  uint8_t* base0 = m0.Begin();
   ASSERT_TRUE(base0 != nullptr) << error_msg;
-  ASSERT_EQ(m0->Size(), 14 * page_size);
-  ASSERT_EQ(BaseBegin(m0.get()), base0);
-  ASSERT_EQ(BaseSize(m0.get()), m0->Size());
+  ASSERT_EQ(m0.Size(), 14 * page_size);
+  ASSERT_EQ(m0.BaseBegin(), base0);
+  ASSERT_EQ(m0.BaseSize(), m0.Size());
 
   // Break it into several regions by using RemapAtEnd.
-  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
-                                            "MemMapTest_AlignByTest_map1",
-                                            PROT_READ | PROT_WRITE,
-                                            &error_msg));
-  uint8_t* base1 = m1->Begin();
+  MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
+                            "MemMapTest_AlignByTest_map1",
+                            PROT_READ | PROT_WRITE,
+                            &error_msg);
+  uint8_t* base1 = m1.Begin();
   ASSERT_TRUE(base1 != nullptr) << error_msg;
   ASSERT_EQ(base1, base0 + 3 * page_size);
-  ASSERT_EQ(m0->Size(), 3 * page_size);
+  ASSERT_EQ(m0.Size(), 3 * page_size);
 
-  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
-                                            "MemMapTest_AlignByTest_map2",
-                                            PROT_READ | PROT_WRITE,
-                                            &error_msg));
-  uint8_t* base2 = m2->Begin();
+  MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
+                            "MemMapTest_AlignByTest_map2",
+                            PROT_READ | PROT_WRITE,
+                            &error_msg);
+  uint8_t* base2 = m2.Begin();
   ASSERT_TRUE(base2 != nullptr) << error_msg;
   ASSERT_EQ(base2, base1 + 4 * page_size);
-  ASSERT_EQ(m1->Size(), 4 * page_size);
+  ASSERT_EQ(m1.Size(), 4 * page_size);
 
-  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
-                                            "MemMapTest_AlignByTest_map1",
-                                            PROT_READ | PROT_WRITE,
-                                            &error_msg));
-  uint8_t* base3 = m3->Begin();
+  MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
+                            "MemMapTest_AlignByTest_map3",
+                            PROT_READ | PROT_WRITE,
+                            &error_msg);
+  uint8_t* base3 = m3.Begin();
   ASSERT_TRUE(base3 != nullptr) << error_msg;
   ASSERT_EQ(base3, base2 + 3 * page_size);
-  ASSERT_EQ(m2->Size(), 3 * page_size);
-  ASSERT_EQ(m3->Size(), 4 * page_size);
+  ASSERT_EQ(m2.Size(), 3 * page_size);
+  ASSERT_EQ(m3.Size(), 4 * page_size);
 
-  uint8_t* end0 = base0 + m0->Size();
-  uint8_t* end1 = base1 + m1->Size();
-  uint8_t* end2 = base2 + m2->Size();
-  uint8_t* end3 = base3 + m3->Size();
+  uint8_t* end0 = base0 + m0.Size();
+  uint8_t* end1 = base1 + m1.Size();
+  uint8_t* end2 = base2 + m2.Size();
+  uint8_t* end3 = base3 + m3.Size();
 
   ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
 
@@ -703,39 +711,39 @@
   }
 
   // Align by 2 * page_size;
-  m0->AlignBy(2 * page_size);
-  m1->AlignBy(2 * page_size);
-  m2->AlignBy(2 * page_size);
-  m3->AlignBy(2 * page_size);
+  m0.AlignBy(2 * page_size);
+  m1.AlignBy(2 * page_size);
+  m2.AlignBy(2 * page_size);
+  m3.AlignBy(2 * page_size);
 
-  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));
 
-  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));
 
   if (IsAlignedParam(base0, 2 * page_size)) {
-    EXPECT_EQ(m0->Begin(), base0);
-    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
-    EXPECT_EQ(m1->Begin(), base1 + page_size);
-    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
-    EXPECT_EQ(m2->Begin(), base2 + page_size);
-    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
-    EXPECT_EQ(m3->Begin(), base3);
-    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
+    EXPECT_EQ(m0.Begin(), base0);
+    EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
+    EXPECT_EQ(m1.Begin(), base1 + page_size);
+    EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
+    EXPECT_EQ(m2.Begin(), base2 + page_size);
+    EXPECT_EQ(m2.Begin() + m2.Size(), end2);
+    EXPECT_EQ(m3.Begin(), base3);
+    EXPECT_EQ(m3.Begin() + m3.Size(), end3);
   } else {
-    EXPECT_EQ(m0->Begin(), base0 + page_size);
-    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
-    EXPECT_EQ(m1->Begin(), base1);
-    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
-    EXPECT_EQ(m2->Begin(), base2);
-    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
-    EXPECT_EQ(m3->Begin(), base3 + page_size);
-    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
+    EXPECT_EQ(m0.Begin(), base0 + page_size);
+    EXPECT_EQ(m0.Begin() + m0.Size(), end0);
+    EXPECT_EQ(m1.Begin(), base1);
+    EXPECT_EQ(m1.Begin() + m1.Size(), end1);
+    EXPECT_EQ(m2.Begin(), base2);
+    EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
+    EXPECT_EQ(m3.Begin(), base3 + page_size);
+    EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
   }
 }
 
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index b5f946e..3c68ca1 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -68,31 +68,34 @@
   return true;
 }
 
-MemMap* ZipEntry::ExtractToMemMap(const char* zip_filename, const char* entry_filename,
-                                  std::string* error_msg) {
+MemMap ZipEntry::ExtractToMemMap(const char* zip_filename,
+                                 const char* entry_filename,
+                                 std::string* error_msg) {
   std::string name(entry_filename);
   name += " extracted in memory from ";
   name += zip_filename;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
-                                                   nullptr, GetUncompressedLength(),
-                                                   PROT_READ | PROT_WRITE, false, false,
-                                                   error_msg));
-  if (map.get() == nullptr) {
+  MemMap map = MemMap::MapAnonymous(name.c_str(),
+                                    /* addr */ nullptr,
+                                    GetUncompressedLength(),
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    error_msg);
+  if (!map.IsValid()) {
     DCHECK(!error_msg->empty());
-    return nullptr;
+    return MemMap::Invalid();
   }
 
-  const int32_t error = ExtractToMemory(handle_, zip_entry_,
-                                        map->Begin(), map->Size());
+  const int32_t error = ExtractToMemory(handle_, zip_entry_, map.Begin(), map.Size());
   if (error) {
     *error_msg = std::string(ErrorCodeString(error));
-    return nullptr;
+    return MemMap::Invalid();
   }
 
-  return map.release();
+  return map;
 }
 
-MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* error_msg) {
+MemMap ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* error_msg) {
   const int zip_fd = GetFileDescriptor(handle_);
   const char* entry_filename = entry_name_.c_str();
 
@@ -109,7 +112,7 @@
     *error_msg = StringPrintf("Cannot map '%s' (in zip '%s') directly because it is compressed.",
                               entry_filename,
                               zip_filename);
-    return nullptr;
+    return MemMap::Invalid();
   } else if (zip_entry_->uncompressed_length != zip_entry_->compressed_length) {
     *error_msg = StringPrintf("Cannot map '%s' (in zip '%s') directly because "
                               "entry has bad size (%u != %u).",
@@ -117,7 +120,7 @@
                               zip_filename,
                               zip_entry_->uncompressed_length,
                               zip_entry_->compressed_length);
-    return nullptr;
+    return MemMap::Invalid();
   }
 
   std::string name(entry_filename);
@@ -130,7 +133,7 @@
     LOG(INFO) << "zip_archive: " << "make mmap of " << name << " @ offset = " << offset;
   }
 
-  std::unique_ptr<MemMap> map(
+  MemMap map =
       MemMap::MapFileAtAddress(nullptr,  // Expected pointer address
                                GetUncompressedLength(),  // Byte count
                                PROT_READ | PROT_WRITE,
@@ -140,9 +143,9 @@
                                false,  // Don't restrict allocation to lower4GB
                                false,  // Doesn't overlap existing map (reuse=false)
                                name.c_str(),
-                               /*out*/error_msg));
+                               /*out*/error_msg);
 
-  if (map == nullptr) {
+  if (!map.IsValid()) {
     DCHECK(!error_msg->empty());
   }
 
@@ -169,12 +172,12 @@
     LOG(INFO) << "---------------------------";
 
     // Dump map contents.
-    if (map != nullptr) {
+    if (map.IsValid()) {
       tmp = "";
 
       count = kMaxDumpChars;
 
-      uint8_t* begin = map->Begin();
+      uint8_t* begin = map.Begin();
       for (i = 0; i < count; ++i) {
         tmp += StringPrintf("%3d ", (unsigned int)begin[i]);
       }
@@ -185,19 +188,20 @@
     }
   }
 
-  return map.release();
+  return map;
 }
 
-MemMap* ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
-                                       const char* entry_filename,
-                                       std::string* error_msg) {
+MemMap ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
+                                      const char* entry_filename,
+                                      std::string* error_msg) {
   if (IsUncompressed() && GetFileDescriptor(handle_) >= 0) {
-    MemMap* ret = MapDirectlyFromFile(zip_filename, error_msg);
-    if (ret != nullptr) {
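+    // Use a scratch message so a failed direct mapping cannot clobber the
+    // caller's error_msg; ExtractToMemMap() below reports its own error.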
+    std::string local_error_msg;
+    MemMap ret = MapDirectlyFromFile(zip_filename, &local_error_msg);
+    if (ret.IsValid()) {
       return ret;
     }
+    // Fall back to extraction for the failure case.
   }
-  // Fall back to extraction for the failure case.
   return ExtractToMemMap(zip_filename, entry_filename, error_msg);
 }
 
diff --git a/libartbase/base/zip_archive.h b/libartbase/base/zip_archive.h
index 73495da..8fc8b54 100644
--- a/libartbase/base/zip_archive.h
+++ b/libartbase/base/zip_archive.h
@@ -43,21 +43,22 @@
   bool ExtractToFile(File& file, std::string* error_msg);
   // Extract this entry to anonymous memory (R/W).
-  // Returns null on failure and sets error_msg.
+  // Returns invalid MemMap on failure and sets error_msg.
-  MemMap* ExtractToMemMap(const char* zip_filename, const char* entry_filename,
-                          std::string* error_msg);
+  MemMap ExtractToMemMap(const char* zip_filename,
+                         const char* entry_filename,
+                         std::string* error_msg);
   // Create a file-backed private (clean, R/W) memory mapping to this entry.
   // 'zip_filename' is used for diagnostics only,
   //   the original file that the ZipArchive was open with is used
   //   for the mapping.
   //
   // Will only succeed if the entry is stored uncompressed.
-  // Returns null on failure and sets error_msg.
-  MemMap* MapDirectlyFromFile(const char* zip_filename, /*out*/std::string* error_msg);
+  // Returns invalid MemMap on failure and sets error_msg.
+  MemMap MapDirectlyFromFile(const char* zip_filename, /*out*/std::string* error_msg);
   virtual ~ZipEntry();
 
-  MemMap* MapDirectlyOrExtract(const char* zip_filename,
-                               const char* entry_filename,
-                               std::string* error_msg);
+  MemMap MapDirectlyOrExtract(const char* zip_filename,
+                              const char* entry_filename,
+                              std::string* error_msg);
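+  // Hypothetical usage sketch (names are illustrative only):
+  //   std::string error_msg;
+  //   MemMap map = entry->MapDirectlyOrExtract(zip_name, entry_name, &error_msg);
+  //   if (!map.IsValid()) { LOG(ERROR) << error_msg; }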
 
   uint32_t GetUncompressedLength();
   uint32_t GetCrc32();
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index cc7d7aa..1846a13 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -23,6 +23,7 @@
 
 #include "base/file_magic.h"
 #include "base/file_utils.h"
+#include "base/mem_map.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
 #include "base/unix_file/fd_file.h"
@@ -38,14 +39,14 @@
 
 class MemMapContainer : public DexFileContainer {
  public:
-  explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
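+  // Takes ownership by moving the now-moveable MemMap into the container.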
+  explicit MemMapContainer(MemMap&& mem_map) : mem_map_(std::move(mem_map)) { }
   virtual ~MemMapContainer() OVERRIDE { }
 
   int GetPermissions() OVERRIDE {
-    if (mem_map_.get() == nullptr) {
+    if (!mem_map_.IsValid()) {
       return 0;
     } else {
-      return mem_map_->GetProtect();
+      return mem_map_.GetProtect();
     }
   }
 
@@ -55,24 +56,24 @@
 
   bool EnableWrite() OVERRIDE {
     CHECK(IsReadOnly());
-    if (mem_map_.get() == nullptr) {
+    if (!mem_map_.IsValid()) {
       return false;
     } else {
-      return mem_map_->Protect(PROT_READ | PROT_WRITE);
+      return mem_map_.Protect(PROT_READ | PROT_WRITE);
     }
   }
 
   bool DisableWrite() OVERRIDE {
     CHECK(!IsReadOnly());
-    if (mem_map_.get() == nullptr) {
+    if (!mem_map_.IsValid()) {
       return false;
     } else {
-      return mem_map_->Protect(PROT_READ);
+      return mem_map_.Protect(PROT_READ);
     }
   }
 
  private:
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
   DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
 };
 
@@ -180,22 +181,24 @@
 
 std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
                                                       uint32_t location_checksum,
-                                                      std::unique_ptr<MemMap> map,
+                                                      MemMap&& map,
                                                       bool verify,
                                                       bool verify_checksum,
                                                       std::string* error_msg) const {
   ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
-  CHECK(map.get() != nullptr);
+  CHECK(map.IsValid());
 
-  if (map->Size() < sizeof(DexFile::Header)) {
+  size_t size = map.Size();
+  if (size < sizeof(DexFile::Header)) {
     *error_msg = StringPrintf(
         "DexFile: failed to open dex file '%s' that is too short to have a header",
         location.c_str());
     return nullptr;
   }
 
-  std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
-                                                 map->Size(),
+  uint8_t* begin = map.Begin();
+  std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+                                                 size,
                                                  /*data_base*/ nullptr,
                                                  /*data_size*/ 0u,
                                                  location,
@@ -285,7 +288,7 @@
                                                           std::string* error_msg) const {
   ScopedTrace trace(std::string("Open dex file ") + std::string(location));
   CHECK(!location.empty());
-  std::unique_ptr<MemMap> map;
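+  // A default-constructed MemMap is invalid until assigned from MapFile() below.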
+  MemMap map;
   {
     File delayed_close(fd, /* check_usage */ false);
     struct stat sbuf;
@@ -300,31 +303,33 @@
       return nullptr;
     }
     size_t length = sbuf.st_size;
-    map.reset(MemMap::MapFile(length,
-                              PROT_READ,
-                              mmap_shared ? MAP_SHARED : MAP_PRIVATE,
-                              fd,
-                              0,
-                              /*low_4gb*/false,
-                              location.c_str(),
-                              error_msg));
-    if (map == nullptr) {
+    map = MemMap::MapFile(length,
+                          PROT_READ,
+                          mmap_shared ? MAP_SHARED : MAP_PRIVATE,
+                          fd,
+                          0,
+                          /*low_4gb*/false,
+                          location.c_str(),
+                          error_msg);
+    if (!map.IsValid()) {
       DCHECK(!error_msg->empty());
       return nullptr;
     }
   }
 
-  if (map->Size() < sizeof(DexFile::Header)) {
+  const uint8_t* begin = map.Begin();
+  size_t size = map.Size();
+  if (size < sizeof(DexFile::Header)) {
     *error_msg = StringPrintf(
         "DexFile: failed to open dex file '%s' that is too short to have a header",
         location.c_str());
     return nullptr;
   }
 
-  const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
+  const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(begin);
 
-  std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
-                                                 map->Size(),
+  std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+                                                 size,
                                                  /*data_base*/ nullptr,
                                                  /*data_size*/ 0u,
                                                  location,
@@ -366,7 +371,7 @@
     return nullptr;
   }
 
-  std::unique_ptr<MemMap> map;
+  MemMap map;
   if (zip_entry->IsUncompressed()) {
     if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
       // Do not mmap unaligned ZIP entries because
@@ -376,8 +381,8 @@
                    << "Falling back to extracting file.";
     } else {
       // Map uncompressed files within zip as file-backed to avoid a dirty copy.
-      map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
-      if (map == nullptr) {
+      map = zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg);
+      if (!map.IsValid()) {
         LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
                      << "is your ZIP file corrupted? Falling back to extraction.";
         // Try again with Extraction which still has a chance of recovery.
@@ -385,21 +390,23 @@
     }
   }
 
-  if (map == nullptr) {
+  if (!map.IsValid()) {
     // Default path for compressed ZIP entries,
     // and fallback for stored ZIP entries.
-    map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
+    map = zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg);
   }
 
-  if (map == nullptr) {
+  if (!map.IsValid()) {
     *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
                               error_msg->c_str());
     *error_code = DexFileLoaderErrorCode::kExtractToMemoryError;
     return nullptr;
   }
   VerifyResult verify_result;
-  std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
-                                                 map->Size(),
+  uint8_t* begin = map.Begin();
+  size_t size = map.Size();
+  std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
+                                                 size,
                                                  /*data_base*/ nullptr,
                                                  /*data_size*/ 0u,
                                                  location,
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index da2620f..420b347 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -66,7 +66,7 @@
   // Opens .dex file that has been memory-mapped by the caller.
   std::unique_ptr<const DexFile> Open(const std::string& location,
-                                      uint32_t location_checkum,
+                                      uint32_t location_checksum,
-                                      std::unique_ptr<MemMap> mem_map,
+                                      MemMap&& mem_map,
                                       bool verify,
                                       bool verify_checksum,
                                       std::string* error_msg) const;
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 6f49adf..c765345 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -1183,7 +1183,7 @@
       // (e.g. dex metadata files)
       LOG(WARNING) << "Could not find entry " << kDexMetadataProfileEntry
           << " in the zip archive. Creating an empty profile.";
-      source->reset(ProfileSource::Create(nullptr));
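+      // An invalid MemMap replaces the old null pointer; HasEmptyContent()
+      // treats it as an empty source.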
+      source->reset(ProfileSource::Create(MemMap::Invalid()));
       return kProfileLoadSuccess;
     }
     if (zip_entry->GetUncompressedLength() == 0) {
@@ -1192,11 +1192,9 @@
     }
 
     // TODO(calin) pass along file names to assist with debugging.
-    std::unique_ptr<MemMap> map(zip_entry->MapDirectlyOrExtract(kDexMetadataProfileEntry,
-                                                                "profile file",
-                                                                error));
+    MemMap map = zip_entry->MapDirectlyOrExtract(kDexMetadataProfileEntry, "profile file", error);
 
-    if (map != nullptr) {
+    if (map.IsValid()) {
       source->reset(ProfileSource::Create(std::move(map)));
       return kProfileLoadSuccess;
     } else {
@@ -1211,11 +1209,11 @@
     const std::string& debug_stage,
     std::string* error) {
   if (IsMemMap()) {
-    if (mem_map_cur_ + byte_count > mem_map_->Size()) {
+    if (mem_map_cur_ + byte_count > mem_map_.Size()) {
       return kProfileLoadBadData;
     }
     for (size_t i = 0; i < byte_count; i++) {
-      buffer[i] = *(mem_map_->Begin() + mem_map_cur_);
+      buffer[i] = *(mem_map_.Begin() + mem_map_cur_);
       mem_map_cur_++;
     }
   } else {
@@ -1237,13 +1235,13 @@
 
 bool ProfileCompilationInfo::ProfileSource::HasConsumedAllData() const {
   return IsMemMap()
-      ? (mem_map_ == nullptr || mem_map_cur_ == mem_map_->Size())
+      ? (!mem_map_.IsValid() || mem_map_cur_ == mem_map_.Size())
       : (testEOF(fd_) == 0);
 }
 
 bool ProfileCompilationInfo::ProfileSource::HasEmptyContent() const {
   if (IsMemMap()) {
-    return mem_map_ == nullptr || mem_map_->Size() == 0;
+    return !mem_map_.IsValid() || mem_map_.Size() == 0;
   } else {
     struct stat stat_buffer;
     if (fstat(fd_, &stat_buffer) != 0) {
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 3596f3e..0dbf490 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -637,14 +637,14 @@
      */
     static ProfileSource* Create(int32_t fd) {
       DCHECK_GT(fd, -1);
-      return new ProfileSource(fd, /*map*/ nullptr);
+      return new ProfileSource(fd, MemMap::Invalid());
     }
 
     /**
-     * Create a profile source backed by a memory map. The map can be null in
-     * which case it will the treated as an empty source.
+     * Create a profile source backed by a memory map. The map can be invalid,
+     * in which case it will be treated as an empty source.
      */
-    static ProfileSource* Create(std::unique_ptr<MemMap>&& mem_map) {
+    static ProfileSource* Create(MemMap&& mem_map) {
       return new ProfileSource(/*fd*/ -1, std::move(mem_map));
     }
 
@@ -664,13 +664,13 @@
     bool HasConsumedAllData() const;
 
    private:
-    ProfileSource(int32_t fd, std::unique_ptr<MemMap>&& mem_map)
+    ProfileSource(int32_t fd, MemMap&& mem_map)
         : fd_(fd), mem_map_(std::move(mem_map)), mem_map_cur_(0) {}
 
     bool IsMemMap() const { return fd_ == -1; }
 
     int32_t fd_;  // The fd is not owned by this class.
-    std::unique_ptr<MemMap> mem_map_;
+    MemMap mem_map_;
     size_t mem_map_cur_;  // Current position in the map to read from.
   };
 
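
ProfileSource::Read() above walks the mapped bytes with a cursor (mem_map_cur_) and bounds-checks every read. A small sketch of the same cursor pattern over a plain byte range (MapReader and its members are hypothetical; the real code reads through MemMap::Begin()/Size() and returns kProfileLoadBadData on an out-of-bounds read):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Minimal sketch of a cursor-based reader over a mapped byte range.
    struct MapReader {
      const uint8_t* data;  // Stands in for mem_map_.Begin().
      size_t size;          // Stands in for mem_map_.Size().
      size_t cur;           // Stands in for mem_map_cur_.

      bool Read(uint8_t* buffer, size_t byte_count) {
        if (byte_count > size - cur) {  // Bounds check; cur <= size always holds.
          return false;                 // Corresponds to kProfileLoadBadData.
        }
        std::memcpy(buffer, data + cur, byte_count);  // Byte-by-byte loop in the original.
        cur += byte_count;
        return true;
      }

      bool HasConsumedAllData() const { return cur == size; }
    };

    int main() {
      const uint8_t blob[4] = {1, 2, 3, 4};
      MapReader reader{blob, sizeof(blob), 0};
      uint8_t out[4];
      return (reader.Read(out, 4) && reader.HasConsumedAllData()) ? 0 : 1;
    }
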
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index c04c50e..a5cc38b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -708,7 +708,7 @@
       return nullptr;
     }
 
-    std::unique_ptr<MemMap> mmap(MemMap::MapFile(
+    MemMap mmap = MemMap::MapFile(
         file->GetLength(),
         PROT_READ | PROT_WRITE,
         MAP_PRIVATE,
@@ -716,13 +716,13 @@
         /* start offset */ 0,
         /* low_4gb */ false,
         vdex_filename.c_str(),
-        error_msg));
-    if (mmap == nullptr) {
+        error_msg);
+    if (!mmap.IsValid()) {
       *error_msg = "Failed to mmap file " + vdex_filename + ": " + *error_msg;
       return nullptr;
     }
 
-    std::unique_ptr<VdexFile> vdex_file(new VdexFile(mmap.release()));
+    std::unique_ptr<VdexFile> vdex_file(new VdexFile(std::move(mmap)));
     if (!vdex_file->IsValid()) {
       *error_msg = "Vdex file is not valid";
       return nullptr;
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 9bea18a..209add3 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -91,10 +91,8 @@
   // Make the mmap
   std::string error_msg;
   art::ArrayRef<const unsigned char> final_data(final_dex_data, final_len);
-  std::unique_ptr<art::MemMap> map(Redefiner::MoveDataToMemMap(orig_location,
-                                                               final_data,
-                                                               &error_msg));
-  if (map.get() == nullptr) {
+  art::MemMap map = Redefiner::MoveDataToMemMap(orig_location, final_data, &error_msg);
+  if (!map.IsValid()) {
     LOG(WARNING) << "Unable to allocate mmap for redefined dex file! Error was: " << error_msg;
     self->ThrowOutOfMemoryError(StringPrintf(
         "Unable to allocate dex file for transformation of %s", descriptor).c_str());
@@ -102,15 +100,15 @@
   }
 
   // Make a dex-file
-  if (map->Size() < sizeof(art::DexFile::Header)) {
+  if (map.Size() < sizeof(art::DexFile::Header)) {
     LOG(WARNING) << "Could not read dex file header because dex_data was too short";
     art::ThrowClassFormatError(nullptr,
                                "Unable to read transformed dex file of %s",
                                descriptor);
     return nullptr;
   }
-  uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
-  std::string map_name = map->GetName();
+  uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map.Begin())->checksum_;
+  std::string map_name = map.GetName();
   const art::ArtDexFileLoader dex_file_loader;
   std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map_name,
                                                                     checksum,
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index dce2733..030ad98 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -49,26 +49,27 @@
 void ArtClassDefinition::InitializeMemory() const {
   DCHECK(art::MemMap::kCanReplaceMapping);
   VLOG(signals) << "Initializing de-quickened memory for dex file of " << name_;
-  CHECK(dex_data_mmap_ != nullptr);
-  CHECK(temp_mmap_ != nullptr);
-  CHECK_EQ(dex_data_mmap_->GetProtect(), PROT_NONE);
-  CHECK_EQ(temp_mmap_->GetProtect(), PROT_READ | PROT_WRITE);
+  CHECK(dex_data_mmap_.IsValid());
+  CHECK(temp_mmap_.IsValid());
+  CHECK_EQ(dex_data_mmap_.GetProtect(), PROT_NONE);
+  CHECK_EQ(temp_mmap_.GetProtect(), PROT_READ | PROT_WRITE);
 
   std::string desc = std::string("L") + name_ + ";";
   std::unique_ptr<FixedUpDexFile>
       fixed_dex_file(FixedUpDexFile::Create(*initial_dex_file_unquickened_, desc.c_str()));
   CHECK(fixed_dex_file.get() != nullptr);
-  CHECK_LE(fixed_dex_file->Size(), temp_mmap_->Size());
-  CHECK_EQ(temp_mmap_->Size(), dex_data_mmap_->Size());
+  CHECK_LE(fixed_dex_file->Size(), temp_mmap_.Size());
+  CHECK_EQ(temp_mmap_.Size(), dex_data_mmap_.Size());
   // Copy the data to the temp mmap.
-  memcpy(temp_mmap_->Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
+  memcpy(temp_mmap_.Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
 
   // Move the mmap atomically.
-  art::MemMap* source = temp_mmap_.release();
+  art::MemMap source;
+  source.swap(temp_mmap_);
   std::string error;
-  CHECK(dex_data_mmap_->ReplaceWith(&source, &error)) << "Failed to replace mmap for "
-                                                      << name_ << " because " << error;
-  CHECK(dex_data_mmap_->Protect(PROT_READ));
+  CHECK(dex_data_mmap_.ReplaceWith(&source, &error)) << "Failed to replace mmap for "
+                                                     << name_ << " because " << error;
+  CHECK(dex_data_mmap_.Protect(PROT_READ));
 }
 
 bool ArtClassDefinition::IsModified() const {
@@ -85,13 +86,13 @@
   }
 
   // The dex_data_ was never touched by the agents.
-  if (dex_data_mmap_ != nullptr && dex_data_mmap_->GetProtect() == PROT_NONE) {
-    if (current_dex_file_.data() == dex_data_mmap_->Begin()) {
+  if (dex_data_mmap_.IsValid() && dex_data_mmap_.GetProtect() == PROT_NONE) {
+    if (current_dex_file_.data() == dex_data_mmap_.Begin()) {
       // the dex_data_ looks like it changed (not equal to current_dex_file_) but we never
       // initialized the dex_data_mmap_. This means the new_dex_data was filled in without looking
       // at the initial dex_data_.
       return true;
-    } else if (dex_data_.data() == dex_data_mmap_->Begin()) {
+    } else if (dex_data_.data() == dex_data_mmap_.Begin()) {
       // The dex file used to have modifications but they were not added again.
       return true;
     } else {
@@ -244,26 +245,26 @@
     std::string mmap_name("anon-mmap-for-redefine: ");
     mmap_name += name_;
     std::string error;
-    dex_data_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
-                                                   nullptr,
-                                                   dequick_size,
-                                                   PROT_NONE,
-                                                   /*low_4gb*/ false,
-                                                   /*reuse*/ false,
-                                                   &error));
-    mmap_name += "-TEMP";
-    temp_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
-                                               nullptr,
+    dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
+                                               /* addr */ nullptr,
                                                dequick_size,
-                                               PROT_READ | PROT_WRITE,
+                                               PROT_NONE,
                                                /*low_4gb*/ false,
                                                /*reuse*/ false,
-                                               &error));
-    if (UNLIKELY(dex_data_mmap_ != nullptr && temp_mmap_ != nullptr)) {
+                                               &error);
+    mmap_name += "-TEMP";
+    temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
+                                           /* addr */ nullptr,
+                                           dequick_size,
+                                           PROT_READ | PROT_WRITE,
+                                           /*low_4gb*/ false,
+                                           /*reuse*/ false,
+                                           &error);
+    if (UNLIKELY(dex_data_mmap_.IsValid() && temp_mmap_.IsValid())) {
       // Need to save the initial dexfile so we don't need to search for it in the fault-handler.
       initial_dex_file_unquickened_ = quick_dex;
-      dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
-                                                     dex_data_mmap_->Size());
+      dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_.Begin(),
+                                                     dex_data_mmap_.Size());
       if (from_class_ext_) {
         // We got initial from class_ext so the current one must have undergone redefinition so no
         // cdex or quickening stuff.
@@ -275,14 +276,14 @@
         // This class hasn't been redefined before. The dequickened current data is the same as the
-       // dex_data_mmap_ when it's filled it. We don't need to copy anything because the mmap will
+       // dex_data_mmap_ when it's filled in. We don't need to copy anything because the mmap will
         // not be cleared until after everything is done.
-        current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+        current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_.Begin(),
                                                                dequick_size);
       }
       return;
     }
   }
-  dex_data_mmap_.reset(nullptr);
-  temp_mmap_.reset(nullptr);
+  dex_data_mmap_.Reset();
+  temp_mmap_.Reset();
   // Failed to mmap a large enough area (or on-demand dequickening was disabled). This is
-  // unfortunate. Since currently the size is just a guess though we might as well try to do it
-  // manually.
+  // unfortunate. Since the size is currently just a guess, we might as well try to do it
+  // manually.
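
InitializeMemory() above stages the fixed-up dex data in temp_mmap_ and then installs it over dex_data_mmap_ in one step via ReplaceWith(). On Linux the same effect can be achieved with mremap(MREMAP_FIXED), which atomically replaces the destination pages; a sketch of just that mechanism (illustrative only, not a claim about how MemMap::ReplaceWith is implemented; error handling trimmed):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdlib>
    #include <cstring>

    int main() {
      const size_t len = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      // Destination starts out inaccessible, like dex_data_mmap_ (PROT_NONE).
      void* dest = mmap(nullptr, len, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      // Staging area is read-write, like temp_mmap_.
      void* temp = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (dest == MAP_FAILED || temp == MAP_FAILED) return EXIT_FAILURE;

      std::memset(temp, 0xAB, len);  // "Copy the data to the temp mmap."

      // Install the staged pages at the destination address in one step;
      // MREMAP_FIXED tears down whatever was mapped there before.
      if (mremap(temp, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dest) == MAP_FAILED) {
        return EXIT_FAILURE;
      }
      mprotect(dest, len, PROT_READ);  // Matches the final Protect(PROT_READ).

      int ok = static_cast<unsigned char*>(dest)[0] == 0xAB;
      munmap(dest, len);
      return ok ? EXIT_SUCCESS : EXIT_FAILURE;
    }
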
diff --git a/openjdkjvmti/ti_class_definition.h b/openjdkjvmti/ti_class_definition.h
index f888a74..224e664 100644
--- a/openjdkjvmti/ti_class_definition.h
+++ b/openjdkjvmti/ti_class_definition.h
@@ -56,8 +56,8 @@
         loader_(nullptr),
         name_(),
         protection_domain_(nullptr),
-        dex_data_mmap_(nullptr),
-        temp_mmap_(nullptr),
+        dex_data_mmap_(),
+        temp_mmap_(),
         dex_data_memory_(),
         initial_dex_file_unquickened_(nullptr),
         dex_data_(),
@@ -100,9 +100,9 @@
   }
 
   bool ContainsAddress(uintptr_t ptr) const {
-    return dex_data_mmap_ != nullptr &&
-        reinterpret_cast<uintptr_t>(dex_data_mmap_->Begin()) <= ptr &&
-        reinterpret_cast<uintptr_t>(dex_data_mmap_->End()) > ptr;
+    return dex_data_mmap_.IsValid() &&
+        reinterpret_cast<uintptr_t>(dex_data_mmap_.Begin()) <= ptr &&
+        reinterpret_cast<uintptr_t>(dex_data_mmap_.End()) > ptr;
   }
 
   bool IsModified() const REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -128,9 +128,9 @@
 
   bool IsLazyDefinition() const {
     DCHECK(IsInitialized());
-    return dex_data_mmap_ != nullptr &&
-        dex_data_.data() == dex_data_mmap_->Begin() &&
-        dex_data_mmap_->GetProtect() == PROT_NONE;
+    return dex_data_mmap_.IsValid() &&
+        dex_data_.data() == dex_data_mmap_.Begin() &&
+        dex_data_mmap_.GetProtect() == PROT_NONE;
   }
 
   jobject GetProtectionDomain() const {
@@ -159,9 +159,9 @@
 
   // Mmap that will be filled with the original-dex-file lazily if it needs to be de-quickened or
   // de-compact-dex'd
-  mutable std::unique_ptr<art::MemMap> dex_data_mmap_;
+  mutable art::MemMap dex_data_mmap_;
   // This is a temporary mmap we will use to be able to fill the dex file data atomically.
-  mutable std::unique_ptr<art::MemMap> temp_mmap_;
+  mutable art::MemMap temp_mmap_;
 
   // A unique_ptr to the current dex_data if it needs to be cleaned up.
   std::vector<unsigned char> dex_data_memory_;
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index dd0428d..6cba48a 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -300,24 +300,23 @@
 }
 
 // Moves dex data to an anonymous, read-only mmap'd region.
-std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
-                                                         art::ArrayRef<const unsigned char> data,
-                                                         std::string* error_msg) {
-  std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
+art::MemMap Redefiner::MoveDataToMemMap(const std::string& original_location,
+                                        art::ArrayRef<const unsigned char> data,
+                                        std::string* error_msg) {
+  art::MemMap map = art::MemMap::MapAnonymous(
       StringPrintf("%s-transformed", original_location.c_str()).c_str(),
-      nullptr,
+      /* addr */ nullptr,
       data.size(),
       PROT_READ|PROT_WRITE,
-      /*low_4gb*/false,
-      /*reuse*/false,
-      error_msg));
-  if (map == nullptr) {
-    return map;
+      /*low_4gb*/ false,
+      /*reuse*/ false,
+      error_msg);
+  if (LIKELY(map.IsValid())) {
+    memcpy(map.Begin(), data.data(), data.size());
+    // Make the dex file's mmap read-only. This matches how other DexFiles are mmapped and
+    // prevents programs from corrupting it.
+    map.Protect(PROT_READ);
   }
-  memcpy(map->Begin(), data.data(), data.size());
-  // Make the dex files mmap read only. This matches how other DexFiles are mmaped and prevents
-  // programs from corrupting it.
-  map->Protect(PROT_READ);
   return map;
 }
 
@@ -429,23 +428,22 @@
   }
   JvmtiUniquePtr<char> generic_unique_ptr(MakeJvmtiUniquePtr(env, generic_ptr_unused));
   JvmtiUniquePtr<char> signature_unique_ptr(MakeJvmtiUniquePtr(env, signature_ptr));
-  std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_dex_location,
-                                                    def.GetDexData(),
-                                                    error_msg_));
+  art::MemMap map = MoveDataToMemMap(original_dex_location, def.GetDexData(), error_msg_);
   std::ostringstream os;
-  if (map.get() == nullptr) {
+  if (!map.IsValid()) {
     os << "Failed to create anonymous mmap for modified dex file of class " << def.GetName()
        << "in dex file " << original_dex_location << " because: " << *error_msg_;
     *error_msg_ = os.str();
     return ERR(OUT_OF_MEMORY);
   }
-  if (map->Size() < sizeof(art::DexFile::Header)) {
+  if (map.Size() < sizeof(art::DexFile::Header)) {
     *error_msg_ = "Could not read dex file header because dex_data was too short";
     return ERR(INVALID_CLASS_FORMAT);
   }
-  uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
+  std::string name = map.GetName();
+  uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map.Begin())->checksum_;
   const art::ArtDexFileLoader dex_file_loader;
-  std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map->GetName(),
+  std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(name,
                                                                     checksum,
                                                                     std::move(map),
                                                                     /*verify*/true,
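
MoveDataToMemMap() above follows a simple recipe: map read-write, copy the payload, drop the protection to read-only, and return the mapping by value. The same recipe expressed with raw POSIX calls (CopyToReadOnlyMapping is a hypothetical helper for illustration; the real function returns an art::MemMap and reports failures through error_msg):

    #include <sys/mman.h>
    #include <cstdlib>
    #include <cstring>

    // Map a read-write anonymous region, copy the payload in, then drop to
    // PROT_READ so later code cannot scribble over the data.
    static void* CopyToReadOnlyMapping(const unsigned char* data, size_t size) {
      void* map = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map == MAP_FAILED) {
        return nullptr;  // Caller sees the failure, like !map.IsValid() above.
      }
      std::memcpy(map, data, size);
      mprotect(map, size, PROT_READ);  // Read-only from here on.
      return map;
    }

    int main() {
      const unsigned char payload[] = "dex\n035";
      void* map = CopyToReadOnlyMapping(payload, sizeof(payload));
      if (map == nullptr) return EXIT_FAILURE;
      munmap(map, sizeof(payload));
      return EXIT_SUCCESS;
    }
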
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index 6d8f6bf..f4a4280 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -78,9 +78,9 @@
 
   static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
 
-  static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
-                                                       art::ArrayRef<const unsigned char> data,
-                                                       std::string* error_msg);
+  static art::MemMap MoveDataToMemMap(const std::string& original_location,
+                                      art::ArrayRef<const unsigned char> data,
+                                      std::string* error_msg);
 
   // Helper for checking if redefinition/retransformation is allowed.
   static jvmtiError GetClassRedefinitionError(jclass klass, /*out*/std::string* error_msg)
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index a15f7b8..8169979 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -538,7 +538,7 @@
   ScopedObjectAccess soa(Thread::Current());
 
   std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-  std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>> space_to_memmap_map;
+  std::map<gc::space::ImageSpace*, MemMap> space_to_memmap_map;
 
   for (size_t i = 0; i < spaces.size(); ++i) {
     t.NewTiming("Image Patching setup");
@@ -567,15 +567,15 @@
 
     // Create the map where we will write the image patches to.
     std::string error_msg;
-    std::unique_ptr<MemMap> image(MemMap::MapFile(image_len,
-                                                  PROT_READ | PROT_WRITE,
-                                                  MAP_PRIVATE,
-                                                  input_image->Fd(),
-                                                  0,
-                                                  /*low_4gb*/false,
-                                                  input_image->GetPath().c_str(),
-                                                  &error_msg));
-    if (image.get() == nullptr) {
+    MemMap image = MemMap::MapFile(image_len,
+                                   PROT_READ | PROT_WRITE,
+                                   MAP_PRIVATE,
+                                   input_image->Fd(),
+                                   0,
+                                   /*low_4gb*/false,
+                                   input_image->GetPath().c_str(),
+                                   &error_msg);
+    if (!image.IsValid()) {
       LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
       return false;
     }
@@ -583,7 +583,7 @@
 
     space_to_memmap_map.emplace(space, std::move(image));
     PatchOat p = PatchOat(isa,
-                          space_to_memmap_map[space].get(),
+                          &space_to_memmap_map[space],
                           space->GetLiveBitmap(),
                           space->GetMemMap(),
                           delta,
@@ -636,22 +636,22 @@
         LOG(ERROR) << "Error while getting input image size";
         return false;
       }
-      std::unique_ptr<MemMap> original(MemMap::MapFile(input_image_size,
-                                                       PROT_READ,
-                                                       MAP_PRIVATE,
-                                                       input_image->Fd(),
-                                                       0,
-                                                       /*low_4gb*/false,
-                                                       input_image->GetPath().c_str(),
-                                                       &error_msg));
-      if (original.get() == nullptr) {
+      MemMap original = MemMap::MapFile(input_image_size,
+                                        PROT_READ,
+                                        MAP_PRIVATE,
+                                        input_image->Fd(),
+                                        0,
+                                        /*low_4gb*/false,
+                                        input_image->GetPath().c_str(),
+                                        &error_msg);
+      if (!original.IsValid()) {
         LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
         return false;
       }
 
       const MemMap* relocated = p.image_;
 
-      if (!WriteRelFile(*original, *relocated, image_relocation_filename, &error_msg)) {
+      if (!WriteRelFile(original, *relocated, image_relocation_filename, &error_msg)) {
         LOG(ERROR) << "Failed to create image relocation file " << image_relocation_filename
             << ": " << error_msg;
         return false;
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 2b1210b..ac2fdf5 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -74,7 +74,7 @@
   // All pointers are only borrowed.
   PatchOat(InstructionSet isa, MemMap* image,
            gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
-           std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>>* map, TimingLogger* timings)
+           std::map<gc::space::ImageSpace*, MemMap>* map, TimingLogger* timings)
       : image_(image), bitmap_(bitmap), heap_(heap),
         delta_(delta), isa_(isa), space_map_(map), timings_(timings) {}
 
@@ -139,7 +139,7 @@
       if (image_space->Contains(obj)) {
         uintptr_t heap_off = reinterpret_cast<uintptr_t>(obj) -
                              reinterpret_cast<uintptr_t>(image_space->GetMemMap()->Begin());
-        return reinterpret_cast<T*>(space_map_->find(image_space)->second->Begin() + heap_off);
+        return reinterpret_cast<T*>(space_map_->find(image_space)->second.Begin() + heap_off);
       }
     }
     LOG(FATAL) << "Did not find object in boot image space " << obj;
@@ -195,7 +195,7 @@
   // Active instruction set, used to know the entrypoint size.
   const InstructionSet isa_;
 
-  const std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>>* space_map_;
+  const std::map<gc::space::ImageSpace*, MemMap>* space_map_;
 
   TimingLogger* timings_;
 
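
Storing MemMap by value in space_to_memmap_map relies on two properties the movable MemMap provides: emplace() can move a freshly mapped image into the container, and the operator[] lookup used in the setup code needs the mapped type to be default-constructible, which the invalid state satisfies. A sketch with an illustrative stand-in type (Mapping is not ART API):

    #include <cassert>
    #include <map>
    #include <utility>

    // Move-only stand-in for MemMap; default-constructs to an invalid state.
    class Mapping {
     public:
      Mapping() = default;  // Invalid, like MemMap::Invalid().
      explicit Mapping(int id) : id_(id) {}
      Mapping(Mapping&&) = default;
      Mapping& operator=(Mapping&&) = default;
      bool IsValid() const { return id_ != 0; }

     private:
      int id_ = 0;
    };

    int main() {
      std::map<int, Mapping> spaces;
      Mapping image(42);
      spaces.emplace(1, std::move(image));  // Ownership moves into the map.
      Mapping* borrowed = &spaces[1];       // Borrow a pointer, as PatchOat does.
      assert(borrowed->IsValid());
      return 0;
    }
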
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 702f0e4..0f472e2 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -38,22 +38,34 @@
   void Release() OVERRIDE;
 
  private:
-  std::unique_ptr<MemMap> map_;
+  static MemMap Allocate(size_t size, bool low_4gb, const char* name);
+
+  MemMap map_;
 };
 
-MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name)
+    : map_(Allocate(size, low_4gb, name)) {
+  memory_ = map_.Begin();
+  static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
+                "Arena should not need stronger alignment than kPageSize.");
+  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
+  size_ = map_.Size();
+}
+
+MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) {
   // Round up to a full page as that's the smallest unit of allocation for mmap()
   // and we want to be able to use all memory that we actually allocate.
   size = RoundUp(size, kPageSize);
   std::string error_msg;
-  map_.reset(MemMap::MapAnonymous(
-      name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
-  CHECK(map_.get() != nullptr) << error_msg;
-  memory_ = map_->Begin();
-  static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
-                "Arena should not need stronger alignment than kPageSize.");
-  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
-  size_ = map_->Size();
+  MemMap map = MemMap::MapAnonymous(name,
+                                    /* addr */ nullptr,
+                                    size,
+                                    PROT_READ | PROT_WRITE,
+                                    low_4gb,
+                                    /* reuse */ false,
+                                    &error_msg);
+  CHECK(map.IsValid()) << error_msg;
+  return map;
 }
 
 MemMapArena::~MemMapArena() {
@@ -62,7 +74,7 @@
 
 void MemMapArena::Release() {
   if (bytes_allocated_ > 0) {
-    map_->MadviseDontNeedAndZero();
+    map_.MadviseDontNeedAndZero();
     bytes_allocated_ = 0;
   }
 }
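
The MemMapArena rewrite above shows the pattern used whenever a class gains a by-value MemMap member: a private static factory performs the mapping and the CHECK, and the constructor consumes its result in the member initializer list, so the constructor body never sees a half-initialized member. A stand-alone sketch of the shape (Mapping and Arena are illustrative stand-ins):

    #include <cassert>
    #include <cstddef>

    class Mapping {
     public:
      explicit Mapping(size_t size) : size_(size) {}
      Mapping(Mapping&&) = default;
      size_t Size() const { return size_; }

     private:
      size_t size_;
    };

    class Arena {
     public:
      explicit Arena(size_t size) : map_(Allocate(size)) {
        size_ = map_.Size();  // map_ is already fully formed here.
      }

     private:
      static Mapping Allocate(size_t size) {
        Mapping map(size);        // Stands in for MemMap::MapAnonymous(...).
        assert(map.Size() != 0);  // Stands in for CHECK(map.IsValid()).
        return map;               // Returned by value, moved into map_.
      }

      Mapping map_;
      size_t size_ = 0;
    };

    int main() {
      Arena arena(4096);
      (void)arena;
      return 0;
    }
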
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index f8388f3..b0eef00 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -249,14 +249,17 @@
 void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) {
   if (start < end) {
     std::string error_msg;
-    image_reservation_.push_back(std::unique_ptr<MemMap>(
-        MemMap::MapAnonymous("image reservation",
-            reinterpret_cast<uint8_t*>(start), end - start,
-            PROT_NONE, false, false, &error_msg)));
-    ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
+    image_reservation_.push_back(MemMap::MapAnonymous("image reservation",
+                                                      reinterpret_cast<uint8_t*>(start),
+                                                      end - start,
+                                                      PROT_NONE,
+                                                      /* low_4gb*/ false,
+                                                      /* reuse */ false,
+                                                      &error_msg));
+    ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
     LOG(INFO) << "Reserved space for image " <<
-      reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
-      reinterpret_cast<void*>(image_reservation_.back()->End());
+      reinterpret_cast<void*>(image_reservation_.back().Begin()) << "-" <<
+      reinterpret_cast<void*>(image_reservation_.back().End());
   }
 }
 
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 6e8dc09..3203ee5 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -91,7 +91,7 @@
   // before the image is loaded.
   void UnreserveImageSpace();
 
-  std::vector<std::unique_ptr<MemMap>> image_reservation_;
+  std::vector<MemMap> image_reservation_;
 };
 
 }  // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 026b5da..4ae7362 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -283,7 +283,6 @@
 
 template <typename ElfTypes>
 ElfFileImpl<ElfTypes>::~ElfFileImpl() {
-  STLDeleteElements(&segments_);
   delete symtab_symbol_table_;
   delete dynsym_symbol_table_;
 }
@@ -418,17 +417,17 @@
 }
 
 template <typename ElfTypes>
-bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap* map, std::string* error_msg) {
-  if (map == nullptr) {
+bool ElfFileImpl<ElfTypes>::SetMap(File* file, MemMap&& map, std::string* error_msg) {
+  if (!map.IsValid()) {
     // MemMap::Open should have already set an error.
     DCHECK(!error_msg->empty());
     return false;
   }
-  map_.reset(map);
-  CHECK(map_.get() != nullptr) << file->GetPath();
-  CHECK(map_->Begin() != nullptr) << file->GetPath();
+  map_ = std::move(map);
+  CHECK(map_.IsValid()) << file->GetPath();
+  CHECK(map_.Begin() != nullptr) << file->GetPath();
 
-  header_ = reinterpret_cast<Elf_Ehdr*>(map_->Begin());
+  header_ = reinterpret_cast<Elf_Ehdr*>(map_.Begin());
   if ((ELFMAG0 != header_->e_ident[EI_MAG0])
       || (ELFMAG1 != header_->e_ident[EI_MAG1])
       || (ELFMAG2 != header_->e_ident[EI_MAG2])
@@ -1164,14 +1163,14 @@
         DCHECK(!error_msg->empty());
         return false;
       }
-      std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
-                                                           reserve_base_override,
-                                                           loaded_size,
-                                                           PROT_NONE,
-                                                           low_4gb,
-                                                           false,
-                                                           error_msg));
-      if (reserve.get() == nullptr) {
+      MemMap reserve = MemMap::MapAnonymous(reservation_name.c_str(),
+                                            reserve_base_override,
+                                            loaded_size,
+                                            PROT_NONE,
+                                            low_4gb,
+                                            /* reuse */ false,
+                                            error_msg);
+      if (!reserve.IsValid()) {
         *error_msg = StringPrintf("Failed to allocate %s: %s",
                                   reservation_name.c_str(), error_msg->c_str());
         return false;
@@ -1179,14 +1178,14 @@
       reserved = true;
 
       // Base address is the difference of actual mapped location and the p_vaddr
-      base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve->Begin())
+      base_address_ = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(reserve.Begin())
                        - reinterpret_cast<uintptr_t>(reserve_base));
       // By adding the p_vaddr of a section/symbol to base_address_ we will always get the
       // dynamic memory address of where that object is actually mapped
       //
       // TODO: base_address_ needs to be calculated in ::Open, otherwise
       // FindDynamicSymbolAddress returns the wrong values until Load is called.
-      segments_.push_back(reserve.release());
+      segments_.push_back(std::move(reserve));
     }
     // empty segment, nothing to map
     if (program_header->p_memsz == 0) {
@@ -1234,7 +1233,7 @@
       return false;
     }
     if (program_header->p_filesz != 0u) {
-      std::unique_ptr<MemMap> segment(
+      MemMap segment =
           MemMap::MapFileAtAddress(p_vaddr,
                                    program_header->p_filesz,
                                    prot,
@@ -1244,40 +1243,42 @@
-                                  /*low4_gb*/false,
+                                  /*low_4gb*/ false,
                                    /*reuse*/true,  // implies MAP_FIXED
                                    file->GetPath().c_str(),
-                                   error_msg));
-      if (segment.get() == nullptr) {
+                                   error_msg);
+      if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s",
                                   i, file->GetPath().c_str(), error_msg->c_str());
         return false;
       }
-      if (segment->Begin() != p_vaddr) {
+      if (segment.Begin() != p_vaddr) {
         *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, "
                                   "instead mapped to %p",
-                                  i, file->GetPath().c_str(), p_vaddr, segment->Begin());
+                                  i, file->GetPath().c_str(), p_vaddr, segment.Begin());
         return false;
       }
-      segments_.push_back(segment.release());
+      segments_.push_back(std::move(segment));
     }
     if (program_header->p_filesz < program_header->p_memsz) {
       std::string name = StringPrintf("Zero-initialized segment %" PRIu64 " of ELF file %s",
                                       static_cast<uint64_t>(i), file->GetPath().c_str());
-      std::unique_ptr<MemMap> segment(
-          MemMap::MapAnonymous(name.c_str(),
-                               p_vaddr + program_header->p_filesz,
-                               program_header->p_memsz - program_header->p_filesz,
-                               prot, false, true /* reuse */, error_msg));
-      if (segment == nullptr) {
+      MemMap segment = MemMap::MapAnonymous(name.c_str(),
+                                            p_vaddr + program_header->p_filesz,
+                                            program_header->p_memsz - program_header->p_filesz,
+                                            prot,
+                                            /* low_4gb */ false,
+                                            /* reuse */ true,
+                                            error_msg);
+      if (!segment.IsValid()) {
         *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s",
                                   i, file->GetPath().c_str(), error_msg->c_str());
         return false;
       }
-      if (segment->Begin() != p_vaddr) {
+      if (segment.Begin() != p_vaddr) {
         *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s "
                                   "at expected address %p, instead mapped to %p",
-                                  i, file->GetPath().c_str(), p_vaddr, segment->Begin());
+                                  i, file->GetPath().c_str(), p_vaddr, segment.Begin());
         return false;
       }
-      segments_.push_back(segment.release());
+      segments_.push_back(std::move(segment));
     }
   }
 
@@ -1343,9 +1344,8 @@
 
 template <typename ElfTypes>
 bool ElfFileImpl<ElfTypes>::ValidPointer(const uint8_t* start) const {
-  for (size_t i = 0; i < segments_.size(); ++i) {
-    const MemMap* segment = segments_[i];
-    if (segment->Begin() <= start && start < segment->End()) {
+  for (const MemMap& segment : segments_) {
+    if (segment.Begin() <= start && start < segment.End()) {
       return true;
     }
   }
@@ -1712,18 +1712,18 @@
                               file->GetPath().c_str());
     return nullptr;
   }
-  std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
-                                              PROT_READ,
-                                              MAP_PRIVATE,
-                                              file->Fd(),
-                                              0,
-                                              low_4gb,
-                                              file->GetPath().c_str(),
-                                              error_msg));
-  if (map == nullptr || map->Size() != EI_NIDENT) {
+  MemMap map = MemMap::MapFile(EI_NIDENT,
+                               PROT_READ,
+                               MAP_PRIVATE,
+                               file->Fd(),
+                               0,
+                               low_4gb,
+                               file->GetPath().c_str(),
+                               error_msg);
+  if (!map.IsValid() || map.Size() != EI_NIDENT) {
     return nullptr;
   }
-  uint8_t* header = map->Begin();
+  uint8_t* header = map.Begin();
   if (header[EI_CLASS] == ELFCLASS64) {
     ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
                                                        writable,
@@ -1763,18 +1763,18 @@
                               file->GetPath().c_str());
     return nullptr;
   }
-  std::unique_ptr<MemMap> map(MemMap::MapFile(EI_NIDENT,
-                                              PROT_READ,
-                                              MAP_PRIVATE,
-                                              file->Fd(),
-                                              0,
-                                              low_4gb,
-                                              file->GetPath().c_str(),
-                                              error_msg));
-  if (map == nullptr || map->Size() != EI_NIDENT) {
+  MemMap map = MemMap::MapFile(EI_NIDENT,
+                               PROT_READ,
+                               MAP_PRIVATE,
+                               file->Fd(),
+                               /* start */ 0,
+                               low_4gb,
+                               file->GetPath().c_str(),
+                               error_msg);
+  if (!map.IsValid() || map.Size() != EI_NIDENT) {
     return nullptr;
   }
-  uint8_t* header = map->Begin();
+  uint8_t* header = map.Begin();
   if (header[EI_CLASS] == ELFCLASS64) {
     ElfFileImpl64* elf_file_impl = ElfFileImpl64::Open(file,
                                                        mmap_prot,
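
The segment-loading hunks above first reserve the whole loaded range with a PROT_NONE MapAnonymous and then map each segment over it with /*reuse*/ true, which implies MAP_FIXED. A sketch of the underlying mmap() behaviour, where MAP_FIXED silently discards the overlapped part of the reservation so no explicit munmap() is needed (POSIX/Linux level; error handling trimmed):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdlib>

    int main() {
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      // Reserve four pages, inaccessible: nothing else can land in the range.
      uint8_t* reserve = static_cast<uint8_t*>(
          mmap(nullptr, 4 * page, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      if (reserve == MAP_FAILED) return EXIT_FAILURE;

      // Carve a read-write "segment" out of the reservation. MAP_FIXED
      // replaces the overlapped PROT_NONE pages in place.
      void* segment = mmap(reserve + page, page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      if (segment == MAP_FAILED) return EXIT_FAILURE;
      static_cast<uint8_t*>(segment)[0] = 1;  // Writable; the rest stays PROT_NONE.

      munmap(reserve, 4 * page);
      return EXIT_SUCCESS;
    }
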
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index a5808e2..58c38a4 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -62,15 +62,15 @@
   }
 
   uint8_t* Begin() const {
-    return map_->Begin();
+    return map_.Begin();
   }
 
   uint8_t* End() const {
-    return map_->End();
+    return map_.End();
   }
 
   size_t Size() const {
-    return map_->Size();
+    return map_.Size();
   }
 
   Elf_Ehdr& GetHeader() const;
@@ -135,7 +135,7 @@
 
   bool Setup(File* file, int prot, int flags, bool low_4gb, std::string* error_msg);
 
-  bool SetMap(File* file, MemMap* map, std::string* error_msg);
+  bool SetMap(File* file, MemMap&& map, std::string* error_msg);
 
   uint8_t* GetProgramHeadersStart() const;
   uint8_t* GetSectionHeadersStart() const;
@@ -193,9 +193,9 @@
 
   // ELF header mapping. If program_header_only_ is false, will
   // actually point to the entire elf file.
-  std::unique_ptr<MemMap> map_;
+  MemMap map_;
   Elf_Ehdr* header_;
-  std::vector<MemMap*> segments_;
+  std::vector<MemMap> segments_;
 
   // Pointer to start of first PT_LOAD program segment after Load()
   // when program_header_only_ is true.
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index e30fef4..2a71dec 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -72,12 +72,12 @@
   ~AtomicStack() {}
 
   void Reset() {
-    DCHECK(mem_map_.get() != nullptr);
+    DCHECK(mem_map_.IsValid());
     DCHECK(begin_ != nullptr);
     front_index_.store(0, std::memory_order_relaxed);
     back_index_.store(0, std::memory_order_relaxed);
     debug_is_sorted_ = true;
-    mem_map_->MadviseDontNeedAndZero();
+    mem_map_.MadviseDontNeedAndZero();
   }
 
   // Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
@@ -252,10 +252,15 @@
   // Size in number of elements.
   void Init() {
     std::string error_msg;
-    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
-                                        PROT_READ | PROT_WRITE, false, false, &error_msg));
-    CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
-    uint8_t* addr = mem_map_->Begin();
+    mem_map_ = MemMap::MapAnonymous(name_.c_str(),
+                                    /* addr */ nullptr,
+                                    capacity_ * sizeof(begin_[0]),
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+    CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
+    uint8_t* addr = mem_map_.Begin();
     CHECK(addr != nullptr);
     debug_is_sorted_ = true;
     begin_ = reinterpret_cast<StackReference<T>*>(addr);
@@ -265,7 +270,7 @@
   // Name of the mark stack.
   std::string name_;
   // Memory mapping of the atomic stack.
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
   // Back index (index after the last element pushed).
   AtomicInteger back_index_;
   // Front index, used for implementing PopFront.
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index d45a0cc..e157e5e 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -27,47 +27,51 @@
 namespace gc {
 namespace accounting {
 
-Bitmap* Bitmap::CreateFromMemMap(MemMap* mem_map, size_t num_bits) {
-  CHECK(mem_map != nullptr);
-  return new Bitmap(mem_map, num_bits);
+Bitmap* Bitmap::CreateFromMemMap(MemMap&& mem_map, size_t num_bits) {
+  CHECK(mem_map.IsValid());
+  return new Bitmap(std::move(mem_map), num_bits);
 }
 
-Bitmap::Bitmap(MemMap* mem_map, size_t bitmap_size)
-    : mem_map_(mem_map), bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map->Begin())),
+Bitmap::Bitmap(MemMap&& mem_map, size_t bitmap_size)
+    : mem_map_(std::move(mem_map)),
+      bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map_.Begin())),
       bitmap_size_(bitmap_size) {
   CHECK(bitmap_begin_ != nullptr);
   CHECK_NE(bitmap_size, 0U);
 }
 
 Bitmap::~Bitmap() {
-  // Destroys MemMap via std::unique_ptr<>.
+  // Destroys member MemMap.
 }
 
-MemMap* Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
+MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
   const size_t bitmap_size = RoundUp(
       RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
-                                                       PROT_READ | PROT_WRITE, false, false,
-                                                       &error_msg));
-  if (UNLIKELY(mem_map.get() == nullptr)) {
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        /* addr */ nullptr,
+                                        bitmap_size,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
-    return nullptr;
   }
-  return mem_map.release();
+  return mem_map;
 }
 
 Bitmap* Bitmap::Create(const std::string& name, size_t num_bits) {
-  auto* const mem_map = AllocateMemMap(name, num_bits);
-  if (mem_map == nullptr) {
+  MemMap mem_map = AllocateMemMap(name, num_bits);
+  if (UNLIKELY(!mem_map.IsValid())) {
     return nullptr;
   }
-  return CreateFromMemMap(mem_map, num_bits);
+  return CreateFromMemMap(std::move(mem_map), num_bits);
 }
 
 void Bitmap::Clear() {
   if (bitmap_begin_ != nullptr) {
-    mem_map_->MadviseDontNeedAndZero();
+    mem_map_.MadviseDontNeedAndZero();
   }
 }
 
@@ -83,14 +87,15 @@
   CHECK_ALIGNED(cover_begin, kAlignment);
   CHECK_ALIGNED(cover_end, kAlignment);
   const size_t num_bits = (cover_end - cover_begin) / kAlignment;
-  auto* const mem_map = Bitmap::AllocateMemMap(name, num_bits);
-  return CreateFromMemMap(mem_map, cover_begin, num_bits);
+  MemMap mem_map = Bitmap::AllocateMemMap(name, num_bits);
+  CHECK(mem_map.IsValid());
+  return CreateFromMemMap(std::move(mem_map), cover_begin, num_bits);
 }
 
 template<size_t kAlignment>
 MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::CreateFromMemMap(
-    MemMap* mem_map, uintptr_t begin, size_t num_bits) {
-  return new MemoryRangeBitmap(mem_map, begin, num_bits);
+    MemMap&& mem_map, uintptr_t begin, size_t num_bits) {
+  return new MemoryRangeBitmap(std::move(mem_map), begin, num_bits);
 }
 
 template class MemoryRangeBitmap<CardTable::kCardSize>;
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index 2d83a8a..ffef566 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -24,12 +24,11 @@
 #include <vector>
 
 #include "base/globals.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 
 namespace art {
 
-class MemMap;
-
 namespace gc {
 namespace accounting {
 
@@ -42,7 +41,7 @@
   // Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
   // mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
-  // Objects are kAlignement-aligned.
+  // Objects are kAlignment-aligned.
-  static Bitmap* CreateFromMemMap(MemMap* mem_map, size_t num_bits);
+  static Bitmap* CreateFromMemMap(MemMap&& mem_map, size_t num_bits);
 
   // offset is the difference from base to a index.
   static ALWAYS_INLINE constexpr size_t BitIndexToWordIndex(uintptr_t offset) {
@@ -101,17 +100,17 @@
  protected:
   static constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * kBitsPerByte;
 
-  Bitmap(MemMap* mem_map, size_t bitmap_size);
+  Bitmap(MemMap&& mem_map, size_t bitmap_size);
   ~Bitmap();
 
   // Allocate the mem-map for a bitmap based on how many bits are required.
-  static MemMap* AllocateMemMap(const std::string& name, size_t num_bits);
+  static MemMap AllocateMemMap(const std::string& name, size_t num_bits);
 
   template<bool kSetBit>
   ALWAYS_INLINE bool ModifyBit(uintptr_t bit_index);
 
   // Backing storage for bitmap.
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
 
   // This bitmap itself, word sized for efficiency in scanning.
   uintptr_t* const bitmap_begin_;
@@ -127,10 +126,10 @@
 template<size_t kAlignment>
 class MemoryRangeBitmap : public Bitmap {
  public:
-  static MemoryRangeBitmap* Create(const std::string& name, uintptr_t cover_begin,
-                                   uintptr_t cover_end);
-  static MemoryRangeBitmap* CreateFromMemMap(MemMap* mem_map, uintptr_t cover_begin,
-                                             size_t num_bits);
+  static MemoryRangeBitmap* Create(
+      const std::string& name, uintptr_t cover_begin, uintptr_t cover_end);
+  static MemoryRangeBitmap* CreateFromMemMap(
+      MemMap&& mem_map, uintptr_t cover_begin, size_t num_bits);
 
   // Beginning of the memory range that the bitmap covers.
   ALWAYS_INLINE uintptr_t CoverBegin() const {
@@ -177,9 +176,10 @@
   }
 
  private:
-  MemoryRangeBitmap(MemMap* mem_map, uintptr_t begin, size_t num_bits)
-     : Bitmap(mem_map, num_bits), cover_begin_(begin), cover_end_(begin + kAlignment * num_bits) {
-  }
+  MemoryRangeBitmap(MemMap&& mem_map, uintptr_t begin, size_t num_bits)
+      : Bitmap(std::move(mem_map), num_bits),
+        cover_begin_(begin),
+        cover_end_(begin + kAlignment * num_bits) {}
 
   uintptr_t const cover_begin_;
   uintptr_t const cover_end_;
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 357a498..1e7d76c 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -213,8 +213,8 @@
 inline void* CardTable::AddrFromCard(const uint8_t *card_addr) const {
   DCHECK(IsValidCard(card_addr))
     << " card_addr: " << reinterpret_cast<const void*>(card_addr)
-    << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
-    << " end: " << reinterpret_cast<void*>(mem_map_->End());
+    << " begin: " << reinterpret_cast<void*>(mem_map_.Begin() + offset_)
+    << " end: " << reinterpret_cast<void*>(mem_map_.End());
   uintptr_t offset = card_addr - biased_begin_;
   return reinterpret_cast<void*>(offset << kCardShift);
 }
@@ -228,16 +228,16 @@
 }
 
 inline bool CardTable::IsValidCard(const uint8_t* card_addr) const {
-  uint8_t* begin = mem_map_->Begin() + offset_;
-  uint8_t* end = mem_map_->End();
+  uint8_t* begin = mem_map_.Begin() + offset_;
+  uint8_t* end = mem_map_.End();
   return card_addr >= begin && card_addr < end;
 }
 
 inline void CardTable::CheckCardValid(uint8_t* card) const {
   DCHECK(IsValidCard(card))
       << " card_addr: " << reinterpret_cast<const void*>(card)
-      << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
-      << " end: " << reinterpret_cast<void*>(mem_map_->End());
+      << " begin: " << reinterpret_cast<void*>(mem_map_.Begin() + offset_)
+      << " end: " << reinterpret_cast<void*>(mem_map_.End());
 }
 
 }  // namespace accounting
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 22104a3..89645e0 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -64,15 +64,19 @@
   size_t capacity = heap_capacity / kCardSize;
   /* Allocate an extra 256 bytes to allow fixed low-byte of base */
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(
-      MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
-                           false, false, &error_msg));
-  CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
+  MemMap mem_map = MemMap::MapAnonymous("card table",
+                                        /* addr */ nullptr,
+                                        capacity + 256,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        &error_msg);
+  CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
   // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
   // don't clear the card table to avoid unnecessary pages being allocated
   static_assert(kCardClean == 0, "kCardClean must be 0");
 
-  uint8_t* cardtable_begin = mem_map->Begin();
+  uint8_t* cardtable_begin = mem_map.Begin();
   CHECK(cardtable_begin != nullptr);
 
-  // We allocated up to a bytes worth of extra space to allow `biased_begin`'s byte value to equal
+  // We allocated up to a byte's worth of extra space to allow `biased_begin`'s byte value to equal
@@ -87,11 +91,11 @@
     biased_begin += offset;
   }
   CHECK_EQ(reinterpret_cast<uintptr_t>(biased_begin) & 0xff, kCardDirty);
-  return new CardTable(mem_map.release(), biased_begin, offset);
+  return new CardTable(std::move(mem_map), biased_begin, offset);
 }
 
-CardTable::CardTable(MemMap* mem_map, uint8_t* biased_begin, size_t offset)
-    : mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) {
+CardTable::CardTable(MemMap&& mem_map, uint8_t* biased_begin, size_t offset)
+    : mem_map_(std::move(mem_map)), biased_begin_(biased_begin), offset_(offset) {
 }
 
 CardTable::~CardTable() {
@@ -100,7 +104,7 @@
 
 void CardTable::ClearCardTable() {
   static_assert(kCardClean == 0, "kCardClean must be 0");
-  mem_map_->MadviseDontNeedAndZero();
+  mem_map_.MadviseDontNeedAndZero();
 }
 
 void CardTable::ClearCardRange(uint8_t* start, uint8_t* end) {
@@ -118,8 +122,8 @@
 
 void CardTable::CheckAddrIsInCardTable(const uint8_t* addr) const {
   uint8_t* card_addr = biased_begin_ + ((uintptr_t)addr >> kCardShift);
-  uint8_t* begin = mem_map_->Begin() + offset_;
-  uint8_t* end = mem_map_->End();
+  uint8_t* begin = mem_map_.Begin() + offset_;
+  uint8_t* end = mem_map_.End();
   CHECK(AddrIsInCardTable(addr))
       << "Card table " << this
       << " begin: " << reinterpret_cast<void*>(begin)
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index b8520b7..47e2430 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -20,12 +20,11 @@
 #include <memory>
 
 #include "base/globals.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 
 namespace art {
 
-class MemMap;
-
 namespace mirror {
 class Object;
 }  // namespace mirror
@@ -133,7 +132,7 @@
   bool AddrIsInCardTable(const void* addr) const;
 
  private:
-  CardTable(MemMap* begin, uint8_t* biased_begin, size_t offset);
+  CardTable(MemMap&& mem_map, uint8_t* biased_begin, size_t offset);
 
   // Returns true iff the card table address is within the bounds of the card table.
   bool IsValidCard(const uint8_t* card_addr) const ALWAYS_INLINE;
@@ -144,7 +143,7 @@
   void VerifyCardTable();
 
   // Mmapped pages for the card table
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
   // Value used to compute card table addresses from object addresses, see GetBiasedBegin
   uint8_t* const biased_begin_;
   // Card table doesn't begin at the beginning of the mem_map_, instead it is displaced by offset
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 4b5a8c6..d8b1bb2 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -39,11 +39,15 @@
     DCHECK_EQ(kHeapCapacity / kRegionSize,
               static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
     std::string error_msg;
-    MemMap* mem_map = MemMap::MapAnonymous("read barrier table", nullptr, capacity,
-                                           PROT_READ | PROT_WRITE, false, false, &error_msg);
-    CHECK(mem_map != nullptr && mem_map->Begin() != nullptr)
+    mem_map_ = MemMap::MapAnonymous("read barrier table",
+                                    /* addr */ nullptr,
+                                    capacity,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+    CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
         << "couldn't allocate read barrier table: " << error_msg;
-    mem_map_.reset(mem_map);
   }
   void ClearForSpace(space::ContinuousSpace* space) {
     uint8_t* entry_start = EntryFromAddr(space->Begin());
@@ -66,14 +70,14 @@
     return entry_value == kSetEntryValue;
   }
   void ClearAll() {
-    mem_map_->MadviseDontNeedAndZero();
+    mem_map_.MadviseDontNeedAndZero();
   }
   void SetAll() {
-    memset(mem_map_->Begin(), kSetEntryValue, mem_map_->Size());
+    memset(mem_map_.Begin(), kSetEntryValue, mem_map_.Size());
   }
   bool IsAllCleared() const {
-    for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_->Begin());
-         p < reinterpret_cast<uint32_t*>(mem_map_->End()); ++p) {
+    for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_.Begin());
+         p < reinterpret_cast<uint32_t*>(mem_map_.End()); ++p) {
       if (*p != 0) {
         return false;
       }
@@ -90,7 +94,7 @@
 
   uint8_t* EntryFromAddr(const void* heap_addr) const {
     DCHECK(IsValidHeapAddr(heap_addr)) << heap_addr;
-    uint8_t* entry_addr = mem_map_->Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
+    uint8_t* entry_addr = mem_map_.Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
     DCHECK(IsValidEntry(entry_addr)) << "heap_addr: " << heap_addr
                                      << " entry_addr: " << reinterpret_cast<void*>(entry_addr);
     return entry_addr;
@@ -106,12 +110,12 @@
   }
 
   bool IsValidEntry(const uint8_t* entry_addr) const {
-    uint8_t* begin = mem_map_->Begin();
-    uint8_t* end = mem_map_->End();
+    uint8_t* begin = mem_map_.Begin();
+    uint8_t* end = mem_map_.End();
     return entry_addr >= begin && entry_addr < end;
   }
 
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
 };
 
 }  // namespace accounting
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index ced62cd..f87a67e 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -49,21 +49,22 @@
 
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
-    const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
-  CHECK(mem_map != nullptr);
-  uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map->Begin());
+    const std::string& name, MemMap&& mem_map, uint8_t* heap_begin, size_t heap_capacity) {
+  CHECK(mem_map.IsValid());
+  uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map.Begin());
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
-  return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin, heap_capacity);
+  return new SpaceBitmap(
+      name, std::move(mem_map), bitmap_begin, bitmap_size, heap_begin, heap_capacity);
 }
 
 template<size_t kAlignment>
 SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name,
-                                     MemMap* mem_map,
+                                     MemMap&& mem_map,
                                      uintptr_t* bitmap_begin,
                                      size_t bitmap_size,
                                      const void* heap_begin,
                                      size_t heap_capacity)
-    : mem_map_(mem_map),
+    : mem_map_(std::move(mem_map)),
       bitmap_begin_(reinterpret_cast<Atomic<uintptr_t>*>(bitmap_begin)),
       bitmap_size_(bitmap_size),
       heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
@@ -83,14 +84,18 @@
   // (we represent one word as an `intptr_t`).
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
-                                                       PROT_READ | PROT_WRITE, false, false,
-                                                       &error_msg));
-  if (UNLIKELY(mem_map.get() == nullptr)) {
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        /* addr */ nullptr,
+                                        bitmap_size,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
     return nullptr;
   }
-  return CreateFromMemMap(name, mem_map.release(), heap_begin, heap_capacity);
+  return CreateFromMemMap(name, std::move(mem_map), heap_begin, heap_capacity);
 }
 
 template<size_t kAlignment>
@@ -114,7 +119,7 @@
 template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::Clear() {
   if (bitmap_begin_ != nullptr) {
-    mem_map_->MadviseDontNeedAndZero();
+    mem_map_.MadviseDontNeedAndZero();
   }
 }
 
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 1237f6e..6a3faef 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -24,6 +24,7 @@
 #include <vector>
 
 #include "base/globals.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 
 namespace art {
@@ -32,7 +33,6 @@
 class Class;
 class Object;
 }  // namespace mirror
-class MemMap;
 
 namespace gc {
 namespace accounting {
@@ -50,8 +50,10 @@
   // Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
   // mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
   // Objects are kAlignement-aligned.
-  static SpaceBitmap* CreateFromMemMap(const std::string& name, MemMap* mem_map,
-                                       uint8_t* heap_begin, size_t heap_capacity);
+  static SpaceBitmap* CreateFromMemMap(const std::string& name,
+                                       MemMap&& mem_map,
+                                       uint8_t* heap_begin,
+                                       size_t heap_capacity);
 
   ~SpaceBitmap();
 
@@ -215,7 +217,7 @@
   // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
   // however, we document that this is expected on heap_end_
   SpaceBitmap(const std::string& name,
-              MemMap* mem_map,
+              MemMap&& mem_map,
               uintptr_t* bitmap_begin,
               size_t bitmap_size,
               const void* heap_begin,
@@ -227,7 +229,7 @@
   bool Modify(const mirror::Object* obj);
 
   // Backing storage for bitmap.
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
 
   // This bitmap itself, word sized for efficiency in scanning.
   Atomic<uintptr_t>* const bitmap_begin_;
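
CreateFromMemMap and the SpaceBitmap constructor now take MemMap&& and forward it with std::move, so each map has exactly one owner and every transfer is spelled out at the call site. A minimal sketch of the sink-parameter idiom, with a hypothetical Resource type standing in for MemMap:

    #include <memory>
    #include <utility>

    struct Resource {
      std::unique_ptr<int> payload;  // any move-only state
    };

    class Owner {
     public:
      explicit Owner(Resource&& res) : res_(std::move(res)) {}  // the single move
     private:
      Resource res_;
    };

    Owner* MakeOwner(Resource&& res) {
      return new Owner(std::move(res));  // callers write MakeOwner(std::move(r))
    }
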
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a4095d8..1639a82 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -91,11 +91,15 @@
   size_t num_of_pages = footprint_ / kPageSize;
   size_t max_num_of_pages = max_capacity_ / kPageSize;
   std::string error_msg;
-  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
-                                               RoundUp(max_num_of_pages, kPageSize),
-                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
-  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
-  page_map_ = page_map_mem_map_->Begin();
+  page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
+                                           /* addr */ nullptr,
+                                           RoundUp(max_num_of_pages, kPageSize),
+                                           PROT_READ | PROT_WRITE,
+                                           /* low_4gb */ false,
+                                           /* reuse */ false,
+                                           &error_msg);
+  CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
+  page_map_ = page_map_mem_map_.Begin();
   page_map_size_ = num_of_pages;
   max_page_map_size_ = max_num_of_pages;
   free_page_run_size_map_.resize(num_of_pages);
@@ -1364,8 +1368,8 @@
     // Zero out the tail of the page map.
     uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
     uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
-    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
-    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
+    DCHECK_LE(madvise_begin, page_map_mem_map_.End());
+    size_t madvise_size = page_map_mem_map_.End() - madvise_begin;
     if (madvise_size > 0) {
       DCHECK_ALIGNED(madvise_begin, kPageSize);
       DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 30213d5..0562167 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -31,13 +31,12 @@
 #include "base/allocator.h"
 #include "base/bit_utils.h"
 #include "base/globals.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 #include "thread.h"
 
 namespace art {
 
-class MemMap;
-
 namespace gc {
 namespace allocator {
 
@@ -746,7 +745,7 @@
   volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
   size_t page_map_size_;
   size_t max_page_map_size_;
-  std::unique_ptr<MemMap> page_map_mem_map_;
+  MemMap page_map_mem_map_;
 
   // The table that indicates the size of free page runs. These sizes
   // are stored here to avoid storing in the free page header and
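
The forward declaration of MemMap is dropped in favor of including base/mem_map.h because page_map_mem_map_ is now a by-value member, and a by-value member requires the complete type at the point of declaration; only pointers and references tolerate an incomplete type. In miniature:

    class MemMap;               // incomplete type

    struct ByPointer {
      MemMap* map;              // OK: a pointer's size is known regardless
    };

    // struct ByValue {
    //   MemMap map;            // error: field has incomplete type 'MemMap'
    // };
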
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 9767807..558a4a7 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -40,22 +40,22 @@
 
 class DummyImageSpace : public space::ImageSpace {
  public:
-  DummyImageSpace(MemMap* map,
+  DummyImageSpace(MemMap&& map,
                   accounting::ContinuousSpaceBitmap* live_bitmap,
                   std::unique_ptr<DummyOatFile>&& oat_file,
-                  std::unique_ptr<MemMap>&& oat_map)
+                  MemMap&& oat_map)
       : ImageSpace("DummyImageSpace",
                    /*image_location*/"",
-                   map,
+                   std::move(map),
                    live_bitmap,
-                   map->End()),
+                   map.End()),
         oat_map_(std::move(oat_map)) {
     oat_file_ = std::move(oat_file);
     oat_file_non_owned_ = oat_file_.get();
   }
 
  private:
-  std::unique_ptr<MemMap> oat_map_;
+  MemMap oat_map_;
 };
 
 class ImmuneSpacesTest : public CommonRuntimeTest {
@@ -83,39 +83,39 @@
                                     uint8_t* oat_begin,
                                     size_t oat_size) {
     std::string error_str;
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("DummyImageSpace",
-                                                     image_begin,
-                                                     image_size,
-                                                     PROT_READ | PROT_WRITE,
-                                                     /*low_4gb*/true,
-                                                     /*reuse*/false,
-                                                     &error_str));
-    if (map == nullptr) {
+    MemMap map = MemMap::MapAnonymous("DummyImageSpace",
+                                      image_begin,
+                                      image_size,
+                                      PROT_READ | PROT_WRITE,
+                                      /*low_4gb*/true,
+                                      /*reuse*/false,
+                                      &error_str);
+    if (!map.IsValid()) {
       LOG(ERROR) << error_str;
       return nullptr;
     }
     CHECK(!live_bitmaps_.empty());
     std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
     live_bitmaps_.pop_back();
-    std::unique_ptr<MemMap> oat_map(MemMap::MapAnonymous("OatMap",
-                                                         oat_begin,
-                                                         oat_size,
-                                                         PROT_READ | PROT_WRITE,
-                                                         /*low_4gb*/true,
-                                                         /*reuse*/false,
-                                                         &error_str));
-    if (oat_map == nullptr) {
+    MemMap oat_map = MemMap::MapAnonymous("OatMap",
+                                          oat_begin,
+                                          oat_size,
+                                          PROT_READ | PROT_WRITE,
+                                          /*low_4gb*/true,
+                                          /*reuse*/false,
+                                          &error_str);
+    if (!oat_map.IsValid()) {
       LOG(ERROR) << error_str;
       return nullptr;
     }
-    std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map->Begin(), oat_map->End()));
+    std::unique_ptr<DummyOatFile> oat_file(new DummyOatFile(oat_map.Begin(), oat_map.End()));
     // Create image header.
     ImageSection sections[ImageHeader::kSectionCount];
-    new (map->Begin()) ImageHeader(
-        /*image_begin*/PointerToLowMemUInt32(map->Begin()),
-        /*image_size*/map->Size(),
+    new (map.Begin()) ImageHeader(
+        /*image_begin*/PointerToLowMemUInt32(map.Begin()),
+        /*image_size*/map.Size(),
         sections,
-        /*image_roots*/PointerToLowMemUInt32(map->Begin()) + 1,
+        /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1,
         /*oat_checksum*/0u,
         // The oat file data in the header is always right after the image space.
         /*oat_file_begin*/PointerToLowMemUInt32(oat_begin),
@@ -131,7 +131,7 @@
         /*is_pic*/false,
         ImageHeader::kStorageModeUncompressed,
         /*storage_size*/0u);
-    return new DummyImageSpace(map.release(),
+    return new DummyImageSpace(std::move(map),
                                live_bitmap.release(),
                                std::move(oat_file),
                                std::move(oat_map));
@@ -141,18 +141,18 @@
   // returned address.
   static uint8_t* GetContinuousMemoryRegion(size_t size) {
     std::string error_str;
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("reserve",
-                                                     nullptr,
-                                                     size,
-                                                     PROT_READ | PROT_WRITE,
-                                                     /*low_4gb*/true,
-                                                     /*reuse*/false,
-                                                     &error_str));
-    if (map == nullptr) {
+    MemMap map = MemMap::MapAnonymous("reserve",
+                                      /* addr */ nullptr,
+                                      size,
+                                      PROT_READ | PROT_WRITE,
+                                      /*low_4gb*/ true,
+                                      /*reuse*/ false,
+                                      &error_str);
+    if (!map.IsValid()) {
       LOG(ERROR) << "Failed to allocate memory region " << error_str;
       return nullptr;
     }
-    return map->Begin();
+    return map.Begin();
   }
 
  private:
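
DummyImageSpace's initializer list passes std::move(map) and map.End() in the same call. This is safe: std::move is only a cast, and all constructor arguments are evaluated before the ImageSpace constructor body performs the actual move, so map.End() still reads the live mapping. A self-contained illustration with a hypothetical function f:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <utility>

    // f takes ownership of s in its body; both arguments were already evaluated.
    void f(std::string&& s, std::size_t n) {
      std::string owned = std::move(s);
      std::cout << owned << " had length " << n << "\n";
    }

    int main() {
      std::string s = "hello";
      // The order in which the two arguments are evaluated is unspecified, but
      // std::move(s) is a cast with no side effects, so n is 5 either way.
      f(std::move(s), s.size());  // prints "hello had length 5"
      return 0;
    }
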
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2335964..334c7a0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -103,12 +103,16 @@
       is_concurrent_(is_concurrent),
       live_stack_freeze_size_(0) {
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous(
-      "mark sweep sweep array free buffer", nullptr,
+  sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
+      "mark sweep sweep array free buffer",
+      /* addr */ nullptr,
       RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
-      PROT_READ | PROT_WRITE, false, false, &error_msg);
-  CHECK(mem_map != nullptr) << "Couldn't allocate sweep array free buffer: " << error_msg;
-  sweep_array_free_buffer_mem_map_.reset(mem_map);
+      PROT_READ | PROT_WRITE,
+      /* low_4gb */ false,
+      /* reuse */ false,
+      &error_msg);
+  CHECK(sweep_array_free_buffer_mem_map_.IsValid())
+      << "Couldn't allocate sweep array free buffer: " << error_msg;
 }
 
 void MarkSweep::InitializePhase() {
@@ -1207,7 +1211,7 @@
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   Thread* self = Thread::Current();
   mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
-      sweep_array_free_buffer_mem_map_->BaseBegin());
+      sweep_array_free_buffer_mem_map_.BaseBegin());
   size_t chunk_free_pos = 0;
   ObjectBytePair freed;
   ObjectBytePair freed_los;
@@ -1300,7 +1304,7 @@
     t2.NewTiming("ResetStack");
     allocations->Reset();
   }
-  sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
+  sweep_array_free_buffer_mem_map_.MadviseDontNeedAndZero();
 }
 
 void MarkSweep::Sweep(bool swap_bitmaps) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 5e0fe06..70e4432 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -351,7 +351,7 @@
   // Verification.
   size_t live_stack_freeze_size_;
 
-  std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
+  MemMap sweep_array_free_buffer_mem_map_;
 
  private:
   class CardScanTask;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 58becb1..a1a1a5c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -358,8 +358,8 @@
   if (foreground_collector_type_ == kCollectorTypeGSS) {
     separate_non_moving_space = false;
   }
-  std::unique_ptr<MemMap> main_mem_map_1;
-  std::unique_ptr<MemMap> main_mem_map_2;
+  MemMap main_mem_map_1;
+  MemMap main_mem_map_2;
 
   // Gross hack to make dex2oat deterministic.
   if (foreground_collector_type_ == kCollectorTypeMS &&
@@ -374,7 +374,7 @@
     request_begin += non_moving_space_capacity;
   }
   std::string error_str;
-  std::unique_ptr<MemMap> non_moving_space_mem_map;
+  MemMap non_moving_space_mem_map;
   if (separate_non_moving_space) {
     ScopedTrace trace2("Create separate non moving space");
     // If we are the zygote, the non moving space becomes the zygote space when we run
@@ -383,11 +383,9 @@
     const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
     // Reserve the non moving mem map before the other two since it needs to be at a specific
     // address.
-    non_moving_space_mem_map.reset(MapAnonymousPreferredAddress(space_name,
-                                                                requested_alloc_space_begin,
-                                                                non_moving_space_capacity,
-                                                                &error_str));
-    CHECK(non_moving_space_mem_map != nullptr) << error_str;
+    non_moving_space_mem_map = MapAnonymousPreferredAddress(
+        space_name, requested_alloc_space_begin, non_moving_space_capacity, &error_str);
+    CHECK(non_moving_space_mem_map.IsValid()) << error_str;
     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
     request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
   }
@@ -395,27 +393,29 @@
   if (foreground_collector_type_ != kCollectorTypeCC) {
     ScopedTrace trace2("Create main mem map");
     if (separate_non_moving_space || !is_zygote) {
-      main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0],
-                                                        request_begin,
-                                                        capacity_,
-                                                        &error_str));
+      main_mem_map_1 = MapAnonymousPreferredAddress(
+          kMemMapSpaceName[0], request_begin, capacity_, &error_str);
     } else {
       // If no separate non-moving space and we are the zygote, the main space must come right
       // after the image space to avoid a gap. This is required since we want the zygote space to
       // be adjacent to the image space.
-      main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
-                                                PROT_READ | PROT_WRITE, true, false,
-                                                &error_str));
+      main_mem_map_1 = MemMap::MapAnonymous(kMemMapSpaceName[0],
+                                            request_begin,
+                                            capacity_,
+                                            PROT_READ | PROT_WRITE,
+                                            /* low_4gb */ true,
+                                            /* reuse */ false,
+                                            &error_str);
     }
-    CHECK(main_mem_map_1.get() != nullptr) << error_str;
+    CHECK(main_mem_map_1.IsValid()) << error_str;
   }
   if (support_homogeneous_space_compaction ||
       background_collector_type_ == kCollectorTypeSS ||
       foreground_collector_type_ == kCollectorTypeSS) {
     ScopedTrace trace2("Create main mem map 2");
-    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
-                                                      capacity_, &error_str));
-    CHECK(main_mem_map_2.get() != nullptr) << error_str;
+    main_mem_map_2 = MapAnonymousPreferredAddress(
+        kMemMapSpaceName[1], main_mem_map_1.End(), capacity_, &error_str);
+    CHECK(main_mem_map_2.IsValid()) << error_str;
   }
 
   // Create the non moving space first so that bitmaps don't take up the address range.
@@ -423,10 +423,14 @@
     ScopedTrace trace2("Add non moving space");
     // Non moving space is always dlmalloc since we currently don't have support for multiple
     // active rosalloc spaces.
-    const size_t size = non_moving_space_mem_map->Size();
-    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
-        non_moving_space_mem_map.release(), "zygote / non moving space", kDefaultStartingSize,
-        initial_size, size, size, false);
+    const size_t size = non_moving_space_mem_map.Size();
+    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
+                                                               "zygote / non moving space",
+                                                               kDefaultStartingSize,
+                                                               initial_size,
+                                                               size,
+                                                               size,
+                                                               /* can_move_objects */ false);
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
     CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
         << requested_alloc_space_begin;
@@ -436,11 +440,10 @@
   if (foreground_collector_type_ == kCollectorTypeCC) {
     CHECK(separate_non_moving_space);
     // Reserve twice the capacity, to allow evacuating every region for explicit GCs.
-    MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
-                                                                    capacity_ * 2,
-                                                                    request_begin);
-    CHECK(region_space_mem_map != nullptr) << "No region space mem map";
-    region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
+    MemMap region_space_mem_map =
+        space::RegionSpace::CreateMemMap(kRegionSpaceName, capacity_ * 2, request_begin);
+    CHECK(region_space_mem_map.IsValid()) << "No region space mem map";
+    region_space_ = space::RegionSpace::Create(kRegionSpaceName, std::move(region_space_mem_map));
     AddSpace(region_space_);
   } else if (IsMovingGc(foreground_collector_type_) &&
       foreground_collector_type_ != kCollectorTypeGSS) {
@@ -448,16 +451,16 @@
     // We only to create the bump pointer if the foreground collector is a compacting GC.
     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
     bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
-                                                                    main_mem_map_1.release());
+                                                                    std::move(main_mem_map_1));
     CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
     AddSpace(bump_pointer_space_);
     temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
-                                                            main_mem_map_2.release());
+                                                            std::move(main_mem_map_2));
     CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
     AddSpace(temp_space_);
     CHECK(separate_non_moving_space);
   } else {
-    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
+    CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
     CHECK(main_space_ != nullptr);
     AddSpace(main_space_);
     if (!separate_non_moving_space) {
@@ -467,19 +470,23 @@
     if (foreground_collector_type_ == kCollectorTypeGSS) {
       CHECK_EQ(foreground_collector_type_, background_collector_type_);
       // Create bump pointer spaces instead of a backup space.
-      main_mem_map_2.release();
-      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
-                                                            kGSSBumpPointerSpaceCapacity, nullptr);
+      main_mem_map_2.Reset();
+      bump_pointer_space_ = space::BumpPointerSpace::Create(
+          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
       CHECK(bump_pointer_space_ != nullptr);
       AddSpace(bump_pointer_space_);
-      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
-                                                    kGSSBumpPointerSpaceCapacity, nullptr);
+      temp_space_ = space::BumpPointerSpace::Create(
+          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr);
       CHECK(temp_space_ != nullptr);
       AddSpace(temp_space_);
-    } else if (main_mem_map_2.get() != nullptr) {
+    } else if (main_mem_map_2.IsValid()) {
       const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
-      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
-                                                           growth_limit_, capacity_, name, true));
+      main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
+                                                           initial_size,
+                                                           growth_limit_,
+                                                           capacity_,
+                                                           name,
+                                                           /* can_move_objects */ true));
       CHECK(main_space_backup_.get() != nullptr);
       // Add the space so its accounted for in the heap_begin and heap_end.
       AddSpace(main_space_backup_.get());
@@ -613,7 +620,7 @@
         first_space = space;
       }
     }
-    bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
+    bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap());
     if (!no_gap) {
       PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
       MemMap::DumpMaps(LOG_STREAM(ERROR), true);
@@ -632,14 +639,19 @@
   }
 }
 
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name,
-                                           uint8_t* request_begin,
-                                           size_t capacity,
-                                           std::string* out_error_str) {
+MemMap Heap::MapAnonymousPreferredAddress(const char* name,
+                                          uint8_t* request_begin,
+                                          size_t capacity,
+                                          std::string* out_error_str) {
   while (true) {
-    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
-                                       PROT_READ | PROT_WRITE, true, false, out_error_str);
-    if (map != nullptr || request_begin == nullptr) {
+    MemMap map = MemMap::MapAnonymous(name,
+                                      request_begin,
+                                      capacity,
+                                      PROT_READ | PROT_WRITE,
+                                      /* low_4gb */ true,
+                                      /* reuse */ false,
+                                      out_error_str);
+    if (map.IsValid() || request_begin == nullptr) {
       return map;
     }
     // Retry a  second time with no specified request begin.
@@ -651,7 +663,7 @@
   return foreground_collector_type_ == type || background_collector_type_ == type;
 }
 
-space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map,
+space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                       size_t initial_size,
                                                       size_t growth_limit,
                                                       size_t capacity,
@@ -660,12 +672,21 @@
   space::MallocSpace* malloc_space = nullptr;
   if (kUseRosAlloc) {
     // Create rosalloc space.
-    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
-                                                          initial_size, growth_limit, capacity,
-                                                          low_memory_mode_, can_move_objects);
+    malloc_space = space::RosAllocSpace::CreateFromMemMap(std::move(mem_map),
+                                                          name,
+                                                          kDefaultStartingSize,
+                                                          initial_size,
+                                                          growth_limit,
+                                                          capacity,
+                                                          low_memory_mode_,
+                                                          can_move_objects);
   } else {
-    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
-                                                          initial_size, growth_limit, capacity,
+    malloc_space = space::DlMallocSpace::CreateFromMemMap(std::move(mem_map),
+                                                          name,
+                                                          kDefaultStartingSize,
+                                                          initial_size,
+                                                          growth_limit,
+                                                          capacity,
                                                           can_move_objects);
   }
   if (collector::SemiSpace::kUseRememberedSet) {
@@ -679,7 +700,9 @@
   return malloc_space;
 }
 
-void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
+void Heap::CreateMainMallocSpace(MemMap&& mem_map,
+                                 size_t initial_size,
+                                 size_t growth_limit,
                                  size_t capacity) {
   // Is background compaction is enabled?
   bool can_move_objects = IsMovingGc(background_collector_type_) !=
@@ -698,7 +721,11 @@
     RemoveRememberedSet(main_space_);
   }
   const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
-  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
+  main_space_ = CreateMallocSpaceFromMemMap(std::move(mem_map),
+                                            initial_size,
+                                            growth_limit,
+                                            capacity,
+                                            name,
                                             can_move_objects);
   SetSpaceAsDefault(main_space_);
   VLOG(heap) << "Created main space " << main_space_;
@@ -2012,17 +2038,17 @@
         if (!IsMovingGc(collector_type_)) {
           // Create the bump pointer space from the backup space.
           CHECK(main_space_backup_ != nullptr);
-          std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
+          MemMap mem_map = main_space_backup_->ReleaseMemMap();
           // We are transitioning from non moving GC -> moving GC, since we copied from the bump
           // pointer space last transition it will be protected.
-          CHECK(mem_map != nullptr);
-          mem_map->Protect(PROT_READ | PROT_WRITE);
+          CHECK(mem_map.IsValid());
+          mem_map.Protect(PROT_READ | PROT_WRITE);
           bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
-                                                                          mem_map.release());
+                                                                          std::move(mem_map));
           AddSpace(bump_pointer_space_);
           collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
           // Use the now empty main space mem map for the bump pointer temp space.
-          mem_map.reset(main_space_->ReleaseMemMap());
+          mem_map = main_space_->ReleaseMemMap();
           // Unset the pointers just in case.
           if (dlmalloc_space_ == main_space_) {
             dlmalloc_space_ = nullptr;
@@ -2038,7 +2064,7 @@
           RemoveRememberedSet(main_space_backup_.get());
           main_space_backup_.reset(nullptr);  // Deletes the space.
           temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
-                                                                  mem_map.release());
+                                                                  std::move(mem_map));
           AddSpace(temp_space_);
         }
         break;
@@ -2048,37 +2074,35 @@
       case kCollectorTypeCMS: {
         if (IsMovingGc(collector_type_)) {
           CHECK(temp_space_ != nullptr);
-          std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
+          MemMap mem_map = temp_space_->ReleaseMemMap();
           RemoveSpace(temp_space_);
           temp_space_ = nullptr;
-          mem_map->Protect(PROT_READ | PROT_WRITE);
-          CreateMainMallocSpace(mem_map.get(),
+          mem_map.Protect(PROT_READ | PROT_WRITE);
+          CreateMainMallocSpace(std::move(mem_map),
                                 kDefaultInitialSize,
-                                std::min(mem_map->Size(), growth_limit_),
-                                mem_map->Size());
-          mem_map.release();
+                                std::min(mem_map.Size(), growth_limit_),
+                                mem_map.Size());
           // Compact to the main space from the bump pointer space, don't need to swap semispaces.
           AddSpace(main_space_);
           collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
-          mem_map.reset(bump_pointer_space_->ReleaseMemMap());
+          mem_map = bump_pointer_space_->ReleaseMemMap();
           RemoveSpace(bump_pointer_space_);
           bump_pointer_space_ = nullptr;
           const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
           // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
           if (kIsDebugBuild && kUseRosAlloc) {
-            mem_map->Protect(PROT_READ | PROT_WRITE);
+            mem_map.Protect(PROT_READ | PROT_WRITE);
           }
           main_space_backup_.reset(CreateMallocSpaceFromMemMap(
-              mem_map.get(),
+              std::move(mem_map),
               kDefaultInitialSize,
-              std::min(mem_map->Size(), growth_limit_),
-              mem_map->Size(),
+              std::min(mem_map.Size(), growth_limit_),
+              mem_map.Size(),
               name,
               true));
           if (kIsDebugBuild && kUseRosAlloc) {
-            mem_map->Protect(PROT_NONE);
+            main_space_backup_->GetMemMap()->Protect(PROT_NONE);
           }
-          mem_map.release();
         }
         break;
       }
@@ -2323,11 +2347,13 @@
     if (reset_main_space) {
       main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
       madvise(main_space_->Begin(), main_space_->Capacity(), MADV_DONTNEED);
-      MemMap* mem_map = main_space_->ReleaseMemMap();
+      MemMap mem_map = main_space_->ReleaseMemMap();
       RemoveSpace(main_space_);
       space::Space* old_main_space = main_space_;
-      CreateMainMallocSpace(mem_map, kDefaultInitialSize, std::min(mem_map->Size(), growth_limit_),
-                            mem_map->Size());
+      CreateMainMallocSpace(std::move(mem_map),
+                            kDefaultInitialSize,
+                            std::min(mem_map.Size(), growth_limit_),
+                            mem_map.Size());
       delete old_main_space;
       AddSpace(main_space_);
     } else {
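
The rosalloc debug path above is where a use-after-move would be easiest to introduce: once mem_map has been moved into the backup space, the moved-from local is invalid, so re-protecting the pages has to go through main_space_backup_->GetMemMap() instead of the local. A sketch of that ownership rule with hypothetical types:

    #include <utility>

    struct Map {
      bool valid = true;
      Map() = default;
      Map(Map&& other) noexcept : valid(other.valid) { other.valid = false; }
      void Protect() { /* mprotect() in the real code */ }
    };

    struct Space {
      explicit Space(Map&& m) : map(std::move(m)) {}
      Map map;
    };

    void Transition() {
      Map local;
      Space space(std::move(local));  // 'local' is now moved-from and invalid
      space.map.Protect();            // correct: operate through the new owner
      // local.Protect();             // wrong: the local no longer owns anything
    }
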
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5c34c56..0dcf4f5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -835,8 +835,10 @@
   void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
 
   // Create a mem map with a preferred base address.
-  static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
-                                              size_t capacity, std::string* out_error_str);
+  static MemMap MapAnonymousPreferredAddress(const char* name,
+                                             uint8_t* request_begin,
+                                             size_t capacity,
+                                             std::string* out_error_str);
 
   bool SupportHSpaceCompaction() const {
     // Returns true if we can do hspace compaction
@@ -979,13 +981,13 @@
   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
 
   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
-  void CreateMainMallocSpace(MemMap* mem_map,
+  void CreateMainMallocSpace(MemMap&& mem_map,
                              size_t initial_size,
                              size_t growth_limit,
                              size_t capacity);
 
   // Create a malloc space based on a mem map. Does not set the space as default.
-  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map,
+  space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
                                                   size_t initial_size,
                                                   size_t growth_limit,
                                                   size_t capacity,
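
MapAnonymousPreferredAddress now returns the map by value. Because the type is move-only, returning the local either elides the copy or moves it implicitly, and failure comes back as an invalid value that callers test with IsValid() rather than a null pointer. A minimal model:

    struct Map {
      Map() = default;
      Map(Map&&) noexcept = default;
      Map& operator=(Map&&) noexcept = default;
      Map(const Map&) = delete;
      bool valid = false;
    };

    Map TryReserve(bool succeed) {
      Map m;
      m.valid = succeed;  // stands in for the mmap() attempt
      return m;           // the local is elided or implicitly moved, never copied
    }

    bool Demo() {
      Map reserved = TryReserve(true);
      return reserved.valid;  // callers branch on validity, not on nullness
    }
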
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index c6b2120..d35ae38 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -33,19 +33,19 @@
     MemMap::Init();
     std::string error_msg;
     // Reserve the preferred address to force the heap to use another one for testing.
-    reserved_.reset(MemMap::MapAnonymous("ReserveMap",
-                                         gc::Heap::kPreferredAllocSpaceBegin,
-                                         16 * KB,
-                                         PROT_READ,
-                                         /*low_4gb*/ true,
-                                         /*reuse*/ false,
-                                         &error_msg));
-    ASSERT_TRUE(reserved_ != nullptr) << error_msg;
+    reserved_ = MemMap::MapAnonymous("ReserveMap",
+                                     gc::Heap::kPreferredAllocSpaceBegin,
+                                     16 * KB,
+                                     PROT_READ,
+                                     /*low_4gb*/ true,
+                                     /*reuse*/ false,
+                                     &error_msg);
+    ASSERT_TRUE(reserved_.IsValid()) << error_msg;
     CommonRuntimeTest::SetUp();
   }
 
  private:
-  std::unique_ptr<MemMap> reserved_;
+  MemMap reserved_;
 };
 
 TEST_F(HeapTest, ClearGrowthLimit) {
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index e95da01..2712ec2 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -28,23 +28,31 @@
                                            uint8_t* requested_begin) {
   capacity = RoundUp(capacity, kPageSize);
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
-                                                       PROT_READ | PROT_WRITE, true, false,
-                                                       &error_msg));
-  if (mem_map.get() == nullptr) {
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        requested_begin,
+                                        capacity,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ true,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
         << PrettySize(capacity) << " with message " << error_msg;
     return nullptr;
   }
-  return new BumpPointerSpace(name, mem_map.release());
+  return new BumpPointerSpace(name, std::move(mem_map));
 }
 
-BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap* mem_map) {
-  return new BumpPointerSpace(name, mem_map);
+BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, MemMap&& mem_map) {
+  return new BumpPointerSpace(name, std::move(mem_map));
 }
 
 BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
-    : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
+    : ContinuousMemMapAllocSpace(name,
+                                 MemMap::Invalid(),
+                                 begin,
+                                 begin,
+                                 limit,
                                  kGcRetentionPolicyAlwaysCollect),
       growth_end_(limit),
       objects_allocated_(0), bytes_allocated_(0),
@@ -53,10 +61,14 @@
       num_blocks_(0) {
 }
 
-BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
-    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
+BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
+    : ContinuousMemMapAllocSpace(name,
+                                 std::move(mem_map),
+                                 mem_map.Begin(),
+                                 mem_map.Begin(),
+                                 mem_map.End(),
                                  kGcRetentionPolicyAlwaysCollect),
-      growth_end_(mem_map->End()),
+      growth_end_(mem_map_.End()),
       objects_allocated_(0), bytes_allocated_(0),
       block_lock_("Block lock", kBumpPointerSpaceBlockLock),
       main_block_size_(0),
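
The BumpPointerSpace constructor above is deliberately asymmetric: the base-class arguments read the mem_map parameter, which is legal because they are evaluated before the base constructor performs the move, while growth_end_ must read the member mem_map_, since by the time later members are initialized the parameter has been moved from. In miniature, with hypothetical types:

    #include <utility>

    struct Map {
      int end = 42;
      Map() = default;
      Map(Map&& other) noexcept : end(other.end) { other.end = 0; }  // source emptied
    };

    struct Base {
      Base(Map&& map, int cached_end) : map_(std::move(map)), end_(cached_end) {}
      Map map_;
      int end_;
    };

    struct Derived : Base {
      explicit Derived(Map&& map)
          : Base(std::move(map), map.end),  // OK: evaluated before Base moves map
            growth_end_(map_.end) {}        // OK: Base already owns the map
      // growth_end_(map.end) here would read the moved-from parameter (0).
      int growth_end_;
    };
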
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 5ba13ca..9b31558 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -47,7 +47,7 @@
   // guaranteed to be granted, if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
   static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
-  static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
+  static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
 
   // Allocate num_bytes, returns null if the space is full.
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -166,7 +166,7 @@
   static constexpr size_t kAlignment = 8;
 
  protected:
-  BumpPointerSpace(const std::string& name, MemMap* mem_map);
+  BumpPointerSpace(const std::string& name, MemMap&& mem_map);
 
   // Allocate a raw block of bytes.
   uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 025c3f0..36d2161 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -38,41 +38,74 @@
 
 static constexpr bool kPrefetchDuringDlMallocFreeList = true;
 
-DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
-                             void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
-                             size_t growth_limit, bool can_move_objects, size_t starting_size)
-    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+DlMallocSpace::DlMallocSpace(MemMap&& mem_map,
+                             size_t initial_size,
+                             const std::string& name,
+                             void* mspace,
+                             uint8_t* begin,
+                             uint8_t* end,
+                             uint8_t* limit,
+                             size_t growth_limit,
+                             bool can_move_objects,
+                             size_t starting_size)
+    : MallocSpace(name,
+                  std::move(mem_map),
+                  begin,
+                  end,
+                  limit,
+                  growth_limit,
+                  /* create_bitmaps */ true,
+                  can_move_objects,
                   starting_size, initial_size),
       mspace_(mspace) {
   CHECK(mspace != nullptr);
 }
 
-DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                               size_t starting_size, size_t initial_size,
-                                               size_t growth_limit, size_t capacity,
+DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap&& mem_map,
+                                               const std::string& name,
+                                               size_t starting_size,
+                                               size_t initial_size,
+                                               size_t growth_limit,
+                                               size_t capacity,
                                                bool can_move_objects) {
-  DCHECK(mem_map != nullptr);
-  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
+  DCHECK(mem_map.IsValid());
+  void* mspace = CreateMspace(mem_map.Begin(), starting_size, initial_size);
   if (mspace == nullptr) {
     LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
     return nullptr;
   }
 
   // Protect memory beyond the starting size. morecore will add r/w permissions when necessory
-  uint8_t* end = mem_map->Begin() + starting_size;
+  uint8_t* end = mem_map.Begin() + starting_size;
   if (capacity - starting_size > 0) {
     CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
   }
 
   // Everything is set so record in immutable structure and leave
-  uint8_t* begin = mem_map->Begin();
+  uint8_t* begin = mem_map.Begin();
   if (Runtime::Current()->IsRunningOnMemoryTool()) {
     return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
-        mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
-        can_move_objects, starting_size);
+        std::move(mem_map),
+        initial_size,
+        name,
+        mspace,
+        begin,
+        end,
+        begin + capacity,
+        growth_limit,
+        can_move_objects,
+        starting_size);
   } else {
-    return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
-                             growth_limit, can_move_objects, starting_size);
+    return new DlMallocSpace(std::move(mem_map),
+                             initial_size,
+                             name,
+                             mspace,
+                             begin,
+                             end,
+                             begin + capacity,
+                             growth_limit,
+                             can_move_objects,
+                             starting_size);
   }
 }
 
@@ -94,15 +126,20 @@
   // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
   size_t starting_size = kPageSize;
-  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
-                                 requested_begin);
-  if (mem_map == nullptr) {
+  MemMap mem_map =
+      CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
     return nullptr;
   }
-  DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
-                                          growth_limit, capacity, can_move_objects);
+  DlMallocSpace* space = CreateFromMemMap(std::move(mem_map),
+                                          name,
+                                          starting_size,
+                                          initial_size,
+                                          growth_limit,
+                                          capacity,
+                                          can_move_objects);
   // We start out with only the initial size possibly containing objects.
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
@@ -152,17 +189,37 @@
   return result;
 }
 
-MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
-                                           void* allocator, uint8_t* begin, uint8_t* end,
-                                           uint8_t* limit, size_t growth_limit,
+MallocSpace* DlMallocSpace::CreateInstance(MemMap&& mem_map,
+                                           const std::string& name,
+                                           void* allocator,
+                                           uint8_t* begin,
+                                           uint8_t* end,
+                                           uint8_t* limit,
+                                           size_t growth_limit,
                                            bool can_move_objects) {
   if (Runtime::Current()->IsRunningOnMemoryTool()) {
     return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
-        mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
-        can_move_objects, starting_size_);
+        std::move(mem_map),
+        initial_size_,
+        name,
+        allocator,
+        begin,
+        end,
+        limit,
+        growth_limit,
+        can_move_objects,
+        starting_size_);
   } else {
-    return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
-                             growth_limit, can_move_objects, starting_size_);
+    return new DlMallocSpace(std::move(mem_map),
+                             initial_size_,
+                             name,
+                             allocator,
+                             begin,
+                             end,
+                             limit,
+                             growth_limit,
+                             can_move_objects,
+                             starting_size_);
   }
 }
 
@@ -283,7 +340,7 @@
   live_bitmap_->Clear();
   mark_bitmap_->Clear();
   SetEnd(Begin() + starting_size_);
-  mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
+  mspace_ = CreateMspace(mem_map_.Begin(), starting_size_, initial_size_);
   SetFootprintLimit(footprint_limit);
 }
 
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 4c7fcfd..66537d5 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -34,9 +34,12 @@
 class DlMallocSpace : public MallocSpace {
  public:
   // Create a DlMallocSpace from an existing mem_map.
-  static DlMallocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                         size_t starting_size, size_t initial_size,
-                                         size_t growth_limit, size_t capacity,
+  static DlMallocSpace* CreateFromMemMap(MemMap&& mem_map,
+                                         const std::string& name,
+                                         size_t starting_size,
+                                         size_t initial_size,
+                                         size_t growth_limit,
+                                         size_t capacity,
                                          bool can_move_objects);
 
   // Create a DlMallocSpace with the requested sizes. The requested
@@ -118,9 +121,14 @@
   // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
   void SetFootprintLimit(size_t limit) OVERRIDE;
 
-  MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
-                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
-                              bool can_move_objects);
+  MallocSpace* CreateInstance(MemMap&& mem_map,
+                              const std::string& name,
+                              void* allocator,
+                              uint8_t* begin,
+                              uint8_t* end,
+                              uint8_t* limit,
+                              size_t growth_limit,
+                              bool can_move_objects) OVERRIDE;
 
   uint64_t GetBytesAllocated() OVERRIDE;
   uint64_t GetObjectsAllocated() OVERRIDE;
@@ -139,9 +147,16 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  protected:
-  DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
-                uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
-                bool can_move_objects, size_t starting_size);
+  DlMallocSpace(MemMap&& mem_map,
+                size_t initial_size,
+                const std::string& name,
+                void* mspace,
+                uint8_t* begin,
+                uint8_t* end,
+                uint8_t* limit,
+                size_t growth_limit,
+                bool can_move_objects,
+                size_t starting_size);
 
  private:
   mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
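
CreateInstance gains OVERRIDE (ART's macro for the C++11 override keyword) along with the MemMap&& parameter, so the compiler checks that the new signature still matches the virtual declared in MallocSpace; without the annotation, a mismatched signature would quietly become an unrelated overload. For example:

    struct Base {
      virtual void Create(int&& x) = 0;
      virtual ~Base() = default;
    };

    struct Checked : Base {
      void Create(int&& x) override { (void)x; }  // signature verified by the compiler
    };

    // struct Mismatched : Base {
    //   void Create(int x) override {}  // error: 'override' but overrides nothing
    // };
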
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 826f382f..ae4b9da 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -62,12 +62,12 @@
 
 ImageSpace::ImageSpace(const std::string& image_filename,
                        const char* image_location,
-                       MemMap* mem_map,
+                       MemMap&& mem_map,
                        accounting::ContinuousSpaceBitmap* live_bitmap,
                        uint8_t* end)
     : MemMapSpace(image_filename,
-                  mem_map,
-                  mem_map->Begin(),
+                  std::move(mem_map),
+                  mem_map.Begin(),
                   end,
                   end,
                   kGcRetentionPolicyNeverCollect),
@@ -636,53 +636,53 @@
       return nullptr;
     }
 
-    std::unique_ptr<MemMap> map;
+    MemMap map;
 
     // GetImageBegin is the preferred address to map the image. If we manage to map the
     // image at the image begin, the amount of fixup work required is minimized.
     // If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
     // avoid reading proc maps for a mapping failure and slowing everything down.
-    map.reset(LoadImageFile(image_filename,
-                            image_location,
-                            *image_header,
-                            image_header->GetImageBegin(),
-                            file->Fd(),
-                            logger,
-                            image_header->IsPic() ? nullptr : error_msg));
+    map = LoadImageFile(image_filename,
+                        image_location,
+                        *image_header,
+                        image_header->GetImageBegin(),
+                        file->Fd(),
+                        logger,
+                        image_header->IsPic() ? nullptr : error_msg);
     // If the header specifies PIC mode, we can also map at a random low_4gb address since we can
     // relocate in-place.
-    if (map == nullptr && image_header->IsPic()) {
-      map.reset(LoadImageFile(image_filename,
-                              image_location,
-                              *image_header,
-                              /* address */ nullptr,
-                              file->Fd(),
-                              logger,
-                              error_msg));
+    if (!map.IsValid() && image_header->IsPic()) {
+      map = LoadImageFile(image_filename,
+                          image_location,
+                          *image_header,
+                          /* address */ nullptr,
+                          file->Fd(),
+                          logger,
+                          error_msg);
     }
     // Were we able to load something and continue?
-    if (map == nullptr) {
+    if (!map.IsValid()) {
       DCHECK(!error_msg->empty());
       return nullptr;
     }
-    DCHECK_EQ(0, memcmp(image_header, map->Begin(), sizeof(ImageHeader)));
+    DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
 
-    std::unique_ptr<MemMap> image_bitmap_map(MemMap::MapFileAtAddress(nullptr,
-                                                                      bitmap_section.Size(),
-                                                                      PROT_READ, MAP_PRIVATE,
-                                                                      file->Fd(),
-                                                                      image_bitmap_offset,
-                                                                      /*low_4gb*/false,
-                                                                      /*reuse*/false,
-                                                                      image_filename,
-                                                                      error_msg));
-    if (image_bitmap_map == nullptr) {
+    MemMap image_bitmap_map = MemMap::MapFileAtAddress(nullptr,
+                                                       bitmap_section.Size(),
+                                                       PROT_READ, MAP_PRIVATE,
+                                                       file->Fd(),
+                                                       image_bitmap_offset,
+                                                       /*low_4gb*/false,
+                                                       /*reuse*/false,
+                                                       image_filename,
+                                                       error_msg);
+    if (!image_bitmap_map.IsValid()) {
       *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
       return nullptr;
     }
     // Loaded the map; use the image header from the file now in case we patch it with
     // RelocateInPlace.
-    image_header = reinterpret_cast<ImageHeader*>(map->Begin());
+    image_header = reinterpret_cast<ImageHeader*>(map.Begin());
     const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1, std::memory_order_seq_cst);
     std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
                                          image_filename,
@@ -690,15 +690,15 @@
     // Bitmap only needs to cover until the end of the mirror objects section.
     const ImageSection& image_objects = image_header->GetObjectsSection();
     // We only want the mirror objects, not the ArtFields and ArtMethods.
-    uint8_t* const image_end = map->Begin() + image_objects.End();
+    uint8_t* const image_end = map.Begin() + image_objects.End();
     std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
     {
       TimingLogger::ScopedTiming timing("CreateImageBitmap", &logger);
       bitmap.reset(
           accounting::ContinuousSpaceBitmap::CreateFromMemMap(
               bitmap_name,
-              image_bitmap_map.release(),
-              reinterpret_cast<uint8_t*>(map->Begin()),
+              std::move(image_bitmap_map),
+              reinterpret_cast<uint8_t*>(map.Begin()),
               // Make sure the bitmap is aligned to card size instead of just bitmap word size.
               RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize)));
       if (bitmap == nullptr) {
@@ -709,7 +709,7 @@
     {
       TimingLogger::ScopedTiming timing("RelocateImage", &logger);
       if (!RelocateInPlace(*image_header,
-                           map->Begin(),
+                           map.Begin(),
                            bitmap.get(),
                            oat_file,
                            error_msg)) {
@@ -719,7 +719,7 @@
     // We only want the mirror objects, not the ArtFields and ArtMethods.
     std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
                                                      image_location,
-                                                     map.release(),
+                                                     std::move(map),
                                                      bitmap.release(),
                                                      image_end));
 
@@ -807,13 +807,13 @@
   }
 
  private:
-  static MemMap* LoadImageFile(const char* image_filename,
-                               const char* image_location,
-                               const ImageHeader& image_header,
-                               uint8_t* address,
-                               int fd,
-                               TimingLogger& logger,
-                               std::string* error_msg) {
+  static MemMap LoadImageFile(const char* image_filename,
+                              const char* image_location,
+                              const ImageHeader& image_header,
+                              uint8_t* address,
+                              int fd,
+                              TimingLogger& logger,
+                              std::string* error_msg) {
     TimingLogger::ScopedTiming timing("MapImageFile", &logger);
     const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
     if (storage_mode == ImageHeader::kStorageModeUncompressed) {
@@ -835,45 +835,45 @@
         *error_msg = StringPrintf("Invalid storage mode in image header %d",
                                   static_cast<int>(storage_mode));
       }
-      return nullptr;
+      return MemMap::Invalid();
     }
 
     // Reserve output and decompress into it.
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous(image_location,
-                                                     address,
-                                                     image_header.GetImageSize(),
-                                                     PROT_READ | PROT_WRITE,
-                                                     /*low_4gb*/true,
-                                                     /*reuse*/false,
-                                                     error_msg));
-    if (map != nullptr) {
+    MemMap map = MemMap::MapAnonymous(image_location,
+                                      address,
+                                      image_header.GetImageSize(),
+                                      PROT_READ | PROT_WRITE,
+                                      /*low_4gb*/ true,
+                                      /*reuse*/ false,
+                                      error_msg);
+    if (map.IsValid()) {
       const size_t stored_size = image_header.GetDataSize();
       const size_t decompress_offset = sizeof(ImageHeader);  // Skip the header.
-      std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
-                                                       PROT_READ,
-                                                       MAP_PRIVATE,
-                                                       fd,
-                                                       /*offset*/0,
-                                                       /*low_4gb*/false,
-                                                       image_filename,
-                                                       error_msg));
-      if (temp_map == nullptr) {
+      MemMap temp_map = MemMap::MapFile(sizeof(ImageHeader) + stored_size,
+                                        PROT_READ,
+                                        MAP_PRIVATE,
+                                        fd,
+                                        /*offset*/0,
+                                        /*low_4gb*/false,
+                                        image_filename,
+                                        error_msg);
+      if (!temp_map.IsValid()) {
         DCHECK(error_msg == nullptr || !error_msg->empty());
-        return nullptr;
+        return MemMap::Invalid();
       }
-      memcpy(map->Begin(), &image_header, sizeof(ImageHeader));
+      memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
       const uint64_t start = NanoTime();
       // LZ4HC and LZ4 have the same internal format; both use LZ4_decompress.
       TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
       const size_t decompressed_size = LZ4_decompress_safe(
-          reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
-          reinterpret_cast<char*>(map->Begin()) + decompress_offset,
+          reinterpret_cast<char*>(temp_map.Begin()) + sizeof(ImageHeader),
+          reinterpret_cast<char*>(map.Begin()) + decompress_offset,
           stored_size,
-          map->Size() - decompress_offset);
+          map.Size() - decompress_offset);
       const uint64_t time = NanoTime() - start;
       // Add 1 ns to prevent a possible divide by 0.
       VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
-                  << PrettySize(static_cast<uint64_t>(map->Size()) * MsToNs(1000) / (time + 1))
+                  << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
                   << "/s)";
       if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
         if (error_msg != nullptr) {
@@ -882,11 +882,11 @@
               decompressed_size + sizeof(ImageHeader),
               image_header.GetImageSize());
         }
-        return nullptr;
+        return MemMap::Invalid();
       }
     }
 
-    return map.release();
+    return map;
   }
 
   class FixupVisitor : public ValueObject {
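
The hunks above replace std::unique_ptr<MemMap> and nullptr checks with a
move-only value type: factories return a MemMap by value and signal failure
with an invalid instance, tested via IsValid(). A minimal sketch of that
pattern, using a hypothetical Mapping class rather than ART's actual MemMap:

// Minimal sketch of a move-only mapping type with value-returning
// factories. "Mapping" is a hypothetical stand-in for art::MemMap.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <utility>

class Mapping {
 public:
  // Failure sentinel, replacing the old nullptr convention.
  static Mapping Invalid() { return Mapping(); }

  static Mapping MapAnonymous(size_t size) {
    void* addr = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, /* fd */ -1, /* offset */ 0);
    return addr == MAP_FAILED ? Invalid() : Mapping(addr, size);
  }

  Mapping(Mapping&& other) noexcept
      : begin_(std::exchange(other.begin_, nullptr)),
        size_(std::exchange(other.size_, 0u)) {}

  Mapping& operator=(Mapping&& other) noexcept {
    Reset();  // Release our own mapping before taking over the other one.
    begin_ = std::exchange(other.begin_, nullptr);
    size_ = std::exchange(other.size_, 0u);
    return *this;
  }

  Mapping(const Mapping&) = delete;
  Mapping& operator=(const Mapping&) = delete;

  ~Mapping() { Reset(); }

  // Unmaps (if valid) and returns to the invalid state; the analogue of the
  // MemMap::Reset() calls above.
  void Reset() {
    if (begin_ != nullptr) {
      munmap(begin_, size_);
      begin_ = nullptr;
      size_ = 0u;
    }
  }

  bool IsValid() const { return begin_ != nullptr; }
  uint8_t* Begin() const { return static_cast<uint8_t*>(begin_); }
  size_t Size() const { return size_; }

 private:
  Mapping() = default;
  Mapping(void* begin, size_t size) : begin_(begin), size_(size) {}

  void* begin_ = nullptr;
  size_t size_ = 0u;
};

int main() {
  Mapping map = Mapping::MapAnonymous(4096);
  if (!map.IsValid()) {  // The new failure check, instead of == nullptr.
    std::fprintf(stderr, "mapping failed\n");
    return 1;
  }
  map.Begin()[0] = 42u;
  map.Reset();  // Proactive release; the destructor would also unmap.
  return 0;
}
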
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 3383d6b3..89038e5 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -182,7 +182,7 @@
 
   ImageSpace(const std::string& name,
              const char* image_location,
-             MemMap* mem_map,
+             MemMap&& mem_map,
              accounting::ContinuousSpaceBitmap* live_bitmap,
              uint8_t* end);
 
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index a24ca32..ada59b3 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -48,10 +48,6 @@
     // Historical note: We were deleting large objects to keep Valgrind happy if there were
     // any large objects such as Dex cache arrays which aren't freed since they are held live
     // by the class linker.
-    MutexLock mu(Thread::Current(), lock_);
-    for (auto& m : large_objects_) {
-      delete m.second.mem_map;
-    }
   }
 
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -139,16 +135,21 @@
                                            size_t* bytes_allocated, size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated) {
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
-                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
-  if (UNLIKELY(mem_map == nullptr)) {
+  MemMap mem_map = MemMap::MapAnonymous("large object space allocation",
+                                        /* addr */ nullptr,
+                                        num_bytes,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ true,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (UNLIKELY(!mem_map.IsValid())) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
     return nullptr;
   }
-  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
+  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map.Begin());
+  const size_t allocation_size = mem_map.BaseSize();
   MutexLock mu(self, lock_);
-  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
-  const size_t allocation_size = mem_map->BaseSize();
+  large_objects_.Put(obj, LargeObject {std::move(mem_map), false /* not zygote */});
   DCHECK(bytes_allocated != nullptr);
 
   if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
@@ -191,13 +192,11 @@
     Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
     LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
   }
-  MemMap* mem_map = it->second.mem_map;
-  const size_t map_size = mem_map->BaseSize();
+  const size_t map_size = it->second.mem_map.BaseSize();
   DCHECK_GE(num_bytes_allocated_, map_size);
   size_t allocation_size = map_size;
   num_bytes_allocated_ -= allocation_size;
   --num_objects_allocated_;
-  delete mem_map;
   large_objects_.erase(it);
   return allocation_size;
 }
@@ -206,7 +205,7 @@
   MutexLock mu(Thread::Current(), lock_);
   auto it = large_objects_.find(obj);
   CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
-  size_t alloc_size = it->second.mem_map->BaseSize();
+  size_t alloc_size = it->second.mem_map.BaseSize();
   if (usable_size != nullptr) {
     *usable_size = alloc_size;
   }
@@ -227,7 +226,7 @@
 void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
   MutexLock mu(Thread::Current(), lock_);
   for (auto& pair : large_objects_) {
-    MemMap* mem_map = pair.second.mem_map;
+    MemMap* mem_map = &pair.second.mem_map;
     callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
     callback(nullptr, nullptr, 0, arg);
   }
@@ -326,7 +325,7 @@
 
 size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
   DCHECK_GE(info, allocation_info_);
-  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
+  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_.End()));
   return info - allocation_info_;
 }
 
@@ -350,28 +349,39 @@
 FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin, size_t size) {
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
-                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
-  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
-  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        requested_begin,
+                                        size,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ true,
+                                        /* reuse */ false,
+                                        &error_msg);
+  CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
+  return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
 }
 
-FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
+FreeListSpace::FreeListSpace(const std::string& name,
+                             MemMap&& mem_map,
+                             uint8_t* begin,
+                             uint8_t* end)
     : LargeObjectSpace(name, begin, end),
-      mem_map_(mem_map),
+      mem_map_(std::move(mem_map)),
       lock_("free list space lock", kAllocSpaceLock) {
   const size_t space_capacity = end - begin;
   free_end_ = space_capacity;
   CHECK_ALIGNED(space_capacity, kAlignment);
   const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
   std::string error_msg;
-  allocation_info_map_.reset(
+  allocation_info_map_ =
       MemMap::MapAnonymous("large object free list space allocation info map",
-                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
-                           false, false, &error_msg));
-  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map"
-      << error_msg;
-  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
+                           /* addr */ nullptr,
+                           alloc_info_size,
+                           PROT_READ | PROT_WRITE,
+                           /* low_4gb */ false,
+                           /* reuse */ false,
+                           &error_msg);
+  CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map: " << error_msg;
+  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
 }
 
 FreeListSpace::~FreeListSpace() {}
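
With LargeObject holding its MemMap by value, each entry of large_objects_
owns its pages directly: erasing the entry runs ~MemMap() and unmaps them,
which is why the explicit delete loop in the destructor and the delete in
Free() disappear above. A short sketch of the same ownership arrangement,
reusing the hypothetical Mapping type from the earlier sketch:

// Sketch: the container entry owns the mapping, so erase() releases the
// pages. LargeObjectEntry is an illustrative stand-in, not ART's struct.
#include <map>
#include <utility>

struct LargeObjectEntry {
  Mapping mem_map;  // Owns the pages; ~Mapping() unmaps them.
  bool is_zygote;
};

std::map<void*, LargeObjectEntry> large_objects;

void Track(Mapping&& map) {
  void* key = map.Begin();
  large_objects.emplace(key, LargeObjectEntry{std::move(map), /* is_zygote */ false});
}

void Free(void* obj) {
  // Destroying the entry unmaps its pages via ~Mapping(); no delete needed.
  large_objects.erase(obj);
}
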
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index f37d814..b69bd91 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -148,7 +148,7 @@
 
  protected:
   struct LargeObject {
-    MemMap* mem_map;
+    MemMap mem_map;
     bool is_zygote;
   };
   explicit LargeObjectMapSpace(const std::string& name);
@@ -182,7 +182,7 @@
   std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);
 
  protected:
-  FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end);
+  FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
   size_t GetSlotIndexForAddress(uintptr_t address) const {
     DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
     return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
@@ -210,9 +210,9 @@
 
   // There is no footer for any allocations at the end of the space, so we keep track of how much
   // free space there is at the end manually.
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
   // Side table for allocation info, one per page.
-  std::unique_ptr<MemMap> allocation_info_map_;
+  MemMap allocation_info_map_;
   AllocationInfo* allocation_info_;
 
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 6936fdc..91e0ce8 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -40,19 +40,26 @@
 
 size_t MallocSpace::bitmap_index_ = 0;
 
-MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
-                         uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
-                         bool create_bitmaps, bool can_move_objects, size_t starting_size,
+MallocSpace::MallocSpace(const std::string& name,
+                         MemMap&& mem_map,
+                         uint8_t* begin,
+                         uint8_t* end,
+                         uint8_t* limit,
+                         size_t growth_limit,
+                         bool create_bitmaps,
+                         bool can_move_objects,
+                         size_t starting_size,
                          size_t initial_size)
-    : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
+    : ContinuousMemMapAllocSpace(
+        name, std::move(mem_map), begin, end, limit, kGcRetentionPolicyAlwaysCollect),
       recent_free_pos_(0), lock_("allocation space lock", kAllocSpaceLock),
       growth_limit_(growth_limit), can_move_objects_(can_move_objects),
       starting_size_(starting_size), initial_size_(initial_size) {
   if (create_bitmaps) {
     size_t bitmap_index = bitmap_index_++;
     static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
-    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize);
-    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize);
+    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.Begin()), kGcCardSize);
+    CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.End()), kGcCardSize);
     live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
         Begin(), NonGrowthLimitCapacity()));
@@ -70,8 +77,12 @@
   }
 }
 
-MemMap* MallocSpace::CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
-                                  size_t* growth_limit, size_t* capacity, uint8_t* requested_begin) {
+MemMap MallocSpace::CreateMemMap(const std::string& name,
+                                 size_t starting_size,
+                                 size_t* initial_size,
+                                 size_t* growth_limit,
+                                 size_t* capacity,
+                                 uint8_t* requested_begin) {
   // Sanity check arguments
   if (starting_size > *initial_size) {
     *initial_size = starting_size;
@@ -80,13 +91,13 @@
     LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
         << PrettySize(*initial_size) << ") is larger than its capacity ("
         << PrettySize(*growth_limit) << ")";
-    return nullptr;
+    return MemMap::Invalid();
   }
   if (*growth_limit > *capacity) {
     LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
         << PrettySize(*growth_limit) << ") is larger than the capacity ("
         << PrettySize(*capacity) << ")";
-    return nullptr;
+    return MemMap::Invalid();
   }
 
   // Page align growth limit and capacity which will be used to manage mmapped storage
@@ -94,9 +105,14 @@
   *capacity = RoundUp(*capacity, kPageSize);
 
   std::string error_msg;
-  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, *capacity,
-                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
-  if (mem_map == nullptr) {
+  MemMap mem_map = MemMap::MapAnonymous(name.c_str(),
+                                        requested_begin,
+                                        *capacity,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ true,
+                                        /* reuse */ false,
+                                        &error_msg);
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
                << PrettySize(*capacity) << ": " << error_msg;
   }
@@ -194,18 +210,24 @@
   VLOG(heap) << "Capacity " << PrettySize(capacity);
   // Remap the tail.
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(), alloc_space_name,
-                                                          PROT_READ | PROT_WRITE, &error_msg));
-  CHECK(mem_map.get() != nullptr) << error_msg;
-  void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
-                                    low_memory_mode);
+  MemMap mem_map = GetMemMap()->RemapAtEnd(
+      End(), alloc_space_name, PROT_READ | PROT_WRITE, &error_msg);
+  CHECK(mem_map.IsValid()) << error_msg;
+  void* allocator =
+      CreateAllocator(End(), starting_size_, initial_size_, capacity, low_memory_mode);
   // Protect memory beyond the initial size.
-  uint8_t* end = mem_map->Begin() + starting_size_;
+  uint8_t* end = mem_map.Begin() + starting_size_;
   if (capacity > initial_size_) {
     CheckedCall(mprotect, alloc_space_name, end, capacity - initial_size_, PROT_NONE);
   }
-  *out_malloc_space = CreateInstance(mem_map.release(), alloc_space_name, allocator, End(), end,
-                                     limit_, growth_limit, CanMoveObjects());
+  *out_malloc_space = CreateInstance(std::move(mem_map),
+                                     alloc_space_name,
+                                     allocator,
+                                     End(),
+                                     end,
+                                     limit_,
+                                     growth_limit,
+                                     CanMoveObjects());
   SetLimit(End());
   live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
   CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
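
GetMemMap()->RemapAtEnd() above carves the tail of the space's mapping into
a separate, independently owned MemMap. The underlying technique relies on
mmap() with MAP_FIXED atomically replacing any existing pages in the
requested range, so the tail never needs an explicit munmap() first. A
standalone sketch of that system-call pattern (illustrative only, not ART's
implementation):

// RemapTail() is a hypothetical helper: it splits the tail
// [new_end, base + size) off an anonymous mapping starting at base and
// gives the tail its own protection. new_end must be page-aligned and lie
// inside the mapping.
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>

uint8_t* RemapTail(uint8_t* base, size_t size, uint8_t* new_end, int prot) {
  assert(base <= new_end && new_end <= base + size);
  size_t tail_size = static_cast<size_t>(base + size - new_end);
  // MAP_FIXED maps exactly at new_end and atomically replaces the old pages
  // in [new_end, new_end + tail_size); no munmap() of the tail is needed.
  void* tail = mmap(new_end, tail_size, prot,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                    /* fd */ -1, /* offset */ 0);
  return tail == MAP_FAILED ? nullptr : static_cast<uint8_t*>(tail);
}
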
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index c1f4841..e4a6f15 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -113,9 +113,14 @@
 
   void SetGrowthLimit(size_t growth_limit);
 
-  virtual MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
-                                      uint8_t* begin, uint8_t* end, uint8_t* limit,
-                                      size_t growth_limit, bool can_move_objects) = 0;
+  virtual MallocSpace* CreateInstance(MemMap&& mem_map,
+                                      const std::string& name,
+                                      void* allocator,
+                                      uint8_t* begin,
+                                      uint8_t* end,
+                                      uint8_t* limit,
+                                      size_t growth_limit,
+                                      bool can_move_objects) = 0;
 
   // Splits this space into a zygote space and a new malloc space which has our unused memory. When true,
   // the low memory mode argument specifies that the heap wishes the created space to be more
@@ -137,12 +142,23 @@
   }
 
  protected:
-  MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
-              uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
-              size_t starting_size, size_t initial_size);
+  MallocSpace(const std::string& name,
+              MemMap&& mem_map,
+              uint8_t* begin,
+              uint8_t* end,
+              uint8_t* limit,
+              size_t growth_limit,
+              bool create_bitmaps,
+              bool can_move_objects,
+              size_t starting_size,
+              size_t initial_size);
 
-  static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
-                              size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);
+  static MemMap CreateMemMap(const std::string& name,
+                             size_t starting_size,
+                             size_t* initial_size,
+                             size_t* growth_limit,
+                             size_t* capacity,
+                             uint8_t* requested_begin);
 
   // When true, the low memory mode argument specifies that the heap wishes the created allocator to
   // be more aggressive in releasing unused pages.
diff --git a/runtime/gc/space/memory_tool_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
index c022171..f1c1cb8 100644
--- a/runtime/gc/space/memory_tool_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -267,8 +267,8 @@
                       kMemoryToolRedZoneBytes,
                       kAdjustForRedzoneInAllocSize,
                       kUseObjSizeForUsable>::MemoryToolMallocSpace(
-                          MemMap* mem_map, size_t initial_size, Params... params)
-                          : S(mem_map, initial_size, params...) {
+                          MemMap&& mem_map, size_t initial_size, Params... params)
+                          : S(std::move(mem_map), initial_size, params...) {
   // Don't want to change the memory tool states of the mem map here as the allocator is already
   // initialized at this point and that may interfere with what the allocator does internally. Note
   // that the tail beyond the initial size is mprotected.
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index e53f009..32bd204 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -53,7 +53,7 @@
   size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
 
   template <typename... Params>
-  MemoryToolMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
+  MemoryToolMallocSpace(MemMap&& mem_map, size_t initial_size, Params... params);
   virtual ~MemoryToolMallocSpace() {}
 
  private:
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 6d494fa..85e6919 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -45,60 +45,65 @@
 // Whether we check a region's live bytes count against the region bitmap.
 static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;
 
-MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
-                                  uint8_t* requested_begin) {
+MemMap RegionSpace::CreateMemMap(const std::string& name,
+                                 size_t capacity,
+                                 uint8_t* requested_begin) {
   CHECK_ALIGNED(capacity, kRegionSize);
   std::string error_msg;
   // Ask for an additional kRegionSize of capacity so that we can align the map to kRegionSize
   // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
-  std::unique_ptr<MemMap> mem_map;
+  MemMap mem_map;
   while (true) {
-    mem_map.reset(MemMap::MapAnonymous(name.c_str(),
-                                       requested_begin,
-                                       capacity + kRegionSize,
-                                       PROT_READ | PROT_WRITE,
-                                       true,
-                                       false,
-                                       &error_msg));
-    if (mem_map.get() != nullptr || requested_begin == nullptr) {
+    mem_map = MemMap::MapAnonymous(name.c_str(),
+                                   requested_begin,
+                                   capacity + kRegionSize,
+                                   PROT_READ | PROT_WRITE,
+                                   /* low_4gb */ true,
+                                   /* reuse */ false,
+                                   &error_msg);
+    if (mem_map.IsValid() || requested_begin == nullptr) {
       break;
     }
     // Retry with no specified begin address.
     requested_begin = nullptr;
   }
-  if (mem_map.get() == nullptr) {
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
         << PrettySize(capacity) << " with message " << error_msg;
     MemMap::DumpMaps(LOG_STREAM(ERROR));
-    return nullptr;
+    return MemMap::Invalid();
   }
-  CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
-  CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
-  CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
-  if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
+  CHECK_EQ(mem_map.Size(), capacity + kRegionSize);
+  CHECK_EQ(mem_map.Begin(), mem_map.BaseBegin());
+  CHECK_EQ(mem_map.Size(), mem_map.BaseSize());
+  if (IsAlignedParam(mem_map.Begin(), kRegionSize)) {
     // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink by
     // kRegionSize at the end.
-    mem_map->SetSize(capacity);
+    mem_map.SetSize(capacity);
   } else {
     // Got an unaligned map. Align both ends.
-    mem_map->AlignBy(kRegionSize);
+    mem_map.AlignBy(kRegionSize);
   }
-  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
-  CHECK_ALIGNED(mem_map->End(), kRegionSize);
-  CHECK_EQ(mem_map->Size(), capacity);
-  return mem_map.release();
+  CHECK_ALIGNED(mem_map.Begin(), kRegionSize);
+  CHECK_ALIGNED(mem_map.End(), kRegionSize);
+  CHECK_EQ(mem_map.Size(), capacity);
+  return mem_map;
 }
 
-RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
-  return new RegionSpace(name, mem_map);
+RegionSpace* RegionSpace::Create(const std::string& name, MemMap&& mem_map) {
+  return new RegionSpace(name, std::move(mem_map));
 }
 
-RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
-    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+RegionSpace::RegionSpace(const std::string& name, MemMap&& mem_map)
+    : ContinuousMemMapAllocSpace(name,
+                                 std::move(mem_map),
+                                 mem_map.Begin(),
+                                 mem_map.End(),
+                                 mem_map.End(),
                                  kGcRetentionPolicyAlwaysCollect),
       region_lock_("Region lock", kRegionSpaceRegionLock),
       time_(1U),
-      num_regions_(mem_map->Size() / kRegionSize),
+      num_regions_(mem_map_.Size() / kRegionSize),
       num_non_free_regions_(0U),
       num_evac_regions_(0U),
       max_peak_num_non_free_regions_(0U),
@@ -106,11 +111,11 @@
       current_region_(&full_region_),
       evac_region_(nullptr),
       cyclic_alloc_region_index_(0U) {
-  CHECK_ALIGNED(mem_map->Size(), kRegionSize);
-  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
+  CHECK_ALIGNED(mem_map_.Size(), kRegionSize);
+  CHECK_ALIGNED(mem_map_.Begin(), kRegionSize);
   DCHECK_GT(num_regions_, 0U);
   regions_.reset(new Region[num_regions_]);
-  uint8_t* region_addr = mem_map->Begin();
+  uint8_t* region_addr = mem_map_.Begin();
   for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
     regions_[i].Init(i, region_addr, region_addr + kRegionSize);
   }
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index ef2e137..beedfd2 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -50,8 +50,8 @@
   // Create a region space mem map with the requested sizes. The requested base address is not
   // guaranteed to be granted; if it is required, the caller should call Begin on the returned
   // space to confirm the request was granted.
-  static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
-  static RegionSpace* Create(const std::string& name, MemMap* mem_map);
+  static MemMap CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
+  static RegionSpace* Create(const std::string& name, MemMap&& mem_map);
 
   // Allocate `num_bytes`, returns null if the space is full.
   mirror::Object* Alloc(Thread* self,
@@ -301,7 +301,7 @@
   }
 
  private:
-  RegionSpace(const std::string& name, MemMap* mem_map);
+  RegionSpace(const std::string& name, MemMap&& mem_map);
 
   template<bool kToSpaceOnly, typename Visitor>
   ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index b0402e4..10ff1c1 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -44,48 +44,88 @@
 // TODO: Fix
 // template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
 
-RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
-                             art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
-                             uint8_t* limit, size_t growth_limit, bool can_move_objects,
-                             size_t starting_size, bool low_memory_mode)
-    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
+RosAllocSpace::RosAllocSpace(MemMap&& mem_map,
+                             size_t initial_size,
+                             const std::string& name,
+                             art::gc::allocator::RosAlloc* rosalloc,
+                             uint8_t* begin,
+                             uint8_t* end,
+                             uint8_t* limit,
+                             size_t growth_limit,
+                             bool can_move_objects,
+                             size_t starting_size,
+                             bool low_memory_mode)
+    : MallocSpace(name,
+                  std::move(mem_map),
+                  begin,
+                  end,
+                  limit,
+                  growth_limit,
+                  true,
+                  can_move_objects,
                   starting_size, initial_size),
       rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
   CHECK(rosalloc != nullptr);
 }
 
-RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                               size_t starting_size, size_t initial_size,
-                                               size_t growth_limit, size_t capacity,
-                                               bool low_memory_mode, bool can_move_objects) {
-  DCHECK(mem_map != nullptr);
+RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap&& mem_map,
+                                               const std::string& name,
+                                               size_t starting_size,
+                                               size_t initial_size,
+                                               size_t growth_limit,
+                                               size_t capacity,
+                                               bool low_memory_mode,
+                                               bool can_move_objects) {
+  DCHECK(mem_map.IsValid());
 
   bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();
 
-  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
-                                                 capacity, low_memory_mode, running_on_memory_tool);
+  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map.Begin(),
+                                                 starting_size,
+                                                 initial_size,
+                                                 capacity,
+                                                 low_memory_mode,
+                                                 running_on_memory_tool);
   if (rosalloc == nullptr) {
     LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
     return nullptr;
   }
 
   // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary.
-  uint8_t* end = mem_map->Begin() + starting_size;
+  uint8_t* end = mem_map.Begin() + starting_size;
   if (capacity - starting_size > 0) {
     CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
   }
 
   // Everything is set, so record it in the immutable structure and leave.
-  uint8_t* begin = mem_map->Begin();
+  uint8_t* begin = mem_map.Begin();
   // TODO: Fix RosAllocSpace to support ASan. There are currently some issues with
   // AllocationSize caused by redzones. b/12944686
   if (running_on_memory_tool) {
     return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
-        mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
-        can_move_objects, starting_size, low_memory_mode);
+        std::move(mem_map),
+        initial_size,
+        name,
+        rosalloc,
+        begin,
+        end,
+        begin + capacity,
+        growth_limit,
+        can_move_objects,
+        starting_size,
+        low_memory_mode);
   } else {
-    return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
-                             growth_limit, can_move_objects, starting_size, low_memory_mode);
+    return new RosAllocSpace(std::move(mem_map),
+                             initial_size,
+                             name,
+                             rosalloc,
+                             begin,
+                             end,
+                             begin + capacity,
+                             growth_limit,
+                             can_move_objects,
+                             starting_size,
+                             low_memory_mode);
   }
 }
 
@@ -111,16 +151,21 @@
   // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
   // size of the large allocation) will be greater than the footprint limit.
   size_t starting_size = Heap::kDefaultStartingSize;
-  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
-                                 requested_begin);
-  if (mem_map == nullptr) {
+  MemMap mem_map =
+      CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity, requested_begin);
+  if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
                << PrettySize(capacity);
     return nullptr;
   }
 
-  RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
-                                          growth_limit, capacity, low_memory_mode,
+  RosAllocSpace* space = CreateFromMemMap(std::move(mem_map),
+                                          name,
+                                          starting_size,
+                                          initial_size,
+                                          growth_limit,
+                                          capacity,
+                                          low_memory_mode,
                                           can_move_objects);
   // We start out with only the initial size possibly containing objects.
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -175,18 +220,39 @@
   return result;
 }
 
-MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
-                                           void* allocator, uint8_t* begin, uint8_t* end,
-                                           uint8_t* limit, size_t growth_limit,
+MallocSpace* RosAllocSpace::CreateInstance(MemMap&& mem_map,
+                                           const std::string& name,
+                                           void* allocator,
+                                           uint8_t* begin,
+                                           uint8_t* end,
+                                           uint8_t* limit,
+                                           size_t growth_limit,
                                            bool can_move_objects) {
   if (Runtime::Current()->IsRunningOnMemoryTool()) {
     return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
-        mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
-        limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+        std::move(mem_map),
+        initial_size_,
+        name,
+        reinterpret_cast<allocator::RosAlloc*>(allocator),
+        begin,
+        end,
+        limit,
+        growth_limit,
+        can_move_objects,
+        starting_size_,
+        low_memory_mode_);
   } else {
-    return new RosAllocSpace(mem_map, initial_size_, name,
-                             reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
-                             growth_limit, can_move_objects, starting_size_, low_memory_mode_);
+    return new RosAllocSpace(std::move(mem_map),
+                             initial_size_,
+                             name,
+                             reinterpret_cast<allocator::RosAlloc*>(allocator),
+                             begin,
+                             end,
+                             limit,
+                             growth_limit,
+                             can_move_objects,
+                             starting_size_,
+                             low_memory_mode_);
   }
 }
 
@@ -364,8 +430,11 @@
   mark_bitmap_->Clear();
   SetEnd(begin_ + starting_size_);
   delete rosalloc_;
-  rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
-                             NonGrowthLimitCapacity(), low_memory_mode_,
+  rosalloc_ = CreateRosAlloc(mem_map_.Begin(),
+                             starting_size_,
+                             initial_size_,
+                             NonGrowthLimitCapacity(),
+                             low_memory_mode_,
                              Runtime::Current()->IsRunningOnMemoryTool());
   SetFootprintLimit(footprint_limit);
 }
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 4c17233..c630826 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -41,10 +41,14 @@
   static RosAllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                size_t capacity, uint8_t* requested_begin, bool low_memory_mode,
                                bool can_move_objects);
-  static RosAllocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
-                                         size_t starting_size, size_t initial_size,
-                                         size_t growth_limit, size_t capacity,
-                                         bool low_memory_mode, bool can_move_objects);
+  static RosAllocSpace* CreateFromMemMap(MemMap&& mem_map,
+                                         const std::string& name,
+                                         size_t starting_size,
+                                         size_t initial_size,
+                                         size_t growth_limit,
+                                         size_t capacity,
+                                         bool low_memory_mode,
+                                         bool can_move_objects);
 
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
@@ -111,8 +115,13 @@
 
   void Clear() OVERRIDE;
 
-  MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
-                              uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit,
+  MallocSpace* CreateInstance(MemMap&& mem_map,
+                              const std::string& name,
+                              void* allocator,
+                              uint8_t* begin,
+                              uint8_t* end,
+                              uint8_t* limit,
+                              size_t growth_limit,
                               bool can_move_objects) OVERRIDE;
 
   uint64_t GetBytesAllocated() OVERRIDE;
@@ -147,9 +156,16 @@
   void DumpStats(std::ostream& os);
 
  protected:
-  RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
-                allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end, uint8_t* limit,
-                size_t growth_limit, bool can_move_objects, size_t starting_size,
+  RosAllocSpace(MemMap&& mem_map,
+                size_t initial_size,
+                const std::string& name,
+                allocator::RosAlloc* rosalloc,
+                uint8_t* begin,
+                uint8_t* end,
+                uint8_t* limit,
+                size_t growth_limit,
+                bool can_move_objects,
+                size_t starting_size,
                 bool low_memory_mode);
 
  private:
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 4f43d9f..4e173a8 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -377,30 +377,30 @@
   }
 
   MemMap* GetMemMap() {
-    return mem_map_.get();
+    return &mem_map_;
   }
 
   const MemMap* GetMemMap() const {
-    return mem_map_.get();
+    return &mem_map_;
   }
 
-  MemMap* ReleaseMemMap() {
-    return mem_map_.release();
+  MemMap ReleaseMemMap() {
+    return std::move(mem_map_);
   }
 
  protected:
   MemMapSpace(const std::string& name,
-              MemMap* mem_map,
+              MemMap&& mem_map,
               uint8_t* begin,
               uint8_t* end,
               uint8_t* limit,
               GcRetentionPolicy gc_retention_policy)
       : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
-        mem_map_(mem_map) {
+        mem_map_(std::move(mem_map)) {
   }
 
   // Underlying storage of the space
-  std::unique_ptr<MemMap> mem_map_;
+  MemMap mem_map_;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
@@ -451,9 +451,13 @@
   std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
   std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
 
-  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
-                             uint8_t* end, uint8_t* limit, GcRetentionPolicy gc_retention_policy)
-      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
+  ContinuousMemMapAllocSpace(const std::string& name,
+                             MemMap&& mem_map,
+                             uint8_t* begin,
+                             uint8_t* end,
+                             uint8_t* limit,
+                             GcRetentionPolicy gc_retention_policy)
+      : MemMapSpace(name, std::move(mem_map), begin, end, limit, gc_retention_policy) {
   }
 
  private:
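
One subtlety in the constructors above (and in the RegionSpace and
ZygoteSpace hunks): std::move(mem_map) appears in the same argument list as
mem_map.Begin(). This is safe because std::move is only a cast. The
MemMap&& parameter binds a reference, and the object is actually moved from
in the base class member initializer, which runs after every argument has
been evaluated. A sketch of the rule, building on the hypothetical Mapping
type from the first sketch:

#include <cstdint>
#include <utility>

struct Base {
  Base(Mapping&& map, uint8_t* begin)
      : map_(std::move(map)),  // The real move happens here.
        begin_(begin) {}
  Mapping map_;
  uint8_t* begin_;
};

struct Derived : Base {
  explicit Derived(Mapping&& map)
      // Both arguments are evaluated before Base's member initializers run,
      // so map.Begin() reads a still-valid object in either evaluation order.
      : Base(std::move(map), map.Begin()) {}
  // After the Base(...) initializer, map is moved-from: any later member
  // initializer or constructor body must use map_ instead, exactly as the
  // RegionSpace constructor uses mem_map_ rather than its parameter.
};
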
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 8c73ef9..ed85b06 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -41,7 +41,8 @@
   size_t* const objects_allocated_;
 };
 
-ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map,
+ZygoteSpace* ZygoteSpace::Create(const std::string& name,
+                                 MemMap&& mem_map,
                                  accounting::ContinuousSpaceBitmap* live_bitmap,
                                  accounting::ContinuousSpaceBitmap* mark_bitmap) {
   DCHECK(live_bitmap != nullptr);
@@ -49,9 +50,9 @@
   size_t objects_allocated = 0;
   CountObjectsAllocated visitor(&objects_allocated);
   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map->Begin()),
-                                reinterpret_cast<uintptr_t>(mem_map->End()), visitor);
-  ZygoteSpace* zygote_space = new ZygoteSpace(name, mem_map, objects_allocated);
+  live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map.Begin()),
+                                reinterpret_cast<uintptr_t>(mem_map.End()), visitor);
+  ZygoteSpace* zygote_space = new ZygoteSpace(name, std::move(mem_map), objects_allocated);
   CHECK(zygote_space->live_bitmap_.get() == nullptr);
   CHECK(zygote_space->mark_bitmap_.get() == nullptr);
   zygote_space->live_bitmap_.reset(live_bitmap);
@@ -64,8 +65,12 @@
   UNREACHABLE();
 }
 
-ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
-    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+ZygoteSpace::ZygoteSpace(const std::string& name, MemMap&& mem_map, size_t objects_allocated)
+    : ContinuousMemMapAllocSpace(name,
+                                 std::move(mem_map),
+                                 mem_map.Begin(),
+                                 mem_map.End(),
+                                 mem_map.End(),
                                  kGcRetentionPolicyFullCollect),
       objects_allocated_(objects_allocated) {
 }
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 6fe21d9..200c79f 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -30,7 +30,8 @@
 class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
  public:
   // Returns the remaining storage in the out_map field.
-  static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
+  static ZygoteSpace* Create(const std::string& name,
+                             MemMap&& mem_map,
                              accounting::ContinuousSpaceBitmap* live_bitmap,
                              accounting::ContinuousSpaceBitmap* mark_bitmap)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -85,7 +86,7 @@
   }
 
  private:
-  ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated);
+  ZygoteSpace(const std::string& name, MemMap&& mem_map, size_t objects_allocated);
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
 
   AtomicInteger objects_allocated_;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 950a54d..098db9f 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -78,14 +78,19 @@
   CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));
 
   const size_t table_bytes = max_count * sizeof(IrtEntry);
-  table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
-                                            PROT_READ | PROT_WRITE, false, false, error_msg));
-  if (table_mem_map_.get() == nullptr && error_msg->empty()) {
+  table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
+                                        /* addr */ nullptr,
+                                        table_bytes,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        error_msg);
+  if (!table_mem_map_.IsValid() && error_msg->empty()) {
     *error_msg = "Unable to map memory for indirect ref table";
   }
 
-  if (table_mem_map_.get() != nullptr) {
-    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+  if (table_mem_map_.IsValid()) {
+    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
   } else {
     table_ = nullptr;
   }
@@ -125,7 +130,7 @@
 }
 
 bool IndirectReferenceTable::IsValid() const {
-  return table_mem_map_.get() != nullptr;
+  return table_mem_map_.IsValid();
 }
 
 // Holes:
@@ -217,20 +222,20 @@
   // Note: the above check also ensures that there is no overflow below.
 
   const size_t table_bytes = new_size * sizeof(IrtEntry);
-  std::unique_ptr<MemMap> new_map(MemMap::MapAnonymous("indirect ref table",
-                                                       nullptr,
-                                                       table_bytes,
-                                                       PROT_READ | PROT_WRITE,
-                                                       false,
-                                                       false,
-                                                       error_msg));
-  if (new_map == nullptr) {
+  MemMap new_map = MemMap::MapAnonymous("indirect ref table",
+                                        /* addr */ nullptr,
+                                        table_bytes,
+                                        PROT_READ | PROT_WRITE,
+                                        /* low_4gb */ false,
+                                        /* reuse */ false,
+                                        error_msg);
+  if (!new_map.IsValid()) {
     return false;
   }
 
-  memcpy(new_map->Begin(), table_mem_map_->Begin(), table_mem_map_->Size());
+  memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
   table_mem_map_ = std::move(new_map);
-  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
   max_entries_ = new_size;
 
   return true;
@@ -444,7 +449,7 @@
   ScopedTrace trace(__PRETTY_FUNCTION__);
   const size_t top_index = Capacity();
   auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
-  uint8_t* release_end = table_mem_map_->End();
+  uint8_t* release_end = table_mem_map_.End();
   madvise(release_start, release_end - release_start, MADV_DONTNEED);
 }
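
Resize() above grows the table by mapping a larger region, copying the live
entries, and move-assigning over table_mem_map_; the move assignment
releases the old mapping, so no explicit unmap is needed. A sketch, again
using the hypothetical Mapping type from the first sketch:

#include <cstddef>
#include <cstring>
#include <utility>

// Grow-by-copy: on success the old mapping is released by Mapping's move
// assignment; on failure the existing table is left untouched.
bool GrowTable(Mapping& table, size_t new_bytes) {
  Mapping new_map = Mapping::MapAnonymous(new_bytes);
  if (!new_map.IsValid()) {
    return false;
  }
  std::memcpy(new_map.Begin(), table.Begin(), table.Size());
  table = std::move(new_map);  // Unmaps the old table here.
  return true;
}
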
 
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index d2093f2..8c63c00 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -27,6 +27,7 @@
 
 #include "base/bit_utils.h"
 #include "base/macros.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
@@ -41,8 +42,6 @@
 class Object;
 }  // namespace mirror
 
-class MemMap;
-
 // Maintain a table of indirect references.  Used for local/global JNI references.
 //
 // The table contains object references, where the strong (local/global) references are part of the
@@ -398,7 +397,7 @@
   IRTSegmentState segment_state_;
 
   // Mem map where we store the indirect refs.
-  std::unique_ptr<MemMap> table_mem_map_;
+  MemMap table_mem_map_;
   // Bottom of the stack. Do not directly access the object references
   // in this as they are roots. Use Get() that has a read barrier.
   IrtEntry* table_;
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 74aa787..d4b51af 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -517,24 +517,23 @@
   result->SetZ(class_name == nullptr);
 }
 
-static std::unique_ptr<MemMap> FindAndExtractEntry(const std::string& jar_file,
-                                                   const char* entry_name,
-                                                   size_t* size,
-                                                   std::string* error_msg) {
+static MemMap FindAndExtractEntry(const std::string& jar_file,
+                                  const char* entry_name,
+                                  size_t* size,
+                                  std::string* error_msg) {
   CHECK(size != nullptr);
 
   std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(jar_file.c_str(), error_msg));
   if (zip_archive == nullptr) {
-    return nullptr;
+    return MemMap::Invalid();
   }
   std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(entry_name, error_msg));
   if (zip_entry == nullptr) {
-    return nullptr;
+    return MemMap::Invalid();
   }
-  std::unique_ptr<MemMap> tmp_map(
-      zip_entry->ExtractToMemMap(jar_file.c_str(), entry_name, error_msg));
-  if (tmp_map == nullptr) {
-    return nullptr;
+  MemMap tmp_map = zip_entry->ExtractToMemMap(jar_file.c_str(), entry_name, error_msg);
+  if (!tmp_map.IsValid()) {
+    return MemMap::Invalid();
   }
 
   // OK, from here everything seems fine.
@@ -577,18 +576,18 @@
     return;
   }
 
-  std::unique_ptr<MemMap> mem_map;
+  MemMap mem_map;
   size_t map_size;
   std::string last_error_msg;  // Only store the last message (we could concatenate).
 
   for (const std::string& jar_file : split) {
     mem_map = FindAndExtractEntry(jar_file, resource_cstr, &map_size, &last_error_msg);
-    if (mem_map != nullptr) {
+    if (mem_map.IsValid()) {
       break;
     }
   }
 
-  if (mem_map == nullptr) {
+  if (!mem_map.IsValid()) {
     // Didn't find it. There's a good chance this will be the same at runtime, but still
     // conservatively abort the transaction here.
     AbortTransactionOrFail(self,
@@ -607,9 +606,9 @@
     return;
   }
   // Copy in content.
-  memcpy(h_array->GetData(), mem_map->Begin(), map_size);
+  memcpy(h_array->GetData(), mem_map.Begin(), map_size);
   // Be proactive releasing memory.
-  mem_map.reset();
+  mem_map.Reset();
 
   // Create a ByteArrayInputStream.
   Handle<mirror::Class> h_class(hs.NewHandle(
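Reset() above is the value-semantics replacement for unique_ptr::reset(): it unmaps immediately rather than waiting for the MemMap to leave scope, which matters when a large extracted entry has served its purpose. A minimal sketch of the same copy-then-release idiom, assuming base/mem_map.h; CopyOut is an illustrative name:

    #include <cstdint>
    #include <cstring>
    #include <vector>
    #include "base/mem_map.h"  // assumed ART include path

    namespace art {

    // Illustrative only: copies the mapped bytes out, then releases the map early.
    std::vector<uint8_t> CopyOut(MemMap&& map) {
      std::vector<uint8_t> bytes(map.Size());
      memcpy(bytes.data(), map.Begin(), map.Size());
      map.Reset();  // Proactively unmap; map is now invalid but safely destructible.
      return bytes;
    }

    }  // namespace art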
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index a8692a0..d9c7900 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -205,15 +205,16 @@
   // We could do PC-relative addressing to avoid this problem, but that
   // would require reserving code and data area before submitting, which
   // means more windows for the code memory to be RWX.
-  std::unique_ptr<MemMap> data_map(MemMap::MapAnonymous(
-      "data-code-cache", nullptr,
+  MemMap data_map = MemMap::MapAnonymous(
+      "data-code-cache",
+      /* addr */ nullptr,
       max_capacity,
       kProtData,
       /* low_4gb */ true,
       /* reuse */ false,
       &error_str,
-      use_ashmem));
-  if (data_map == nullptr) {
+      use_ashmem);
+  if (!data_map.IsValid()) {
     std::ostringstream oss;
     oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
     *error_msg = oss.str();
@@ -229,26 +230,23 @@
   size_t data_size = max_capacity / 2;
   size_t code_size = max_capacity - data_size;
   DCHECK_EQ(code_size + data_size, max_capacity);
-  uint8_t* divider = data_map->Begin() + data_size;
+  uint8_t* divider = data_map.Begin() + data_size;
 
-  MemMap* code_map = data_map->RemapAtEnd(
-      divider,
-      "jit-code-cache",
-      memmap_flags_prot_code | PROT_WRITE,
-      &error_str, use_ashmem);
-  if (code_map == nullptr) {
+  MemMap code_map = data_map.RemapAtEnd(
+      divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str, use_ashmem);
+  if (!code_map.IsValid()) {
     std::ostringstream oss;
     oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
     *error_msg = oss.str();
     return nullptr;
   }
-  DCHECK_EQ(code_map->Begin(), divider);
+  DCHECK_EQ(code_map.Begin(), divider);
   data_size = initial_capacity / 2;
   code_size = initial_capacity - data_size;
   DCHECK_EQ(code_size + data_size, initial_capacity);
   return new JitCodeCache(
-      code_map,
-      data_map.release(),
+      std::move(code_map),
+      std::move(data_map),
       code_size,
       data_size,
       max_capacity,
@@ -256,8 +254,8 @@
       memmap_flags_prot_code);
 }
 
-JitCodeCache::JitCodeCache(MemMap* code_map,
-                           MemMap* data_map,
+JitCodeCache::JitCodeCache(MemMap&& code_map,
+                           MemMap&& data_map,
                            size_t initial_code_capacity,
                            size_t initial_data_capacity,
                            size_t max_capacity,
@@ -266,8 +264,8 @@
     : lock_("Jit code cache", kJitCodeCacheLock),
       lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
-      code_map_(code_map),
-      data_map_(data_map),
+      code_map_(std::move(code_map)),
+      data_map_(std::move(data_map)),
       max_capacity_(max_capacity),
       current_capacity_(initial_code_capacity + initial_data_capacity),
       code_end_(initial_code_capacity),
@@ -287,8 +285,8 @@
       memmap_flags_prot_code_(memmap_flags_prot_code) {
 
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
-  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
-  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
+  code_mspace_ = create_mspace_with_base(code_map_.Begin(), code_end_, false /*locked*/);
+  data_mspace_ = create_mspace_with_base(data_map_.Begin(), data_end_, false /*locked*/);
 
   if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
     PLOG(FATAL) << "create_mspace_with_base failed";
@@ -298,13 +296,13 @@
 
   CheckedCall(mprotect,
               "mprotect jit code cache",
-              code_map_->Begin(),
-              code_map_->Size(),
+              code_map_.Begin(),
+              code_map_.Size(),
               memmap_flags_prot_code_);
   CheckedCall(mprotect,
               "mprotect jit data cache",
-              data_map_->Begin(),
-              data_map_->Size(),
+              data_map_.Begin(),
+              data_map_.Size(),
               kProtData);
 
   VLOG(jit) << "Created jit code cache: initial data size="
@@ -316,7 +314,7 @@
 JitCodeCache::~JitCodeCache() {}
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return code_map_->Begin() <= ptr && ptr < code_map_->End();
+  return code_map_.Begin() <= ptr && ptr < code_map_.End();
 }
 
 bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -387,8 +385,8 @@
     CheckedCall(
         mprotect,
         "make code writable",
-        code_cache_->code_map_->Begin(),
-        code_cache_->code_map_->Size(),
+        code_cache_->code_map_.Begin(),
+        code_cache_->code_map_.Size(),
         code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
   }
 
@@ -397,8 +395,8 @@
     CheckedCall(
         mprotect,
         "make code protected",
-        code_cache_->code_map_->Begin(),
-        code_cache_->code_map_->Size(),
+        code_cache_->code_map_.Begin(),
+        code_cache_->code_map_.Size(),
         code_cache_->memmap_flags_prot_code_);
   }
 
@@ -1237,8 +1235,8 @@
       number_of_collections_++;
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(code_map_->Begin()),
-          reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
+          reinterpret_cast<uintptr_t>(code_map_.Begin()),
+          reinterpret_cast<uintptr_t>(code_map_.Begin() + current_capacity_ / 2)));
       collection_in_progress_ = true;
     }
   }
@@ -1610,12 +1608,12 @@
   if (code_mspace_ == mspace) {
     size_t result = code_end_;
     code_end_ += increment;
-    return reinterpret_cast<void*>(result + code_map_->Begin());
+    return reinterpret_cast<void*>(result + code_map_.Begin());
   } else {
     DCHECK_EQ(data_mspace_, mspace);
     size_t result = data_end_;
     data_end_ += increment;
-    return reinterpret_cast<void*>(result + data_map_->Begin());
+    return reinterpret_cast<void*>(result + data_map_.Begin());
   }
 }
 
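The cache setup above reserves one anonymous region and then carves the code half out of its tail with RemapAtEnd(), so data and code stay contiguous and each half ends up owned by value. A minimal sketch of that split, assuming base/mem_map.h; SplitInHalf is an illustrative name and capacity is assumed page-aligned:

    #include <sys/mman.h>
    #include <cstdint>
    #include <string>
    #include <utility>
    #include "base/mem_map.h"  // assumed ART include path

    namespace art {

    // Illustrative only: splits capacity bytes into a data half and a code half.
    bool SplitInHalf(size_t capacity, MemMap* data_out, MemMap* code_out,
                     std::string* error_msg) {
      MemMap data_map = MemMap::MapAnonymous("data-half",
                                             /* addr */ nullptr,
                                             capacity,
                                             PROT_READ | PROT_WRITE,
                                             /* low_4gb */ true,
                                             /* reuse */ false,
                                             error_msg);
      if (!data_map.IsValid()) {
        return false;
      }
      uint8_t* divider = data_map.Begin() + capacity / 2;
      // Shrinks data_map to [Begin(), divider) and returns the tail as a new map.
      MemMap code_map = data_map.RemapAtEnd(divider,
                                            "code-half",
                                            PROT_READ | PROT_WRITE,
                                            error_msg,
                                            /* use_ashmem */ false);
      if (!code_map.IsValid()) {
        return false;
      }
      *data_out = std::move(data_map);
      *code_out = std::move(code_map);
      return true;
    }

    }  // namespace art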
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 632b45b..a4a0f8f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -28,6 +28,7 @@
 #include "base/atomic.h"
 #include "base/histogram.h"
 #include "base/macros.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 #include "base/safe_map.h"
 
@@ -39,7 +40,6 @@
 class InlineCache;
 class IsMarkedVisitor;
 class JitJniStubTestHelper;
-class MemMap;
 class OatQuickMethodHeader;
 struct ProfileMethodInfo;
 class ProfilingInfo;
@@ -279,8 +279,8 @@
 
  private:
   // Take ownership of maps.
-  JitCodeCache(MemMap* code_map,
-               MemMap* data_map,
+  JitCodeCache(MemMap&& code_map,
+               MemMap&& data_map,
                size_t initial_code_capacity,
                size_t initial_data_capacity,
                size_t max_capacity,
@@ -396,9 +396,9 @@
   // Whether there is a code cache collection in progress.
   bool collection_in_progress_ GUARDED_BY(lock_);
   // Mem map which holds code.
-  std::unique_ptr<MemMap> code_map_;
+  MemMap code_map_;
   // Mem map which holds data (stack maps and profiling info).
-  std::unique_ptr<MemMap> data_map_;
+  MemMap data_map_;
   // The opaque mspace for allocating code.
   void* code_mspace_ GUARDED_BY(lock_);
   // The opaque mspace for allocating data.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index b598df3..d49ebd1 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -163,33 +163,34 @@
   void operator=(const NullableScopedUtfChars&);
 };
 
-static std::unique_ptr<MemMap> AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
+static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
   if (end <= start) {
     ScopedObjectAccess soa(env);
     ThrowWrappedIOException("Bad range");
-    return nullptr;
+    return MemMap::Invalid();
   }
 
   std::string error_message;
   size_t length = static_cast<size_t>(end - start);
-  std::unique_ptr<MemMap> dex_mem_map(MemMap::MapAnonymous("DEX data",
-                                                           nullptr,
-                                                           length,
-                                                           PROT_READ | PROT_WRITE,
-                                                           /* low_4gb */ false,
-                                                           /* reuse */ false,
-                                                           &error_message));
-  if (dex_mem_map == nullptr) {
+  MemMap dex_mem_map = MemMap::MapAnonymous("DEX data",
+                                            /* addr */ nullptr,
+                                            length,
+                                            PROT_READ | PROT_WRITE,
+                                            /* low_4gb */ false,
+                                            /* reuse */ false,
+                                            &error_message);
+  if (!dex_mem_map.IsValid()) {
     ScopedObjectAccess soa(env);
     ThrowWrappedIOException("%s", error_message.c_str());
+    return MemMap::Invalid();
   }
   return dex_mem_map;
 }
 
-static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem_map) {
+static const DexFile* CreateDexFile(JNIEnv* env, MemMap&& dex_mem_map) {
   std::string location = StringPrintf("Anonymous-DexFile@%p-%p",
-                                      dex_mem_map->Begin(),
-                                      dex_mem_map->End());
+                                      dex_mem_map.Begin(),
+                                      dex_mem_map.End());
   std::string error_message;
   const ArtDexFileLoader dex_file_loader;
   std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
@@ -213,7 +214,7 @@
   return dex_file.release();
 }
 
-static jobject CreateSingleDexFileCookie(JNIEnv* env, std::unique_ptr<MemMap> data) {
+static jobject CreateSingleDexFileCookie(JNIEnv* env, MemMap&& data) {
   std::unique_ptr<const DexFile> dex_file(CreateDexFile(env, std::move(data)));
   if (dex_file.get() == nullptr) {
     DCHECK(env->ExceptionCheck());
@@ -236,14 +237,14 @@
     return nullptr;
   }
 
-  std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
-  if (dex_mem_map == nullptr) {
+  MemMap dex_mem_map = AllocateDexMemoryMap(env, start, end);
+  if (!dex_mem_map.IsValid()) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
   }
 
   size_t length = static_cast<size_t>(end - start);
-  memcpy(dex_mem_map->Begin(), base_address, length);
+  memcpy(dex_mem_map.Begin(), base_address, length);
   return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
 }
 
@@ -252,13 +253,13 @@
                                              jbyteArray buffer,
                                              jint start,
                                              jint end) {
-  std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
-  if (dex_mem_map == nullptr) {
+  MemMap dex_mem_map = AllocateDexMemoryMap(env, start, end);
+  if (!dex_mem_map.IsValid()) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
   }
 
-  auto destination = reinterpret_cast<jbyte*>(dex_mem_map.get()->Begin());
+  auto destination = reinterpret_cast<jbyte*>(dex_mem_map.Begin());
   env->GetByteArrayRegion(buffer, start, end - start, destination);
   return CreateSingleDexFileCookie(env, std::move(dex_mem_map));
 }
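Taking the map as MemMap&& above makes the ownership handoff visible at every call site: the caller must write std::move, and after the call its local is invalid rather than dangling. A minimal sketch of the idiom under the same assumption (base/mem_map.h available); Consume and the one-page size are illustrative:

    #include <sys/mman.h>
    #include <string>
    #include <utility>
    #include "base/mem_map.h"  // assumed ART include path

    namespace art {

    // Illustrative only: a sink that assumes ownership of the mapping.
    void Consume(MemMap&& map) {
      MemMap owned = std::move(map);
      // owned unmaps its region when it goes out of scope here.
    }

    void Example() {
      std::string error_msg;
      MemMap map = MemMap::MapAnonymous("example",
                                        /* addr */ nullptr,
                                        /* byte_count */ 4096,  // one page on typical targets
                                        PROT_READ | PROT_WRITE,
                                        /* low_4gb */ false,
                                        /* reuse */ false,
                                        &error_msg);
      if (map.IsValid()) {
        Consume(std::move(map));  // map is left invalid, not dangling.
      }
    }

    }  // namespace art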
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 58e16ed..c7daef8 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -956,7 +956,7 @@
   void* dlopen_handle_;  // TODO: Unique_ptr with custom deleter.
 
   // Dummy memory map objects corresponding to the regions mapped by dlopen.
-  std::vector<std::unique_ptr<MemMap>> dlopen_mmaps_;
+  std::vector<MemMap> dlopen_mmaps_;
 
   // The number of shared objects the linker told us about before loading. Used to
   // (optimistically) optimize the PreSetup stage (see comment there).
@@ -1122,8 +1122,8 @@
             uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
                 info->dlpi_phdr[i].p_vaddr);
             size_t memsz = info->dlpi_phdr[i].p_memsz;
-            MemMap* mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
-            context->dlopen_mmaps_->push_back(std::unique_ptr<MemMap>(mmap));
+            MemMap mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
+            context->dlopen_mmaps_->push_back(std::move(mmap));
           }
         }
         return 1;  // Stop iteration and return 1 from dl_iterate_phdr.
@@ -1131,7 +1131,7 @@
       return 0;  // Continue iteration and return 0 from dl_iterate_phdr when finished.
     }
     const uint8_t* const begin_;
-    std::vector<std::unique_ptr<MemMap>>* const dlopen_mmaps_;
+    std::vector<MemMap>* const dlopen_mmaps_;
     const size_t shared_objects_before;
     size_t shared_objects_seen;
   };
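With MemMap moveable, the dlopen bookkeeping above can hold the maps directly in a std::vector<MemMap> instead of a vector of unique_ptrs. A minimal sketch, assuming base/mem_map.h and, per the use here, that MapDummy() produces a non-owning placeholder over memory dlopen already mapped; RecordRegion is an illustrative name:

    #include <cstdint>
    #include <vector>
    #include "base/mem_map.h"  // assumed ART include path

    namespace art {

    // Illustrative only: records a region that other code already mapped.
    void RecordRegion(std::vector<MemMap>* maps,
                      const char* name, uint8_t* addr, size_t size) {
      // The temporary is moved into the vector; nothing is unmapped later.
      maps->push_back(MemMap::MapDummy(name, addr, size));
    }

    }  // namespace art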
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index facebda..9248bb9 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -425,7 +425,7 @@
   low_4gb_arena_pool_.reset();
   arena_pool_.reset();
   jit_arena_pool_.reset();
-  protected_fault_page_.reset();
+  protected_fault_page_.Reset();
   MemMap::Shutdown();
 
   // TODO: acquire a static mutex on Runtime to avoid racing.
@@ -1162,18 +1162,18 @@
   {
     constexpr uintptr_t kSentinelAddr =
         RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
-    protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
-                                                     reinterpret_cast<uint8_t*>(kSentinelAddr),
-                                                     kPageSize,
-                                                     PROT_NONE,
-                                                     /* low_4g */ true,
-                                                     /* reuse */ false,
-                                                     /* error_msg */ nullptr));
-    if (protected_fault_page_ == nullptr) {
+    protected_fault_page_ = MemMap::MapAnonymous("Sentinel fault page",
+                                                 reinterpret_cast<uint8_t*>(kSentinelAddr),
+                                                 kPageSize,
+                                                 PROT_NONE,
+                                                 /* low_4gb */ true,
+                                                 /* reuse */ false,
+                                                 /* error_msg */ nullptr);
+    if (!protected_fault_page_.IsValid()) {
       LOG(WARNING) << "Could not reserve sentinel fault page";
-    } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
+    } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) {
       LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
-      protected_fault_page_.reset();
+      protected_fault_page_.Reset();
     }
   }
 
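The sentinel setup above also shows that a requested address is honored only best-effort when reuse is false: the code checks Begin() against the address it asked for and drops the mapping on a mismatch. A minimal sketch of that verify-or-drop step, assuming base/mem_map.h; MapAtExactly is an illustrative name:

    #include <sys/mman.h>
    #include <cstdint>
    #include "base/mem_map.h"  // assumed ART include path

    namespace art {

    // Illustrative only: keeps the mapping only if it landed where requested.
    MemMap MapAtExactly(uint8_t* wanted_addr, size_t size) {
      MemMap map = MemMap::MapAnonymous("exact mapping",
                                        wanted_addr,
                                        size,
                                        PROT_NONE,
                                        /* low_4gb */ true,
                                        /* reuse */ false,
                                        /* error_msg */ nullptr);
      if (map.IsValid() && map.Begin() != wanted_addr) {
        map.Reset();  // The address was only a hint; discard the stray mapping.
      }
      return map;
    }

    }  // namespace art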
diff --git a/runtime/runtime.h b/runtime/runtime.h
index a98e8a8..f98d7b9 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -29,6 +29,7 @@
 
 #include "arch/instruction_set.h"
 #include "base/macros.h"
+#include "base/mem_map.h"
 #include "base/mutex.h"
 #include "deoptimization_kind.h"
 #include "dex/dex_file_types.h"
@@ -86,7 +87,6 @@
 class IsMarkedVisitor;
 class JavaVMExt;
 class LinearAlloc;
-class MemMap;
 class MonitorList;
 class MonitorPool;
 class NullPointerHandler;
@@ -1090,7 +1090,7 @@
   std::atomic<uint32_t> deoptimization_counts_[
       static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
 
-  std::unique_ptr<MemMap> protected_fault_page_;
+  MemMap protected_fault_page_;
 
   uint32_t verifier_logging_threshold_ms_;
 
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 794ac19..4c4dcd8 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -190,19 +190,19 @@
 
 TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) {
   std::string error_msg;
-  std::unique_ptr<MemMap> stack(MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
-                                                     nullptr,
-                                                     128 * kPageSize,  // Just some small stack.
-                                                     PROT_READ | PROT_WRITE,
-                                                     false,
-                                                     false,
-                                                     &error_msg));
-  ASSERT_FALSE(stack == nullptr) << error_msg;
+  MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread",
+                                      /* addr */ nullptr,
+                                      128 * kPageSize,  // Just some small stack.
+                                      PROT_READ | PROT_WRITE,
+                                      /* low_4gb */ false,
+                                      /* reuse */ false,
+                                      &error_msg);
+  ASSERT_TRUE(stack.IsValid()) << error_msg;
 
   const char* reason = "ThreadLifecycleCallback test thread";
   pthread_attr_t attr;
   CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
-  CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack->Begin(), stack->Size()), reason);
+  CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack.Begin(), stack.Size()), reason);
   pthread_t pthread;
   CHECK_PTHREAD_CALL(pthread_create,
                      (&pthread,
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 26ca190..2a69bc6 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -46,19 +46,24 @@
   // Add an inaccessible page to catch stack overflow.
   stack_size += kPageSize;
   std::string error_msg;
-  stack_.reset(MemMap::MapAnonymous(name.c_str(), nullptr, stack_size, PROT_READ | PROT_WRITE,
-                                    false, false, &error_msg));
-  CHECK(stack_.get() != nullptr) << error_msg;
-  CHECK_ALIGNED(stack_->Begin(), kPageSize);
+  stack_ = MemMap::MapAnonymous(name.c_str(),
+                                /* addr */ nullptr,
+                                stack_size,
+                                PROT_READ | PROT_WRITE,
+                                /* low_4gb */ false,
+                                /* reuse */ false,
+                                &error_msg);
+  CHECK(stack_.IsValid()) << error_msg;
+  CHECK_ALIGNED(stack_.Begin(), kPageSize);
   CheckedCall(mprotect,
               "mprotect bottom page of thread pool worker stack",
-              stack_->Begin(),
+              stack_.Begin(),
               kPageSize,
               PROT_NONE);
   const char* reason = "new thread pool worker thread";
   pthread_attr_t attr;
   CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
-  CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack_->Begin(), stack_->Size()), reason);
+  CHECK_PTHREAD_CALL(pthread_attr_setstack, (&attr, stack_.Begin(), stack_.Size()), reason);
   CHECK_PTHREAD_CALL(pthread_create, (&pthread_, &attr, &Callback, this), reason);
   CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
 }
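The worker-stack construction above is a compact guard-page recipe: map the stack plus one extra page, turn the bottom page PROT_NONE so an overflow faults instead of silently corrupting memory, then hand the region to pthread_attr_setstack. A minimal sketch under the same assumptions (base/mem_map.h; CreateGuardedStack and page_size are illustrative):

    #include <sys/mman.h>
    #include <string>
    #include "base/mem_map.h"  // assumed ART include path

    namespace art {

    // Illustrative only: returns a stack whose lowest page traps on access.
    MemMap CreateGuardedStack(size_t stack_size, size_t page_size,
                              std::string* error_msg) {
      stack_size += page_size;  // Room for the inaccessible guard page.
      MemMap stack = MemMap::MapAnonymous("worker stack",
                                          /* addr */ nullptr,
                                          stack_size,
                                          PROT_READ | PROT_WRITE,
                                          /* low_4gb */ false,
                                          /* reuse */ false,
                                          error_msg);
      if (stack.IsValid()) {
        // Overflow into the bottom page now faults instead of scribbling on data.
        mprotect(stack.Begin(), page_size, PROT_NONE);
      }
      return stack;
    }

    }  // namespace art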
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 2784953..98a1193 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -53,8 +53,8 @@
   static const size_t kDefaultStackSize = 1 * MB;
 
   size_t GetStackSize() const {
-    DCHECK(stack_.get() != nullptr);
-    return stack_->Size();
+    DCHECK(stack_.IsValid());
+    return stack_.Size();
   }
 
   virtual ~ThreadPoolWorker();
@@ -71,7 +71,7 @@
 
   ThreadPool* const thread_pool_;
   const std::string name_;
-  std::unique_ptr<MemMap> stack_;
+  MemMap stack_;
   pthread_t pthread_;
   Thread* thread_;
 
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 32aa86d..ad34584 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -144,7 +144,7 @@
     mmap_reuse = false;
   }
   CHECK(!mmap_reuse || mmap_addr != nullptr);
-  std::unique_ptr<MemMap> mmap(MemMap::MapFileAtAddress(
+  MemMap mmap = MemMap::MapFileAtAddress(
       mmap_addr,
       vdex_length,
       (writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
@@ -154,13 +154,13 @@
       low_4gb,
       mmap_reuse,
       vdex_filename.c_str(),
-      error_msg));
-  if (mmap == nullptr) {
+      error_msg);
+  if (!mmap.IsValid()) {
     *error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg;
     return nullptr;
   }
 
-  std::unique_ptr<VdexFile> vdex(new VdexFile(mmap.release()));
+  std::unique_ptr<VdexFile> vdex(new VdexFile(std::move(mmap)));
   if (!vdex->IsValid()) {
     *error_msg = "Vdex file is not valid";
     return nullptr;
@@ -175,7 +175,7 @@
                     /* decompile_return_instruction */ false);
     // Update the quickening info size to pretend there isn't any.
     size_t offset = vdex->GetDexSectionHeaderOffset();
-    reinterpret_cast<DexSectionHeader*>(vdex->mmap_->Begin() + offset)->quickening_info_size_ = 0;
+    reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
   }
 
   *error_msg = "Success";
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 866a57e..a39ec31 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -153,7 +153,7 @@
   typedef uint32_t VdexChecksum;
   using QuickeningTableOffsetType = uint32_t;
 
-  explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
+  explicit VdexFile(MemMap&& mmap) : mmap_(std::move(mmap)) {}
 
   // Returns nullptr if the vdex file cannot be opened or is not valid.
   // The mmap_* parameters can be left empty (nullptr/0/false) to allocate at random address.
@@ -215,9 +215,9 @@
                          error_msg);
   }
 
-  const uint8_t* Begin() const { return mmap_->Begin(); }
-  const uint8_t* End() const { return mmap_->End(); }
-  size_t Size() const { return mmap_->Size(); }
+  const uint8_t* Begin() const { return mmap_.Begin(); }
+  const uint8_t* End() const { return mmap_.End(); }
+  size_t Size() const { return mmap_.Size(); }
 
   const VerifierDepsHeader& GetVerifierDepsHeader() const {
     return *reinterpret_cast<const VerifierDepsHeader*>(Begin());
@@ -260,7 +260,7 @@
   }
 
   bool IsValid() const {
-    return mmap_->Size() >= sizeof(VerifierDepsHeader) && GetVerifierDepsHeader().IsValid();
+    return mmap_.Size() >= sizeof(VerifierDepsHeader) && GetVerifierDepsHeader().IsValid();
   }
 
   // This method is for iterating over the dex files in the vdex. If `cursor` is null,
@@ -328,7 +328,7 @@
     return DexBegin() + GetDexSectionHeader().GetDexSize();
   }
 
-  std::unique_ptr<MemMap> mmap_;
+  MemMap mmap_;
 
   DISALLOW_COPY_AND_ASSIGN(VdexFile);
 };
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
index 211d142..093a93f 100644
--- a/test/305-other-fault-handler/fault_handler.cc
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -33,7 +33,7 @@
  public:
   explicit TestFaultHandler(FaultManager* manager)
       : FaultHandler(manager),
-        map_error_(""),
+        map_error_(),
         target_map_(MemMap::MapAnonymous("test-305-mmap",
                                          /* addr */ nullptr,
                                          /* byte_count */ kPageSize,
@@ -43,7 +43,7 @@
                                          /* error_msg */ &map_error_,
                                          /* use_ashmem */ false)),
         was_hit_(false) {
-    CHECK(target_map_ != nullptr) << "Unable to create segfault target address " << map_error_;
+    CHECK(target_map_.IsValid()) << "Unable to create segfault target address " << map_error_;
     manager_->AddHandler(this, /*in_generated_code*/false);
   }
 
@@ -59,16 +59,16 @@
     was_hit_ = true;
 
     LOG(INFO) << "SEGV Caught. mprotecting map.";
-    CHECK(target_map_->Protect(PROT_READ | PROT_WRITE)) << "Failed to mprotect R/W";
+    CHECK(target_map_.Protect(PROT_READ | PROT_WRITE)) << "Failed to mprotect R/W";
     LOG(INFO) << "Setting value to be read.";
     *GetTargetPointer() = kDataValue;
     LOG(INFO) << "Changing prot to be read-only.";
-    CHECK(target_map_->Protect(PROT_READ)) << "Failed to mprotect R-only";
+    CHECK(target_map_.Protect(PROT_READ)) << "Failed to mprotect R-only";
     return true;
   }
 
   void CauseSegfault() {
-    CHECK_EQ(target_map_->GetProtect(), PROT_NONE);
+    CHECK_EQ(target_map_.GetProtect(), PROT_NONE);
 
     // This will segfault. The handler should deal with it though and we will get a value out of it.
     uint32_t data = *GetTargetPointer();
@@ -78,19 +78,19 @@
 
     CHECK(was_hit_);
     CHECK_EQ(data, kDataValue) << "Unexpected read value from mmap";
-    CHECK_EQ(target_map_->GetProtect(), PROT_READ);
+    CHECK_EQ(target_map_.GetProtect(), PROT_READ);
     LOG(INFO) << "Success!";
   }
 
  private:
   uint32_t* GetTargetPointer() {
-    return reinterpret_cast<uint32_t*>(target_map_->Begin() + 8);
+    return reinterpret_cast<uint32_t*>(target_map_.Begin() + 8);
   }
 
   static constexpr uint32_t kDataValue = 0xDEADBEEF;
 
   std::string map_error_;
-  std::unique_ptr<MemMap> target_map_;
+  MemMap target_map_;
   bool was_hit_;
 };
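The test above also covers the instance methods the migration touches: Protect() changes page permissions in place and GetProtect() reports the current setting. A minimal sketch of the same read-only/writable toggle, assuming base/mem_map.h; WriteOnce is an illustrative name:

    #include <sys/mman.h>
    #include <cstdint>
    #include "base/mem_map.h"  // assumed ART include path

    namespace art {

    // Illustrative only: writes one value through a temporarily writable page.
    bool WriteOnce(MemMap* map, uint32_t value) {
      if (!map->Protect(PROT_READ | PROT_WRITE)) {
        return false;
      }
      *reinterpret_cast<uint32_t*>(map->Begin()) = value;
      // Drop back to read-only; GetProtect() now reports PROT_READ.
      return map->Protect(PROT_READ);
    }

    }  // namespace art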