Change MemMap::maps_ to not be a global variable
Runtime.exit() was causing globals to get destructed at the same time
that another thread was using it for allocating a new mem map.
(cherry picked from commit 6e88ef6b604a7a945a466784580c42e6554c1289)
Bug: 17962201
Change-Id: I400cb7b8141d858f3c08a6fe59a02838c04c6962
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 1e6c038..86167ec 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -404,6 +404,7 @@
// Reserve where the image will be loaded up front so that other parts of test set up don't
// accidentally end up colliding with the fixed memory address when we need to load the image.
std::string error_msg;
+ MemMap::Init();
image_reservation_.reset(MemMap::MapAnonymous("image reservation",
reinterpret_cast<byte*>(ART_BASE_ADDRESS),
(size_t)100 * 1024 * 1024, // 100MB
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 3d119bb..355036b 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -129,14 +129,17 @@
compiler_driver_.reset();
// Tear down old runtime before making a new one, clearing out misc state.
+
+ // Remove the reservation of the memory used to load the image.
+ // Need to do this before we reset the runtime.
+ UnreserveImageSpace();
+
runtime_.reset();
java_lang_dex_file_ = NULL;
+ MemMap::Init();
std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
- // Remove the reservation of the memory for use to load the image.
- UnreserveImageSpace();
-
RuntimeOptions options;
std::string image("-Ximage:");
image.append(image_location.GetFilename());
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 05a940cb..6d7f115 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1240,6 +1240,7 @@
RuntimeOptions runtime_options;
std::vector<const DexFile*> boot_class_path;
+ art::MemMap::Init(); // For ZipEntry::ExtractToMemMap.
if (boot_image_option.empty()) {
size_t failure_count = OpenDexFiles(dex_filenames, dex_locations, boot_class_path);
if (failure_count > 0) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index a9b2a43..74f6779 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -761,6 +761,7 @@
static int patchoat(int argc, char **argv) {
InitLogging(argv);
+ MemMap::Init();
const bool debug = kIsDebugBuild;
orig_argc = argc;
orig_argv = argv;
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 989384e..e9e11f6 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -185,6 +185,8 @@
int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
ASSERT_EQ(mkdir_result, 0);
+ MemMap::Init(); // For LoadExpectSingleDexFile
+
std::string error_msg;
java_lang_dex_file_ = LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str());
boot_class_path_.push_back(java_lang_dex_file_);
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 0615abd..bad86db 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -70,7 +70,7 @@
return os;
}
-MemMap::Maps MemMap::maps_;
+MemMap::Maps* MemMap::maps_ = nullptr;
#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.
@@ -461,11 +461,12 @@
// Remove it from maps_.
MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
bool found = false;
- for (auto it = maps_.lower_bound(base_begin_), end = maps_.end();
+ DCHECK(maps_ != nullptr);
+ for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
it != end && it->first == base_begin_; ++it) {
if (it->second == this) {
found = true;
- maps_.erase(it);
+ maps_->erase(it);
break;
}
}
@@ -487,7 +488,8 @@
// Add it to maps_.
MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
- maps_.insert(std::pair<void*, MemMap*>(base_begin_, this));
+ DCHECK(maps_ != nullptr);
+ maps_->insert(std::make_pair(base_begin_, this));
}
};
@@ -618,7 +620,7 @@
bool MemMap::HasMemMap(MemMap* map) {
void* base_begin = map->BaseBegin();
- for (auto it = maps_.lower_bound(base_begin), end = maps_.end();
+ for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
it != end && it->first == base_begin; ++it) {
if (it->second == map) {
return true;
@@ -630,7 +632,8 @@
MemMap* MemMap::GetLargestMemMapAt(void* address) {
size_t largest_size = 0;
MemMap* largest_map = nullptr;
- for (auto it = maps_.lower_bound(address), end = maps_.end();
+ DCHECK(maps_ != nullptr);
+ for (auto it = maps_->lower_bound(address), end = maps_->end();
it != end && it->first == address; ++it) {
MemMap* map = it->second;
CHECK(map != nullptr);
@@ -642,6 +645,20 @@
return largest_map;
}
+void MemMap::Init() {
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ if (maps_ == nullptr) {
+ // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
+ maps_ = new Maps;
+ }
+}
+
+void MemMap::Shutdown() {
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ delete maps_;
+ maps_ = nullptr;
+}
+
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index e49ed48..ad62f83 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -138,6 +138,9 @@
typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
+ static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+
private:
MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -167,7 +170,7 @@
#endif
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
- static Maps maps_ GUARDED_BY(Locks::mem_maps_lock_);
+ static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 69f618c..5ec3335 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -85,6 +85,10 @@
delete m1;
}
+ void CommonInit() {
+ MemMap::Init();
+ }
+
#if defined(__LP64__) && !defined(__x86_64__)
static uintptr_t GetLinearScanPos() {
return MemMap::next_mem_pos_;
@@ -99,10 +103,10 @@
#endif
TEST_F(MemMapTest, Start) {
+ CommonInit();
uintptr_t start = GetLinearScanPos();
EXPECT_LE(64 * KB, start);
EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
-
#ifdef __BIONIC__
// Test a couple of values. Make sure they are different.
uintptr_t last = 0;
@@ -120,6 +124,7 @@
#endif
TEST_F(MemMapTest, MapAnonymousEmpty) {
+ CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
nullptr,
@@ -141,6 +146,7 @@
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
+ CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
nullptr,
@@ -155,6 +161,7 @@
#endif
TEST_F(MemMapTest, MapAnonymousExactAddr) {
+ CommonInit();
std::string error_msg;
// Map at an address that should work, which should succeed.
std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
@@ -200,6 +207,7 @@
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
std::string error_msg;
+ CommonInit();
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
reinterpret_cast<byte*>(start_addr),
0x21000000,
@@ -212,6 +220,7 @@
}
TEST_F(MemMapTest, MapAnonymousOverflow) {
+ CommonInit();
std::string error_msg;
uintptr_t ptr = 0;
ptr -= kPageSize; // Now it's close to the top.
@@ -227,6 +236,7 @@
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
+ CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
reinterpret_cast<byte*>(UINT64_C(0x100000000)),
@@ -239,6 +249,7 @@
}
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
+ CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
reinterpret_cast<byte*>(0xF0000000),
@@ -252,6 +263,7 @@
#endif
TEST_F(MemMapTest, CheckNoGaps) {
+ CommonInit();
std::string error_msg;
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f9e5338..bcb9e0c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -196,6 +196,7 @@
Thread::Shutdown();
QuasiAtomic::Shutdown();
verifier::MethodVerifier::Shutdown();
+ MemMap::Shutdown();
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == nullptr || instance_ == this);
instance_ = nullptr;
@@ -652,6 +653,8 @@
bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
+ MemMap::Init();
+
std::unique_ptr<ParsedOptions> options(ParsedOptions::Create(raw_options, ignore_unrecognized));
if (options.get() == nullptr) {
LOG(ERROR) << "Failed to parse options";