ART: Resolve MAP_32BIT limitation on x86_64
Add checks that ensure when low4gb is set and an expected pointer
is given the requested memory fits into 4GB.
On x86_64, only use MAP_32BIT when there is no expected pointer.
This avoids a limitation of mmap's MAP_32BIT flag: it only places
mappings in the lower 2GB of the address space, so it cannot honor
an expected address above that range.
Add tests to check behavior.
Original Author: Qiming Shi <qiming.shi@intel.com>
Change-Id: Ia2e3e0a46764ef70126b0c264f1fae681622d3cb
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 1594338..98b0bbf 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -128,6 +128,20 @@
// We need to store and potentially set an error number for pretty printing of errors
int saved_errno = 0;
+#ifdef __LP64__
+ // When requesting low_4g memory and having an expectation, the requested range should fit into
+ // 4GB.
+ if (low_4gb && (
+ // Start out of bounds.
+ (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
+ // End out of bounds. For simplicity, this will fail for the last page of memory.
+ (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
+ *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
+ expected, expected + page_aligned_byte_count);
+ return nullptr;
+ }
+#endif
+
// TODO:
// A page allocator would be a useful abstraction here, as
// 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
@@ -192,7 +206,7 @@
#else
#ifdef __x86_64__
- if (low_4gb) {
+ if (low_4gb && expected == nullptr) {
flags |= MAP_32BIT;
}
#endif
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index eea3307..2b59cd9 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -163,4 +163,57 @@
}
#endif
+TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
+ std::string error_msg;
+ UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+ reinterpret_cast<byte*>(0x71000000),
+ 0x21000000,
+ PROT_READ | PROT_WRITE,
+ true,
+ &error_msg));
+ ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 0x71000000U);
+}
+
+TEST_F(MemMapTest, MapAnonymousOverflow) {
+ std::string error_msg;
+ uintptr_t ptr = 0;
+ ptr -= kPageSize; // Now it's close to the top.
+ UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
+ reinterpret_cast<byte*>(ptr),
+ 2 * kPageSize, // brings it over the top.
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_EQ(nullptr, map.get());
+ ASSERT_FALSE(error_msg.empty());
+}
+
+#ifdef __LP64__
+TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
+ std::string error_msg;
+ UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
+ reinterpret_cast<byte*>(UINT64_C(0x100000000)),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ true,
+ &error_msg));
+ ASSERT_EQ(nullptr, map.get());
+ ASSERT_FALSE(error_msg.empty());
+}
+
+TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
+ std::string error_msg;
+ UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
+ reinterpret_cast<byte*>(0xF0000000),
+ 0x20000000,
+ PROT_READ | PROT_WRITE,
+ true,
+ &error_msg));
+ ASSERT_EQ(nullptr, map.get());
+ ASSERT_FALSE(error_msg.empty());
+}
+#endif
+
} // namespace art