Separate rw from rx views of jit code cache
Create two separate mappings of the ashmem region used for the JIT code
cache: a writable (rw) view and an executable (rx) view. The two mappings
are placed at unrelated, randomly spaced addresses, so no single mapping
is ever writable and executable at the same time.
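The technique, as a self-contained sketch (illustrative only: on device
ART backs the region with ashmem_create_region(); memfd_create() is used
here purely to keep the example standalone):

    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <cstddef>

    // Create one shared-memory region and map it twice: a writable (rw)
    // view and an executable (rx) view at independent addresses. Code is
    // emitted through *rw_out and run through *rx_out at the same offset.
    bool MakeDualViews(size_t size, void** rw_out, void** rx_out) {
      int fd = syscall(SYS_memfd_create, "jit-sketch", 0);
      if (fd == -1 || ftruncate(fd, size) == -1) return false;
      *rw_out = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      *rx_out = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
      close(fd);  // the two mappings keep the region alive
      return *rw_out != MAP_FAILED && *rx_out != MAP_FAILED;
    }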
Bug: 62356545
Test: make -j test-art-target
Change-Id: If176552e6d72ef71e17c96994901f3f3baac44f6
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index a186f4c..1154620 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -191,10 +191,14 @@
VLOG(heap) << "Size " << GetMemMap()->Size();
VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
VLOG(heap) << "Capacity " << PrettySize(capacity);
- // Remap the tail.
+ // Remap the tail. Pass MAP_PRIVATE since we don't want to share the same
+ // ashmem region as the zygote space.
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(), alloc_space_name,
- PROT_READ | PROT_WRITE, &error_msg));
+ std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(),
+ alloc_space_name,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ &error_msg));
CHECK(mem_map.get() != nullptr) << error_msg;
void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
low_memory_mode);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1c36bde..8295f46 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -47,9 +47,13 @@
static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;
+static constexpr int kProtReadOnly = PROT_READ;
+static constexpr int kProtNone = PROT_NONE;
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
+static constexpr size_t kMinMapSpacingPages = 1;
+static constexpr size_t kMaxMapSpacingPages = 128;
#define CHECKED_MPROTECT(memory, size, prot) \
do { \
@@ -60,19 +64,52 @@
} \
} while (false) \
+static MemMap* SplitMemMap(MemMap* existing_map,
+ const char* name,
+ size_t split_offset,
+ int split_prot,
+ std::string* error_msg,
+ bool use_ashmem,
+ unique_fd* shmem_fd = nullptr) {
+ std::string error_str;
+ uint8_t* divider = existing_map->Begin() + split_offset;
+ MemMap* new_map = existing_map->RemapAtEnd(divider,
+ name,
+ split_prot,
+ MAP_SHARED,
+ &error_str,
+ use_ashmem,
+ shmem_fd);
+ if (new_map == nullptr) {
+ std::ostringstream oss;
+ oss << "Failed to create spacing for " << name << ": "
+ << error_str << " offset=" << split_offset;
+ *error_msg = oss.str();
+ return nullptr;
+ }
+ return new_map;
+}
+
+
JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
size_t max_capacity,
bool generate_debug_info,
std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- CHECK_GE(max_capacity, initial_capacity);
+ CHECK_GT(max_capacity, initial_capacity);
+ CHECK_GE(max_capacity - kMaxMapSpacingPages * kPageSize, initial_capacity);
- // Generating debug information is mostly for using the 'perf' tool, which does
- // not work with ashmem.
+ // Generating debug information is for using the Linux 'perf' tool on the
+ // host, which does not work with ashmem.
bool use_ashmem = !generate_debug_info;
+
// With 'perf', we want a 1-1 mapping between an address and a method.
bool garbage_collect_code = !generate_debug_info;
+ // We only use two mappings (separating rw from rx) if we are able to use ashmem.
+ // See the comment above on why generating debug information precludes ashmem.
+ bool use_two_mappings = !generate_debug_info;
+
// We need to have 32 bit offsets from method headers in code cache which point to things
// in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
// Ensure we're below 1 GB to be safe.
@@ -109,30 +146,114 @@
initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
max_capacity = RoundDown(max_capacity, 2 * kPageSize);
- // Data cache is 1 / 2 of the map.
- // TODO: Make this variable?
- size_t data_size = max_capacity / 2;
- size_t code_size = max_capacity - data_size;
- DCHECK_EQ(code_size + data_size, max_capacity);
- uint8_t* divider = data_map->Begin() + data_size;
+ // Create a region for JIT data and executable code. This will be
+ // laid out as:
+ //
+ //   +----------------+ --------------------
+ //   :                : ^                  ^
+ //   :  post_code_map : | post_code_size   |
+ //   :   [padding]    : v                  |
+ //   +----------------+ -                  |
+ //   |                | ^                  |
+ //   |    code_map    | | code_size        |
+ //   |   [JIT Code]   | v                  |
+ //   +----------------+ -                  | total_mapping_size
+ //   :                : ^                  |
+ //   :  pre_code_map  : | pre_code_size    |
+ //   :   [padding]    : v                  |
+ //   +----------------+ -                  |
+ //   |                | ^                  |
+ //   |    data_map    | | data_size        |
+ //   |   [JIT Data]   | v                  v
+ //   +----------------+ --------------------
+ //
+ // The padding regions - pre_code_map and post_code_map - exist to
+ // put some random distance between the writable JIT code mapping
+ // and the executable mapping. The padding is discarded at the end
+ // of this function.
+ size_t total_mapping_size = kMaxMapSpacingPages * kPageSize;
+ size_t data_size = RoundUp((max_capacity - total_mapping_size) / 2, kPageSize);
+ size_t pre_code_size =
+ GetRandomNumber(kMinMapSpacingPages, kMaxMapSpacingPages) * kPageSize;
+ size_t code_size = max_capacity - total_mapping_size - data_size;
+ size_t post_code_size = total_mapping_size - pre_code_size;
+ DCHECK_EQ(code_size + data_size + total_mapping_size, max_capacity);
- MemMap* code_map =
- data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str, use_ashmem);
- if (code_map == nullptr) {
- std::ostringstream oss;
- oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
- *error_msg = oss.str();
+ // Create pre-code padding region after the data region; it is discarded once
+ // the code and data regions are set up.
+ std::unique_ptr<MemMap> pre_code_map(SplitMemMap(data_map.get(),
+ "jit-code-cache-padding",
+ data_size,
+ kProtNone,
+ error_msg,
+ use_ashmem));
+ if (pre_code_map == nullptr) {
return nullptr;
}
- DCHECK_EQ(code_map->Begin(), divider);
+ DCHECK_EQ(data_map->Size(), data_size);
+ DCHECK_EQ(pre_code_map->Size(), pre_code_size + code_size + post_code_size);
+
+ // Create code region.
+ unique_fd writable_code_fd;
+ std::unique_ptr<MemMap> code_map(SplitMemMap(pre_code_map.get(),
+ "jit-code-cache",
+ pre_code_size,
+ use_two_mappings ? kProtCode : kProtAll,
+ error_msg,
+ use_ashmem,
+ &writable_code_fd));
+ if (code_map == nullptr) {
+ return nullptr;
+ }
+ DCHECK_EQ(pre_code_map->Size(), pre_code_size);
+ DCHECK_EQ(code_map->Size(), code_size + post_code_size);
+
+ // Padding after the code region, discarded once the code and data regions
+ // are set up.
+ std::unique_ptr<MemMap> post_code_map(SplitMemMap(code_map.get(),
+ "jit-code-cache-padding",
+ code_size,
+ kProtNone,
+ error_msg,
+ use_ashmem));
+ if (post_code_map == nullptr) {
+ return nullptr;
+ }
+ DCHECK_EQ(code_map->Size(), code_size);
+ DCHECK_EQ(post_code_map->Size(), post_code_size);
+
+ std::unique_ptr<MemMap> writable_code_map;
+ if (use_two_mappings) {
+ // Allocate the R/W view.
+ writable_code_map.reset(MemMap::MapFile(code_size,
+ kProtData,
+ MAP_SHARED,
+ writable_code_fd.get(),
+ /* start */ 0,
+ /* low_4gb */ true,
+ "jit-writable-code",
+ &error_str));
+ if (writable_code_map == nullptr) {
+ std::ostringstream oss;
+ oss << "Failed to create writable code cache: " << error_str << " size=" << code_size;
+ *error_msg = oss.str();
+ return nullptr;
+ }
+ }
data_size = initial_capacity / 2;
code_size = initial_capacity - data_size;
DCHECK_EQ(code_size + data_size, initial_capacity);
- return new JitCodeCache(
- code_map, data_map.release(), code_size, data_size, max_capacity, garbage_collect_code);
+ return new JitCodeCache(writable_code_map.release(),
+ code_map.release(),
+ data_map.release(),
+ code_size,
+ data_size,
+ max_capacity,
+ garbage_collect_code);
}
-JitCodeCache::JitCodeCache(MemMap* code_map,
+JitCodeCache::JitCodeCache(MemMap* writable_code_map,
+ MemMap* executable_code_map,
MemMap* data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
@@ -141,8 +262,9 @@
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
- code_map_(code_map),
data_map_(data_map),
+ executable_code_map_(executable_code_map),
+ writable_code_map_(writable_code_map),
max_capacity_(max_capacity),
current_capacity_(initial_code_capacity + initial_data_capacity),
code_end_(initial_code_capacity),
@@ -162,7 +284,8 @@
inline_cache_cond_("Jit inline cache condition variable", lock_) {
DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
- code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
+ MemMap* writable_map = GetWritableMemMap();
+ code_mspace_ = create_mspace_with_base(writable_map->Begin(), code_end_, false /*locked*/);
data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
@@ -171,7 +294,10 @@
SetFootprintLimit(current_capacity_);
- CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ if (writable_code_map_ != nullptr) {
+ CHECKED_MPROTECT(writable_code_map_->Begin(), writable_code_map_->Size(), kProtReadOnly);
+ }
+ CHECKED_MPROTECT(executable_code_map_->Begin(), executable_code_map_->Size(), kProtCode);
CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
VLOG(jit) << "Created jit code cache: initial data size="
@@ -181,7 +307,7 @@
}
bool JitCodeCache::ContainsPc(const void* ptr) const {
- return code_map_->Begin() <= ptr && ptr < code_map_->End();
+ return executable_code_map_->Begin() <= ptr && ptr < executable_code_map_->End();
}
bool JitCodeCache::ContainsMethod(ArtMethod* method) {
@@ -194,27 +320,96 @@
return false;
}
+/* This method is only for CHECK/DCHECK that pointers are within a region. */
+static bool IsAddressInMap(const void* addr,
+ const MemMap* mem_map,
+ const char* check_name) {
+ if (addr == nullptr || mem_map->HasAddress(addr)) {
+ return true;
+ }
+ LOG(ERROR) << "Is" << check_name << "Address " << addr
+ << " not in [" << reinterpret_cast<void*>(mem_map->Begin())
+ << ", " << reinterpret_cast<void*>(mem_map->Begin() + mem_map->Size()) << ")";
+ return false;
+}
+
+bool JitCodeCache::IsDataAddress(const void* raw_addr) const {
+ return IsAddressInMap(raw_addr, data_map_.get(), "Data");
+}
+
+bool JitCodeCache::IsExecutableAddress(const void* raw_addr) const {
+ return IsAddressInMap(raw_addr, executable_code_map_.get(), "Executable");
+}
+
+bool JitCodeCache::IsWritableAddress(const void* raw_addr) const {
+ return IsAddressInMap(raw_addr, GetWritableMemMap(), "Writable");
+}
+
+// Convert one address within the source map to the same offset within the destination map.
+static void* ConvertAddress(const void* source_address,
+ const MemMap* source_map,
+ const MemMap* destination_map) {
+ DCHECK(source_map->HasAddress(source_address)) << source_address;
+ ptrdiff_t offset = reinterpret_cast<const uint8_t*>(source_address) - source_map->Begin();
+ uintptr_t address = reinterpret_cast<uintptr_t>(destination_map->Begin()) + offset;
+ return reinterpret_cast<void*>(address);
+}
+
+template <typename T>
+T* JitCodeCache::ToExecutableAddress(T* writable_address) const {
+ CHECK(IsWritableAddress(writable_address));
+ if (writable_address == nullptr) {
+ return nullptr;
+ }
+ void* executable_address = ConvertAddress(writable_address,
+ GetWritableMemMap(),
+ executable_code_map_.get());
+ CHECK(IsExecutableAddress(executable_address));
+ return reinterpret_cast<T*>(executable_address);
+}
+
+void* JitCodeCache::ToWritableAddress(const void* executable_address) const {
+ CHECK(IsExecutableAddress(executable_address));
+ if (executable_address == nullptr) {
+ return nullptr;
+ }
+ void* writable_address = ConvertAddress(executable_address,
+ executable_code_map_.get(),
+ GetWritableMemMap());
+ CHECK(IsWritableAddress(writable_address));
+ return writable_address;
+}
+
class ScopedCodeCacheWrite : ScopedTrace {
public:
- explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
- : ScopedTrace("ScopedCodeCacheWrite"),
- code_map_(code_map),
- only_for_tlb_shootdown_(only_for_tlb_shootdown) {
+ explicit ScopedCodeCacheWrite(JitCodeCache* code_cache, bool only_for_tlb_shootdown = false)
+ : ScopedTrace("ScopedCodeCacheWrite") {
ScopedTrace trace("mprotect all");
- CHECKED_MPROTECT(
- code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtAll);
+ int prot_to_start_writing = kProtAll;
+ if (code_cache->writable_code_map_ == nullptr) {
+ // If there is only one mapping, use the executable mapping and toggle between rwx and rx.
+ prot_to_start_writing = kProtAll;
+ prot_to_stop_writing_ = kProtCode;
+ } else {
+ // If there are two mappings, use the writable mapping and toggle between rw and r.
+ prot_to_start_writing = kProtData;
+ prot_to_stop_writing_ = kProtReadOnly;
+ }
+ writable_map_ = code_cache->GetWritableMemMap();
+ // If we're using ScopedCodeCacheWrite only for TLB shootdown, we limit the scope of mprotect
+ // to one page.
+ size_ = only_for_tlb_shootdown ? kPageSize : writable_map_->Size();
+ CHECKED_MPROTECT(writable_map_->Begin(), size_, prot_to_start_writing);
}
~ScopedCodeCacheWrite() {
ScopedTrace trace("mprotect code");
- CHECKED_MPROTECT(
- code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtCode);
+ CHECKED_MPROTECT(writable_map_->Begin(), size_, prot_to_stop_writing_);
}
- private:
- MemMap* const code_map_;
- // If we're using ScopedCacheWrite only for TLB shootdown, we limit the scope of mprotect to
- // one page.
- const bool only_for_tlb_shootdown_;
+ private:
+ int prot_to_stop_writing_;
+ MemMap* writable_map_;
+ size_t size_;
DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};
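The guard above toggles the writable view between rw and r when two
mappings exist, or the single view between rwx and rx otherwise. The
underlying RAII pattern, as a minimal standalone sketch (not ART code;
mprotect() failures are ignored for brevity):

    #include <sys/mman.h>
    #include <cstddef>

    // On construction, make [begin, begin + size) writable; on destruction,
    // restore the caller-supplied protection (e.g. PROT_READ for a separate
    // rw view, or PROT_READ | PROT_EXEC for a single rwx/rx mapping).
    class ScopedWritable {
     public:
      ScopedWritable(void* begin, size_t size, int restore_prot)
          : begin_(begin), size_(size), restore_prot_(restore_prot) {
        mprotect(begin_, size_, PROT_READ | PROT_WRITE);
      }
      ~ScopedWritable() { mprotect(begin_, size_, restore_prot_); }
     private:
      void* const begin_;
      const size_t size_;
      const int restore_prot_;
    };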
@@ -324,8 +519,10 @@
}
}
-static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
+uint8_t* JitCodeCache::GetRootTable(const void* code_ptr, uint32_t* number_of_roots) {
+ CHECK(IsExecutableAddress(code_ptr));
OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ // GetOptimizedCodeInfoPtr uses offsets relative to the EXECUTABLE address.
uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
uint32_t roots = GetNumberOfRoots(data);
if (number_of_roots != nullptr) {
@@ -370,6 +567,8 @@
void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), lock_);
for (const auto& entry : method_code_map_) {
+ // GetRootTable takes an EXECUTABLE address.
+ CHECK(IsExecutableAddress(entry.first));
uint32_t number_of_roots = 0;
uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
@@ -407,17 +606,19 @@
}
}
-void JitCodeCache::FreeCode(const void* code_ptr) {
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
+void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
+ CHECK(IsExecutableAddress(code_ptr));
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
+ // GetRootTable takes an EXECUTABLE address.
FreeData(GetRootTable(code_ptr));
- FreeCode(reinterpret_cast<uint8_t*>(allocation));
+ FreeRawCode(reinterpret_cast<uint8_t*>(FromCodeToAllocation(code_ptr)));
}
void JitCodeCache::FreeAllMethodHeaders(
const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
+ // method_headers are expected to be in the executable region.
{
MutexLock mu(Thread::Current(), *Locks::cha_lock_);
Runtime::Current()->GetClassHierarchyAnalysis()
@@ -429,9 +630,9 @@
// so it's possible for the same method_header to start representing
// different compile code.
MutexLock mu(Thread::Current(), lock_);
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
for (const OatQuickMethodHeader* method_header : method_headers) {
- FreeCode(method_header->GetCode());
+ FreeCodeAndData(method_header->GetCode());
}
}
@@ -448,9 +649,10 @@
// with the classlinker_classes_lock_ held, and suspending ourselves could
// lead to a deadlock.
{
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
if (alloc.ContainsUnsafe(it->second)) {
+ CHECK(IsExecutableAddress(OatQuickMethodHeader::FromCodePointer(it->first)));
method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
it = method_code_map_.erase(it);
} else {
@@ -572,16 +774,21 @@
MutexLock mu(self, lock_);
WaitForPotentialCollectionToComplete(self);
{
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
memory = AllocateCode(total_size);
if (memory == nullptr) {
return nullptr;
}
- code_ptr = memory + header_size;
+ uint8_t* writable_ptr = memory + header_size;
+ code_ptr = ToExecutableAddress(writable_ptr);
- std::copy(code, code + code_size, code_ptr);
- method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- new (method_header) OatQuickMethodHeader(
+ std::copy(code, code + code_size, writable_ptr);
+ OatQuickMethodHeader* writable_method_header =
+ OatQuickMethodHeader::FromCodePointer(writable_ptr);
+ // We need to be able to write the OatQuickMethodHeader, so we use writable_method_header.
+ // The offsets encoded in the OatQuickMethodHeader, however, are relative to the executable
+ // address, so we compute them from code_ptr.
+ new (writable_method_header) OatQuickMethodHeader(
code_ptr - stack_map,
code_ptr - method_info,
frame_size_in_bytes,
@@ -597,10 +804,16 @@
// https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
+ if (writable_ptr != code_ptr) {
+ FlushDataCache(reinterpret_cast<char*>(writable_ptr),
+ reinterpret_cast<char*>(writable_ptr + code_size));
+ }
DCHECK(!Runtime::Current()->IsAotCompiler());
if (has_should_deoptimize_flag) {
- method_header->SetHasShouldDeoptimizeFlag();
+ writable_method_header->SetHasShouldDeoptimizeFlag();
}
+ // All the pointers exported from the cache are executable addresses.
+ method_header = ToExecutableAddress(writable_method_header);
}
number_of_compilations_++;
@@ -639,13 +852,14 @@
// but below we still make the compiled code valid for the method.
MutexLock mu(self, lock_);
// Fill the root table before updating the entry point.
+ CHECK(IsDataAddress(roots_data));
DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
DCHECK_LE(roots_data, stack_map);
FillRootTable(roots_data, roots);
{
// Flush data cache, as compiled code references literals in it.
// We also need a TLB shootdown to act as memory barrier across cores.
- ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
+ ScopedCodeCacheWrite ccw(this, /* only_for_tlb_shootdown */ true);
FlushDataCache(reinterpret_cast<char*>(roots_data),
reinterpret_cast<char*>(roots_data + data_size));
}
@@ -696,11 +910,11 @@
bool in_cache = false;
{
- ScopedCodeCacheWrite ccw(code_map_.get());
+ ScopedCodeCacheWrite ccw(this);
for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
if (code_iter->second == method) {
if (release_memory) {
- FreeCode(code_iter->first);
+ FreeCodeAndData(code_iter->first);
}
code_iter = method_code_map_.erase(code_iter);
in_cache = true;
@@ -754,10 +968,10 @@
profiling_infos_.erase(profile);
}
method->SetProfilingInfo(nullptr);
- ScopedCodeCacheWrite ccw(code_map_.get());
+ ScopedCodeCacheWrite ccw(this);
for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
if (code_iter->second == method) {
- FreeCode(code_iter->first);
+ FreeCodeAndData(code_iter->first);
code_iter = method_code_map_.erase(code_iter);
continue;
}
@@ -823,6 +1037,7 @@
uint8_t* stack_map_data,
uint8_t* roots_data) {
DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
+ CHECK(IsDataAddress(roots_data));
MutexLock mu(self, lock_);
FreeData(reinterpret_cast<uint8_t*>(roots_data));
}
@@ -944,11 +1159,11 @@
void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
size_t per_space_footprint = new_footprint / 2;
- DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
+ CHECK(IsAlignedParam(per_space_footprint, kPageSize));
DCHECK_EQ(per_space_footprint * 2, new_footprint);
mspace_set_footprint_limit(data_mspace_, per_space_footprint);
{
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
mspace_set_footprint_limit(code_mspace_, per_space_footprint);
}
}
@@ -1026,8 +1241,8 @@
number_of_collections_++;
live_bitmap_.reset(CodeCacheBitmap::Create(
"code-cache-bitmap",
- reinterpret_cast<uintptr_t>(code_map_->Begin()),
- reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
+ reinterpret_cast<uintptr_t>(executable_code_map_->Begin()),
+ reinterpret_cast<uintptr_t>(executable_code_map_->Begin() + current_capacity_ / 2)));
collection_in_progress_ = true;
}
}
@@ -1103,14 +1318,16 @@
std::unordered_set<OatQuickMethodHeader*> method_headers;
{
MutexLock mu(self, lock_);
- ScopedCodeCacheWrite scc(code_map_.get());
+ ScopedCodeCacheWrite scc(this);
// Iterate over all compiled code and remove entries that are not marked.
for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
const void* code_ptr = it->first;
+ CHECK(IsExecutableAddress(code_ptr));
uintptr_t allocation = FromCodeToAllocation(code_ptr);
if (GetLiveBitmap()->Test(allocation)) {
++it;
} else {
+ CHECK(IsExecutableAddress(it->first));
method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
it = method_code_map_.erase(it);
}
@@ -1153,6 +1370,7 @@
for (const auto& it : method_code_map_) {
ArtMethod* method = it.second;
const void* code_ptr = it.first;
+ CHECK(IsExecutableAddress(code_ptr));
const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
@@ -1178,6 +1396,7 @@
// Free all profiling infos of methods not compiled nor being compiled.
auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
[this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
+ CHECK(IsDataAddress(info));
const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
// We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
// that the compiled code would not get revived. As mutator threads run concurrently,
@@ -1238,6 +1457,7 @@
--it;
const void* code_ptr = it->first;
+ CHECK(IsExecutableAddress(code_ptr));
OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
if (!method_header->Contains(pc)) {
return nullptr;
@@ -1320,6 +1540,7 @@
// store in the ArtMethod's ProfilingInfo pointer.
QuasiAtomic::ThreadFenceRelease();
+ CHECK(IsDataAddress(info));
method->SetProfilingInfo(info);
profiling_infos_.push_back(info);
histogram_profiling_info_memory_use_.AddValue(profile_info_size);
@@ -1332,7 +1553,8 @@
if (code_mspace_ == mspace) {
size_t result = code_end_;
code_end_ += increment;
- return reinterpret_cast<void*>(result + code_map_->Begin());
+ MemMap* writable_map = GetWritableMemMap();
+ return reinterpret_cast<void*>(result + writable_map->Begin());
} else {
DCHECK_EQ(data_mspace_, mspace);
size_t result = data_end_;
@@ -1484,6 +1706,7 @@
size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
MutexLock mu(Thread::Current(), lock_);
+ CHECK(IsExecutableAddress(ptr));
return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
}
@@ -1519,22 +1742,27 @@
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
// Ensure the header ends up at expected instruction alignment.
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
+ CHECK(IsWritableAddress(result));
used_memory_for_code_ += mspace_usable_size(result);
return result;
}
-void JitCodeCache::FreeCode(uint8_t* code) {
- used_memory_for_code_ -= mspace_usable_size(code);
- mspace_free(code_mspace_, code);
+void JitCodeCache::FreeRawCode(void* code) {
+ CHECK(IsExecutableAddress(code));
+ void* writable_code = ToWritableAddress(code);
+ used_memory_for_code_ -= mspace_usable_size(writable_code);
+ mspace_free(code_mspace_, writable_code);
}
uint8_t* JitCodeCache::AllocateData(size_t data_size) {
void* result = mspace_malloc(data_mspace_, data_size);
+ CHECK(IsDataAddress(reinterpret_cast<uint8_t*>(result)));
used_memory_for_data_ += mspace_usable_size(result);
return reinterpret_cast<uint8_t*>(result);
}
void JitCodeCache::FreeData(uint8_t* data) {
+ CHECK(IsDataAddress(data));
used_memory_for_data_ -= mspace_usable_size(data);
mspace_free(data_mspace_, data);
}
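The writable/executable conversions above are plain offset arithmetic
between the two views. A standalone sketch of the same idea (DualMapping
and ToExecutable are hypothetical names, not the ART API):

    #include <cstddef>
    #include <cstdint>

    struct DualMapping {
      uint8_t* writable;    // base of the rw view
      uint8_t* executable;  // base of the rx view
      size_t size;
    };

    // Translate an address in the rw view to the same offset in the rx
    // view, mirroring what JitCodeCache::ToExecutableAddress() does.
    inline void* ToExecutable(const DualMapping& m, void* writable_addr) {
      ptrdiff_t offset = static_cast<uint8_t*>(writable_addr) - m.writable;
      return m.executable + offset;
    }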
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index daa1d61..a062ce4 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -229,6 +229,8 @@
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr);
+
// The GC needs to disallow the reading of inline caches when it processes them,
// to avoid having a class being used while it is being deleted.
void AllowInlineCacheAccess() REQUIRES(!lock_);
@@ -247,9 +249,12 @@
}
private:
+ friend class ScopedCodeCacheWrite;
+
// Take ownership of maps.
- JitCodeCache(MemMap* code_map,
+ JitCodeCache(MemMap* writable_code_map,
+ MemMap* executable_code_map,
MemMap* data_map,
size_t initial_code_capacity,
size_t initial_data_capacity,
size_t max_capacity,
@@ -292,7 +297,7 @@
REQUIRES(!Locks::cha_lock_);
// Free in the mspace allocations for `code_ptr`.
- void FreeCode(const void* code_ptr) REQUIRES(lock_);
+ void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
// Number of bytes allocated in the code cache.
size_t CodeCacheSizeLocked() REQUIRES(lock_);
@@ -325,7 +330,7 @@
bool CheckLiveCompiledCodeHasProfilingInfo()
REQUIRES(lock_);
- void FreeCode(uint8_t* code) REQUIRES(lock_);
+ void FreeRawCode(void* code) REQUIRES(lock_);
uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
void FreeData(uint8_t* data) REQUIRES(lock_);
uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
@@ -335,25 +340,58 @@
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ MemMap* GetWritableMemMap() const {
+ if (writable_code_map_ == nullptr) {
+ // The system required us to map the JIT Code Cache RWX (see
+ // JitCodeCache::Create()).
+ return executable_code_map_.get();
+ } else {
+ // Executable code is mapped RX, and writable code is mapped RW to the
+ // same underlying memory, but at a different address.
+ return writable_code_map_.get();
+ }
+ }
+
+ bool IsDataAddress(const void* raw_addr) const;
+
+ bool IsExecutableAddress(const void* raw_addr) const;
+
+ bool IsWritableAddress(const void* raw_addr) const;
+
+ template <typename T>
+ T* ToExecutableAddress(T* writable_address) const;
+
+ void* ToWritableAddress(const void* executable_address) const;
+
// Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
// Condition to wait on during collection.
ConditionVariable lock_cond_ GUARDED_BY(lock_);
// Whether there is a code cache collection in progress.
bool collection_in_progress_ GUARDED_BY(lock_);
- // Mem map which holds code.
- std::unique_ptr<MemMap> code_map_;
+ // JITting methods obviously requires both write and execute permissions on a region of memory.
+ // In the typical (non-debugging) case, we separate the memory-mapped view that can write the code
+ // from the view that the runtime uses to execute the code. Having these two views eliminates any
+ // single address region having rwx permissions. An attacker could still write to the writable
+ // view and then execute from the executable view, but we allocate the two mappings with a random
+ // address relationship to each other, so an attacker needs to discover two addresses rather than
+ // just one. In the debugging case there is no file descriptor to back the
+ // shared memory, and hence we have to use a single mapping.
// Mem map which holds data (stack maps and profiling info).
std::unique_ptr<MemMap> data_map_;
+ // Mem map which holds a non-writable view of code for JIT.
+ std::unique_ptr<MemMap> executable_code_map_;
+ // Mem map which holds a non-executable view of code for JIT.
+ std::unique_ptr<MemMap> writable_code_map_;
// The opaque mspace for allocating code.
void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
void* data_mspace_ GUARDED_BY(lock_);
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
- // Holds compiled code associated to the ArtMethod.
+ // Holds non-writable compiled code associated with the ArtMethod.
SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
- // Holds osr compiled code associated to the ArtMethod.
+ // Holds non-writable osr compiled code associated with the ArtMethod.
SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
// ProfilingInfo objects we have allocated.
std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 7b41608..17035dd 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -536,8 +536,13 @@
}
}
-MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
- std::string* error_msg, bool use_ashmem) {
+MemMap* MemMap::RemapAtEnd(uint8_t* new_end,
+ const char* tail_name,
+ int tail_prot,
+ int sharing_flags,
+ std::string* error_msg,
+ bool use_ashmem,
+ unique_fd* shmem_fd) {
use_ashmem = use_ashmem && !kIsTargetLinux;
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
@@ -563,14 +568,14 @@
DCHECK_ALIGNED(tail_base_size, kPageSize);
unique_fd fd;
- int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int flags = MAP_ANONYMOUS | sharing_flags;
if (use_ashmem) {
// android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
// prefixed "dalvik-".
std::string debug_friendly_name("dalvik-");
debug_friendly_name += tail_name;
fd.reset(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
- flags = MAP_PRIVATE | MAP_FIXED;
+ flags = MAP_FIXED | sharing_flags;
if (fd.get() == -1) {
*error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
tail_name, strerror(errno));
@@ -604,6 +609,9 @@
fd.get());
return nullptr;
}
+ if (shmem_fd != nullptr) {
+ shmem_fd->reset(fd.release());
+ }
return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 5603963..d8908ad 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -25,6 +25,7 @@
#include <string>
#include "android-base/thread_annotations.h"
+#include "android-base/unique_fd.h"
namespace art {
@@ -37,6 +38,8 @@
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif
+using android::base::unique_fd;
+
#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#else
@@ -168,11 +171,14 @@
}
// Unmap the pages at end and remap them to create another memory map.
+ // sharing_flags should be either MAP_PRIVATE or MAP_SHARED.
MemMap* RemapAtEnd(uint8_t* new_end,
const char* tail_name,
int tail_prot,
+ int sharing_flags,
std::string* error_msg,
- bool use_ashmem = true);
+ bool use_ashmem = true,
+ unique_fd* shmem_fd = nullptr);
static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
REQUIRES(!MemMap::mem_maps_lock_);
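A sketch of a call site using the two new parameters (hypothetical caller,
not part of this patch): the tail is carved off MAP_SHARED and ashmem-backed,
and the returned fd lets the same pages be mapped again elsewhere, as
JitCodeCache::Create() does.

    unique_fd shmem_fd;
    std::string error_msg;
    std::unique_ptr<MemMap> tail(map->RemapAtEnd(map->Begin() + split_offset,
                                                 "example-tail",
                                                 PROT_READ | PROT_EXEC,
                                                 MAP_SHARED,
                                                 &error_msg,
                                                 /* use_ashmem */ true,
                                                 &shmem_fd));
    // shmem_fd can now be passed to MemMap::MapFile() to create a second,
    // writable view of the same pages at an unrelated address.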
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 5f027b1..8d6bb38 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -74,6 +74,7 @@
MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
"MemMapTest_RemapAtEndTest_map1",
PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
&error_msg);
// Check the states of the two maps.
EXPECT_EQ(m0->Begin(), base0) << error_msg;
@@ -456,6 +457,7 @@
std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
"MemMapTest_AlignByTest_map1",
PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
&error_msg));
uint8_t* base1 = m1->Begin();
ASSERT_TRUE(base1 != nullptr) << error_msg;
@@ -465,6 +467,7 @@
std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
"MemMapTest_AlignByTest_map2",
PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
&error_msg));
uint8_t* base2 = m2->Begin();
ASSERT_TRUE(base2 != nullptr) << error_msg;
@@ -474,6 +477,7 @@
std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
"MemMapTest_AlignByTest_map1",
PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
&error_msg));
uint8_t* base3 = m3->Begin();
ASSERT_TRUE(base3 != nullptr) << error_msg;