Generalize Valgrind annotations in ART to support ASan.

Also add redzones around non-fixed MemMaps.
Also raise the -Wframe-larger-than limit to enable the arm64 ASan build.
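
The new runtime/base/memory_tool.h hides the tool differences behind
MEMORY_TOOL_MAKE_NOACCESS/UNDEFINED/DEFINED, RUNNING_ON_MEMORY_TOOL and a few
kMemoryTool* constants, so callers no longer include <valgrind.h> directly.
A minimal usage sketch (hypothetical helper, not code from this change;
assumes ART's RoundUp() and the allocators' 8-byte kMemoryToolRedZoneBytes
are in scope):

  #include <cstdint>
  #include "base/memory_tool.h"

  // Hand out 'bytes' from an arena cursor and poison the rounding slack as a
  // redzone, so both Valgrind and ASan flag accesses past the requested size.
  static uint8_t* AllocWithRedZone(uint8_t* cursor, size_t bytes) {
    size_t rounded = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
    MEMORY_TOOL_MAKE_NOACCESS(cursor + bytes, rounded - bytes);
    return cursor;  // the caller advances its cursor by 'rounded'
  }

Unlike Valgrind, ASan requires poisoned regions to be unpoisoned again
(MEMORY_TOOL_MAKE_UNDEFINED) before the backing memory is reused or returned
to the system, which is why the MemMap and RosAlloc destructors now unpoison
their memory before it is unmapped.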

Change-Id: Ie572481a25fead59fc8978d2c317a33ac418516c
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index ee0cb09..83dd690 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -266,10 +266,10 @@
   # Larger frame-size for host clang builds today
   ifneq ($(ART_COVERAGE),true)
     ifneq ($(NATIVE_COVERAGE),true)
-      ifndef SANITIZE_HOST
-        art_host_non_debug_cflags += -Wframe-larger-than=2700
-      endif
-      ifndef SANITIZE_TARGET
+      art_host_non_debug_cflags += -Wframe-larger-than=2700
+      ifdef SANITIZE_TARGET
+        art_target_non_debug_cflags += -Wframe-larger-than=5450
+      else
         art_target_non_debug_cflags += -Wframe-larger-than=1728
       endif
     endif
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 9d45ce2..754fe84 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -18,7 +18,7 @@
 #define ART_COMPILER_IMAGE_WRITER_H_
 
 #include <stdint.h>
-#include <valgrind.h>
+#include "base/memory_tool.h"
 
 #include <cstddef>
 #include <memory>
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a4e74d4..74ec2ed 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -18,7 +18,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/stat.h>
-#include <valgrind.h>
+#include "base/memory_tool.h"
 
 #include <fstream>
 #include <iostream>
@@ -519,7 +519,7 @@
     // the runtime.
     LogCompletionTime();
 
-    if (kIsDebugBuild || (RUNNING_ON_VALGRIND != 0)) {
+    if (kIsDebugBuild || (RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
       delete runtime_;  // See field declaration for why this is manual.
     }
   }
@@ -2003,7 +2003,7 @@
   // Everything was done, do an explicit exit here to avoid running Runtime destructors that take
   // time (bug 10645725) unless we're a debug build or running on valgrind. Note: The Dex2Oat class
   // should not destruct the runtime in this case.
-  if (!art::kIsDebugBuild && (RUNNING_ON_VALGRIND == 0)) {
+  if (!art::kIsDebugBuild && (RUNNING_ON_MEMORY_TOOL == 0)) {
     exit(result);
   }
   return result;
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 8f2d94b..e5832e1 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -23,11 +23,11 @@
 #include "mem_map.h"
 #include "mutex.h"
 #include "thread-inl.h"
-#include <memcheck/memcheck.h>
+#include "base/memory_tool.h"
 
 namespace art {
 
-static constexpr size_t kValgrindRedZoneBytes = 8;
+static constexpr size_t kMemoryToolRedZoneBytes = 8;
 constexpr size_t Arena::kDefaultSize;
 
 template <bool kCount>
@@ -217,9 +217,9 @@
 }
 
 void ArenaPool::FreeArenaChain(Arena* first) {
-  if (UNLIKELY(RUNNING_ON_VALGRIND > 0)) {
+  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
     for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
-      VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+      MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
     }
   }
   if (first != nullptr) {
@@ -255,7 +255,7 @@
     end_(nullptr),
     ptr_(nullptr),
     arena_head_(nullptr),
-    running_on_valgrind_(RUNNING_ON_VALGRIND > 0) {
+    is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL) {
 }
 
 void ArenaAllocator::UpdateBytesAllocated() {
@@ -267,7 +267,7 @@
 }
 
 void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
-  size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 8);
+  size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
   if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
     // Obtain a new block.
     ObtainNewArenaForAllocation(rounded_bytes);
@@ -282,7 +282,7 @@
   for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) {
     CHECK_EQ(*ptr, 0U);
   }
-  VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded_bytes - bytes);
+  MEMORY_TOOL_MAKE_NOACCESS(ret + bytes, rounded_bytes - bytes);
   return ret;
 }
 
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index d9723b5..d977941 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -207,7 +207,7 @@
 
   // Returns zeroed memory.
   void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
-    if (UNLIKELY(running_on_valgrind_)) {
+    if (UNLIKELY(is_running_on_memory_tool_)) {
       return AllocValgrind(bytes, kind);
     }
     bytes = RoundUp(bytes, kAlignment);
@@ -280,7 +280,7 @@
   uint8_t* end_;
   uint8_t* ptr_;
   Arena* arena_head_;
-  bool running_on_valgrind_;
+  bool is_running_on_memory_tool_;
 
   template <typename U>
   friend class ArenaAllocatorAdapter;
diff --git a/runtime/base/memory_tool.h b/runtime/base/memory_tool.h
new file mode 100644
index 0000000..36469b8
--- /dev/null
+++ b/runtime/base/memory_tool.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_MEMORY_TOOL_H_
+#define ART_RUNTIME_BASE_MEMORY_TOOL_H_
+
+#include <stddef.h>
+
+#if !defined(__has_feature)
+#define __has_feature(x) 0
+#endif
+
+#if __has_feature(address_sanitizer)
+
+#include <sanitizer/asan_interface.h>
+#define ADDRESS_SANITIZER
+#define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
+#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
+#define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
+#define RUNNING_ON_MEMORY_TOOL 1U
+constexpr bool kMemoryToolIsValgrind = false;
+constexpr bool kMemoryToolDetectsLeaks = true;
+constexpr bool kMemoryToolAddsRedzones = true;
+constexpr size_t kMemoryToolStackGuardSizeScale = 2;
+
+#else
+
+#include <valgrind.h>
+#include <memcheck/memcheck.h>
+#define MEMORY_TOOL_MAKE_NOACCESS(p, s) VALGRIND_MAKE_MEM_NOACCESS(p, s)
+#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) VALGRIND_MAKE_MEM_UNDEFINED(p, s)
+#define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s)
+#define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND
+constexpr bool kMemoryToolIsValgrind = true;
+constexpr bool kMemoryToolDetectsLeaks = true;
+constexpr bool kMemoryToolAddsRedzones = true;
+constexpr size_t kMemoryToolStackGuardSizeScale = 1;
+
+#endif
+
+#endif  // ART_RUNTIME_BASE_MEMORY_TOOL_H_
diff --git a/runtime/base/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc
index 4a7be38..d823edd 100644
--- a/runtime/base/scoped_arena_allocator.cc
+++ b/runtime/base/scoped_arena_allocator.cc
@@ -17,11 +17,11 @@
 #include "scoped_arena_allocator.h"
 
 #include "arena_allocator.h"
-#include <memcheck/memcheck.h>
+#include "base/memory_tool.h"
 
 namespace art {
 
-static constexpr size_t kValgrindRedZoneBytes = 8;
+static constexpr size_t kMemoryToolRedZoneBytes = 8;
 
 ArenaStack::ArenaStack(ArenaPool* arena_pool)
   : DebugStackRefCounter(),
@@ -30,7 +30,7 @@
     top_arena_(nullptr),
     top_ptr_(nullptr),
     top_end_(nullptr),
-    running_on_valgrind_(RUNNING_ON_VALGRIND > 0) {
+    is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL > 0) {
 }
 
 ArenaStack::~ArenaStack() {
@@ -92,7 +92,7 @@
 }
 
 void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
-  size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 8);
+  size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
   uint8_t* ptr = top_ptr_;
   if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
     ptr = AllocateFromNextArena(rounded_bytes);
@@ -100,8 +100,8 @@
   }
   CurrentStats()->RecordAlloc(bytes, kind);
   top_ptr_ = ptr + rounded_bytes;
-  VALGRIND_MAKE_MEM_UNDEFINED(ptr, bytes);
-  VALGRIND_MAKE_MEM_NOACCESS(ptr + bytes, rounded_bytes - bytes);
+  MEMORY_TOOL_MAKE_UNDEFINED(ptr, bytes);
+  MEMORY_TOOL_MAKE_NOACCESS(ptr + bytes, rounded_bytes - bytes);
   return ptr;
 }
 
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index bbedeac..ca514e4 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -64,7 +64,7 @@
 
   // Private - access via ScopedArenaAllocator or ScopedArenaAllocatorAdapter.
   void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
-    if (UNLIKELY(running_on_valgrind_)) {
+    if (UNLIKELY(is_running_on_memory_tool_)) {
       return AllocValgrind(bytes, kind);
     }
     size_t rounded_bytes = RoundUp(bytes, 8);
@@ -88,7 +88,7 @@
   uint8_t* top_ptr_;
   uint8_t* top_end_;
 
-  const bool running_on_valgrind_;
+  const bool is_running_on_memory_tool_;
 
   friend class ScopedArenaAllocator;
   template <typename T>
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index bba92a1..25fdd7c 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -24,7 +24,7 @@
 namespace allocator {
 
 inline ALWAYS_INLINE bool RosAlloc::ShouldCheckZeroMemory() {
-  return kCheckZeroMemory && !running_on_valgrind_;
+  return kCheckZeroMemory && !is_running_on_memory_tool_;
 }
 
 template<bool kThreadSafe>
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 49c7fda..bd10f7b 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -16,8 +16,9 @@
 
 #include "rosalloc.h"
 
+#include "base/memory_tool.h"
 #include "base/mutex-inl.h"
-#include "gc/space/valgrind_settings.h"
+#include "gc/space/memory_tool_settings.h"
 #include "mem_map.h"
 #include "mirror/class-inl.h"
 #include "mirror/object.h"
@@ -50,7 +51,7 @@
     reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);
 
 RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
-                   PageReleaseMode page_release_mode, bool running_on_valgrind,
+                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                    size_t page_release_size_threshold)
     : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
       capacity_(capacity), max_capacity_(max_capacity),
@@ -58,7 +59,7 @@
       bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
       page_release_mode_(page_release_mode),
       page_release_size_threshold_(page_release_size_threshold),
-      running_on_valgrind_(running_on_valgrind) {
+      is_running_on_memory_tool_(running_on_memory_tool) {
   DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
   DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
   CHECK_LE(capacity, max_capacity);
@@ -110,6 +111,9 @@
   for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
     delete size_bracket_locks_[i];
   }
+  if (is_running_on_memory_tool_) {
+    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
+  }
 }
 
 void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
@@ -1897,8 +1901,8 @@
     MutexLock lock_mu(self, lock_);
     size_t pm_end = page_map_size_;
     size_t i = 0;
-    size_t valgrind_modifier =  running_on_valgrind_ ?
-        2 * ::art::gc::space::kDefaultValgrindRedZoneBytes :  // Redzones before and after.
+    size_t memory_tool_modifier =  is_running_on_memory_tool_ ?
+        2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :  // Redzones before and after.
         0;
     while (i < pm_end) {
       uint8_t pm = page_map_[i];
@@ -1938,15 +1942,15 @@
             idx++;
           }
           uint8_t* start = base_ + i * kPageSize;
-          if (running_on_valgrind_) {
-            start += ::art::gc::space::kDefaultValgrindRedZoneBytes;
+          if (is_running_on_memory_tool_) {
+            start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
           }
           mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
           size_t obj_size = obj->SizeOf();
-          CHECK_GT(obj_size + valgrind_modifier, kLargeSizeThreshold)
+          CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
               << "A rosalloc large object size must be > " << kLargeSizeThreshold;
-          CHECK_EQ(num_pages, RoundUp(obj_size + valgrind_modifier, kPageSize) / kPageSize)
-              << "A rosalloc large object size " << obj_size + valgrind_modifier
+          CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
+              << "A rosalloc large object size " << obj_size + memory_tool_modifier
               << " does not match the page map table " << (num_pages * kPageSize)
               << std::endl << DumpPageMap();
           i += num_pages;
@@ -2011,11 +2015,11 @@
   }
   // Call Verify() here for the lock order.
   for (auto& run : runs) {
-    run->Verify(self, this, running_on_valgrind_);
+    run->Verify(self, this, is_running_on_memory_tool_);
   }
 }
 
-void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind) {
+void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) {
   DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
   const size_t idx = size_bracket_idx_;
   CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
@@ -2098,8 +2102,8 @@
   }
   // Check each slot.
   size_t slots = 0;
-  size_t valgrind_modifier = running_on_valgrind ?
-      2 * ::art::gc::space::kDefaultValgrindRedZoneBytes :
+  size_t memory_tool_modifier = running_on_memory_tool ?
+      2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
       0U;
   for (size_t v = 0; v < num_vec; v++, slots += 32) {
     DCHECK_GE(num_slots, slots) << "Out of bounds";
@@ -2113,16 +2117,16 @@
       bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
       if (is_allocated && !is_thread_local_freed) {
         uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
-        if (running_on_valgrind) {
-          slot_addr += ::art::gc::space::kDefaultValgrindRedZoneBytes;
+        if (running_on_memory_tool) {
+          slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
         }
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
         size_t obj_size = obj->SizeOf();
-        CHECK_LE(obj_size + valgrind_modifier, kLargeSizeThreshold)
+        CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
             << "A run slot contains a large object " << Dump();
-        CHECK_EQ(SizeToIndex(obj_size + valgrind_modifier), idx)
+        CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
             << PrettyTypeOf(obj) << " "
-            << "obj_size=" << obj_size << "(" << obj_size + valgrind_modifier << "), idx=" << idx
+            << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
             << " A run slot contains an object with wrong size " << Dump();
       }
     }
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0fcfe72..c356a39 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -253,7 +253,7 @@
     // Dump the run metadata for debugging.
     std::string Dump();
     // Verify for debugging.
-    void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind)
+    void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool)
         EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
         EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
 
@@ -503,7 +503,7 @@
   const size_t page_release_size_threshold_;
 
   // Whether this allocator is running under Valgrind.
-  bool running_on_valgrind_;
+  bool is_running_on_memory_tool_;
 
   // The base address of the memory region that's managed by this allocator.
   uint8_t* Begin() { return base_; }
@@ -561,7 +561,7 @@
  public:
   RosAlloc(void* base, size_t capacity, size_t max_capacity,
            PageReleaseMode page_release_mode,
-           bool running_on_valgrind,
+           bool running_on_memory_tool,
            size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
   ~RosAlloc();
 
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 2e66160..cb750eb 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -92,7 +92,7 @@
   } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
              (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
              LIKELY(obj != nullptr)) {
-    DCHECK(!running_on_valgrind_);
+    DCHECK(!is_running_on_memory_tool_);
     obj->SetClass(klass);
     if (kUseBakerOrBrooksReadBarrier) {
       if (kUseBrooksReadBarrier) {
@@ -244,8 +244,8 @@
       break;
     }
     case kAllocatorTypeRosAlloc: {
-      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
-        // If running on valgrind, we should be using the instrumented path.
+      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
+        // If running on valgrind or asan, we should be using the instrumented path.
         size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
         if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                       max_bytes_tl_bulk_allocated))) {
@@ -254,7 +254,7 @@
         ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
       } else {
-        DCHECK(!running_on_valgrind_);
+        DCHECK(!is_running_on_memory_tool_);
         size_t max_bytes_tl_bulk_allocated =
             rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
         if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
@@ -270,12 +270,12 @@
       break;
     }
     case kAllocatorTypeDlMalloc: {
-      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
+      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
         // If running on valgrind, we should be using the instrumented path.
         ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
       } else {
-        DCHECK(!running_on_valgrind_);
+        DCHECK(!is_running_on_memory_tool_);
         ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                                bytes_tl_bulk_allocated);
       }
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0ae9cdf..6317351 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -192,7 +192,7 @@
       total_allocation_time_(0),
       verify_object_mode_(kVerifyObjectModeDisabled),
       disable_moving_gc_count_(0),
-      running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
+      is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()),
       use_tlab_(use_tlab),
       main_space_backup_(nullptr),
       min_interval_homogeneous_space_compaction_by_oom_(
@@ -518,7 +518,7 @@
   if (gc_stress_mode_) {
     backtrace_lock_ = new Mutex("GC complete lock");
   }
-  if (running_on_valgrind_ || gc_stress_mode_) {
+  if (is_running_on_memory_tool_ || gc_stress_mode_) {
     instrumentation->InstrumentQuickAllocEntryPoints();
   }
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -2077,9 +2077,12 @@
 // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size.
 class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
  public:
-  explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"),
-      bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) {
-  }
+  explicit ZygoteCompactingCollector(gc::Heap* heap,
+                                     bool is_running_on_memory_tool)
+      : SemiSpace(heap, false, "zygote collector"),
+        bin_live_bitmap_(nullptr),
+        bin_mark_bitmap_(nullptr),
+        is_running_on_memory_tool_(is_running_on_memory_tool) {}
 
   void BuildBins(space::ContinuousSpace* space) {
     bin_live_bitmap_ = space->GetLiveBitmap();
@@ -2105,6 +2108,7 @@
   accounting::ContinuousSpaceBitmap* bin_live_bitmap_;
   // Mark bitmap of the space which contains the bins.
   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
+  const bool is_running_on_memory_tool_;
 
   static void Callback(mirror::Object* obj, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -2119,6 +2123,9 @@
   }
 
   void AddBin(size_t size, uintptr_t position) {
+    if (is_running_on_memory_tool_) {
+      MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
+    }
     if (size != 0) {
       bins_.insert(std::make_pair(size, position));
     }
@@ -2212,7 +2219,7 @@
     // Temporarily disable rosalloc verification because the zygote
     // compaction will mess up the rosalloc internal metadata.
     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
-    ZygoteCompactingCollector zygote_collector(this);
+    ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_);
     zygote_collector.BuildBins(non_moving_space_);
     // Create a new bump pointer space which we will compact into.
     space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d0040f2..2df5a4e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1173,7 +1173,7 @@
   collector::MarkCompact* mark_compact_collector_;
   collector::ConcurrentCopying* concurrent_copying_collector_;
 
-  const bool running_on_valgrind_;
+  const bool is_running_on_memory_tool_;
   const bool use_tlab_;
 
   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 5237c7b..e1c5b64 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -20,13 +20,13 @@
 #include "gc/accounting/card_table.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
+#include "memory_tool_malloc_space-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "runtime.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "utils.h"
-#include "valgrind_malloc_space-inl.h"
 
 namespace art {
 namespace gc {
@@ -62,8 +62,8 @@
 
   // Everything is set so record in immutable structure and leave
   uint8_t* begin = mem_map->Begin();
-  if (Runtime::Current()->RunningOnValgrind()) {
-    return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
+  if (Runtime::Current()->IsRunningOnMemoryTool()) {
+    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
         mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
         can_move_objects, starting_size);
   } else {
@@ -152,8 +152,8 @@
                                            void* allocator, uint8_t* begin, uint8_t* end,
                                            uint8_t* limit, size_t growth_limit,
                                            bool can_move_objects) {
-  if (Runtime::Current()->RunningOnValgrind()) {
-    return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
+  if (Runtime::Current()->IsRunningOnMemoryTool()) {
+    return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>(
         mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
         can_move_objects, starting_size_);
   } else {
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 1f80f1f..ab527a4 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -30,7 +30,7 @@
 namespace space {
 
 // An alloc space is a space where objects may be allocated and garbage collected. Not final as may
-// be overridden by a ValgrindMallocSpace.
+// be overridden by a MemoryToolMallocSpace.
 class DlMallocSpace : public MallocSpace {
  public:
   // Create a DlMallocSpace from an existing mem_map.
@@ -46,27 +46,27 @@
   static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                size_t capacity, uint8_t* requested_begin, bool can_move_objects);
 
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated)
       OVERRIDE LOCKS_EXCLUDED(lock_);
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
       OVERRIDE LOCKS_EXCLUDED(lock_) {
     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                            bytes_tl_bulk_allocated);
   }
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
     return AllocationSizeNonvirtual(obj, usable_size);
   }
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
       LOCKS_EXCLUDED(lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  // Virtual to allow ValgrindMallocSpace to intercept.
+  // Virtual to allow MemoryToolMallocSpace to intercept.
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
       LOCKS_EXCLUDED(lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2b567fe..a913e59 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -34,12 +34,12 @@
 namespace gc {
 namespace space {
 
-class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
  public:
-  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
+  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
   }
 
-  ~ValgrindLargeObjectMapSpace() OVERRIDE {
+  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
     // Keep valgrind happy if there is any large objects such as dex cache arrays which aren't
     // freed since they are held live by the class linker.
     MutexLock mu(Thread::Current(), lock_);
@@ -52,13 +52,14 @@
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
       OVERRIDE {
     mirror::Object* obj =
-        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
+        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                    usable_size, bytes_tl_bulk_allocated);
     mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
-    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
-    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
-                               kValgrindRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
+    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
+    MEMORY_TOOL_MAKE_NOACCESS(
+        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
+        kMemoryToolRedZoneBytes);
     if (usable_size != nullptr) {
       *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
     }
@@ -75,7 +76,7 @@
 
   size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
     mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
-    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
+    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
     return LargeObjectMapSpace::Free(self, object_with_rdz);
   }
 
@@ -86,15 +87,15 @@
  private:
   static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
     return reinterpret_cast<const mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
   }
 
   static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
     return reinterpret_cast<mirror::Object*>(
-        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
   }
 
-  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
+  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
 };
 
 void LargeObjectSpace::SwapBitmaps() {
@@ -121,8 +122,8 @@
       lock_("large object map space lock", kAllocSpaceLock) {}
 
 LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
-  if (Runtime::Current()->RunningOnValgrind()) {
-    return new ValgrindLargeObjectMapSpace(name);
+  if (Runtime::Current()->IsRunningOnMemoryTool()) {
+    return new MemoryToolLargeObjectMapSpace(name);
   } else {
     return new LargeObjectMapSpace(name);
   }
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 9495864..6c689cd 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -20,6 +20,7 @@
 #include "space.h"
 
 #include <ostream>
+#include "base/memory_tool.h"
 
 namespace art {
 namespace gc {
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
similarity index 72%
rename from runtime/gc/space/valgrind_malloc_space-inl.h
rename to runtime/gc/space/memory_tool_malloc_space-inl.h
index bc329e1..ea8b8aa 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -14,22 +14,20 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
 
-#include "valgrind_malloc_space.h"
-
-#include <memcheck/memcheck.h>
-
-#include "valgrind_settings.h"
+#include "base/memory_tool.h"
+#include "memory_tool_malloc_space.h"
+#include "memory_tool_settings.h"
 
 namespace art {
 namespace gc {
 namespace space {
 
-namespace valgrind_details {
+namespace memory_tool_details {
 
-template <size_t kValgrindRedZoneBytes, bool kUseObjSizeForUsable>
+template <size_t kMemoryToolRedZoneBytes, bool kUseObjSizeForUsable>
 inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
                                          size_t bytes_allocated, size_t usable_size,
                                          size_t bytes_tl_bulk_allocated,
@@ -48,26 +46,26 @@
     if (kUseObjSizeForUsable) {
       *usable_size_out = num_bytes;
     } else {
-      *usable_size_out = usable_size - 2 * kValgrindRedZoneBytes;
+      *usable_size_out = usable_size - 2 * kMemoryToolRedZoneBytes;
     }
   }
 
   // Left redzone.
-  VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
+  MEMORY_TOOL_MAKE_NOACCESS(obj_with_rdz, kMemoryToolRedZoneBytes);
 
   // Make requested memory readable.
   // (If the allocator assumes memory is zeroed out, we might get UNDEFINED warnings, so make
   //  everything DEFINED initially.)
   mirror::Object* result = reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes);
-  VALGRIND_MAKE_MEM_DEFINED(result, num_bytes);
+      reinterpret_cast<uint8_t*>(obj_with_rdz) + kMemoryToolRedZoneBytes);
+  MEMORY_TOOL_MAKE_DEFINED(result, num_bytes);
 
   // Right redzone. Assumes that if bytes_allocated > usable_size, then the difference is
   // management data at the upper end, and for simplicity we will not protect that.
   // At the moment, this fits RosAlloc (no management data in a slot, usable_size == alloc_size)
   // and DlMalloc (allocation_size = (usable_size == num_bytes) + 4, 4 is management)
-  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes,
-                             usable_size - (num_bytes + kValgrindRedZoneBytes));
+  MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes,
+                            usable_size - (num_bytes + kMemoryToolRedZoneBytes));
 
   return result;
 }
@@ -76,15 +74,15 @@
   return obj->SizeOf<kVerifyNone>();
 }
 
-}  // namespace valgrind_details
+}  // namespace memory_tool_details
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 mirror::Object*
-ValgrindMallocSpace<S,
-                    kValgrindRedZoneBytes,
+MemoryToolMallocSpace<S,
+                    kMemoryToolRedZoneBytes,
                     kAdjustForRedzoneInAllocSize,
                     kUseObjSizeForUsable>::AllocWithGrowth(
     Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -92,14 +90,14 @@
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
+  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
                                           &bytes_allocated, &usable_size,
                                           &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
       obj_with_rdz, num_bytes,
       bytes_allocated, usable_size,
       bytes_tl_bulk_allocated,
@@ -109,11 +107,11 @@
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-mirror::Object* ValgrindMallocSpace<S,
-                                    kValgrindRedZoneBytes,
+mirror::Object* MemoryToolMallocSpace<S,
+                                    kMemoryToolRedZoneBytes,
                                     kAdjustForRedzoneInAllocSize,
                                     kUseObjSizeForUsable>::Alloc(
     Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -121,13 +119,13 @@
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
+  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
                                 &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
+  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes,
                                              kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
                                                                    bytes_allocated, usable_size,
                                                                    bytes_tl_bulk_allocated,
@@ -137,11 +135,11 @@
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-mirror::Object* ValgrindMallocSpace<S,
-                                    kValgrindRedZoneBytes,
+mirror::Object* MemoryToolMallocSpace<S,
+                                    kMemoryToolRedZoneBytes,
                                     kAdjustForRedzoneInAllocSize,
                                     kUseObjSizeForUsable>::AllocThreadUnsafe(
     Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
@@ -149,14 +147,14 @@
   size_t bytes_allocated;
   size_t usable_size;
   size_t bytes_tl_bulk_allocated;
-  void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kValgrindRedZoneBytes,
+  void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
                                             &bytes_allocated, &usable_size,
                                             &bytes_tl_bulk_allocated);
   if (obj_with_rdz == nullptr) {
     return nullptr;
   }
 
-  return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+  return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
       obj_with_rdz, num_bytes,
       bytes_allocated, usable_size,
       bytes_tl_bulk_allocated,
@@ -166,38 +164,39 @@
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
-                           kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+                           kMemoryToolRedZoneBytes,
                            kAdjustForRedzoneInAllocSize,
                            kUseObjSizeForUsable>::AllocationSize(
     mirror::Object* obj, size_t* usable_size) {
   size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
-      reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kValgrindRedZoneBytes : 0)),
+      reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
       usable_size);
   if (usable_size != nullptr) {
     if (kUseObjSizeForUsable) {
-      *usable_size = valgrind_details::GetObjSizeNoThreadSafety(obj);
+      *usable_size = memory_tool_details::GetObjSizeNoThreadSafety(obj);
     } else {
-      *usable_size = *usable_size - 2 * kValgrindRedZoneBytes;
+      *usable_size = *usable_size - 2 * kMemoryToolRedZoneBytes;
     }
   }
   return result;
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
-                           kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+                           kMemoryToolRedZoneBytes,
                            kAdjustForRedzoneInAllocSize,
                            kUseObjSizeForUsable>::Free(
     Thread* self, mirror::Object* ptr) {
   void* obj_after_rdz = reinterpret_cast<void*>(ptr);
-  uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes;
+  uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kMemoryToolRedZoneBytes;
+
   // Make redzones undefined.
   size_t usable_size;
   size_t allocation_size = AllocationSize(ptr, &usable_size);
@@ -206,20 +205,20 @@
   // Use the obj-size-for-usable flag to determine whether usable_size is the more important one,
   // e.g., whether there's data in the allocation_size (and usable_size can't be trusted).
   if (kUseObjSizeForUsable) {
-    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
+    MEMORY_TOOL_MAKE_UNDEFINED(obj_with_rdz, allocation_size);
   } else {
-    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, usable_size + 2 * kValgrindRedZoneBytes);
+    MEMORY_TOOL_MAKE_UNDEFINED(obj_with_rdz, usable_size + 2 * kMemoryToolRedZoneBytes);
   }
 
   return S::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
-                           kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+                           kMemoryToolRedZoneBytes,
                            kAdjustForRedzoneInAllocSize,
                            kUseObjSizeForUsable>::FreeList(
     Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
@@ -232,32 +231,33 @@
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
 template <typename... Params>
-ValgrindMallocSpace<S,
-                    kValgrindRedZoneBytes,
+MemoryToolMallocSpace<S,
+                    kMemoryToolRedZoneBytes,
                     kAdjustForRedzoneInAllocSize,
-                    kUseObjSizeForUsable>::ValgrindMallocSpace(
+                    kUseObjSizeForUsable>::MemoryToolMallocSpace(
     MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) {
-  VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size,
-                              mem_map->Size() - initial_size);
+  MEMORY_TOOL_MAKE_DEFINED(mem_map->Begin(), initial_size);
+  MEMORY_TOOL_MAKE_UNDEFINED(mem_map->Begin() + initial_size,
+                             mem_map->Size() - initial_size);
 }
 
 template <typename S,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-size_t ValgrindMallocSpace<S,
-                           kValgrindRedZoneBytes,
+size_t MemoryToolMallocSpace<S,
+                           kMemoryToolRedZoneBytes,
                            kAdjustForRedzoneInAllocSize,
                            kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
-  return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kValgrindRedZoneBytes);
+  return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kMemoryToolRedZoneBytes);
 }
 
 }  // namespace space
 }  // namespace gc
 }  // namespace art
 
-#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+#endif  // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
similarity index 78%
rename from runtime/gc/space/valgrind_malloc_space.h
rename to runtime/gc/space/memory_tool_malloc_space.h
index a6b010a..64c6f35 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -14,24 +14,22 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
 
 #include "malloc_space.h"
 
-#include <valgrind.h>
-
 namespace art {
 namespace gc {
 namespace space {
 
-// A specialization of DlMallocSpace/RosAllocSpace that places valgrind red zones around
-// allocations.
+// A specialization of DlMallocSpace/RosAllocSpace that places memory tool red
+// zones around allocations.
 template <typename BaseMallocSpaceType,
-          size_t kValgrindRedZoneBytes,
+          size_t kMemoryToolRedZoneBytes,
           bool kAdjustForRedzoneInAllocSize,
           bool kUseObjSizeForUsable>
-class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
+class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
  public:
   mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                   size_t* usable_size, size_t* bytes_tl_bulk_allocated)
@@ -57,15 +55,15 @@
   size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
 
   template <typename... Params>
-  explicit ValgrindMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
-  virtual ~ValgrindMallocSpace() {}
+  explicit MemoryToolMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
+  virtual ~MemoryToolMallocSpace() {}
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
+  DISALLOW_COPY_AND_ASSIGN(MemoryToolMallocSpace);
 };
 
 }  // namespace space
 }  // namespace gc
 }  // namespace art
 
-#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
+#endif  // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_
diff --git a/runtime/gc/space/valgrind_settings.h b/runtime/gc/space/memory_tool_settings.h
similarity index 80%
rename from runtime/gc/space/valgrind_settings.h
rename to runtime/gc/space/memory_tool_settings.h
index 73da0fd..e9333c8 100644
--- a/runtime/gc/space/valgrind_settings.h
+++ b/runtime/gc/space/memory_tool_settings.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
-#define ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
+#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
+#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
 
 namespace art {
 namespace gc {
@@ -23,10 +23,10 @@
 
 // Default number of bytes to use as a red zone (rdz). A red zone of this size will be placed before
 // and after each allocation. 8 bytes provides long/double alignment.
-static constexpr size_t kDefaultValgrindRedZoneBytes = 8;
+static constexpr size_t kDefaultMemoryToolRedZoneBytes = 8;
 
 }  // namespace space
 }  // namespace gc
 }  // namespace art
 
-#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_
+#endif  // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index f94ec23..8bff2b4 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -17,10 +17,9 @@
 #ifndef ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
 #define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_
 
-#include <valgrind.h>
-
+#include "base/memory_tool.h"
 #include "gc/allocator/rosalloc-inl.h"
-#include "gc/space/valgrind_settings.h"
+#include "gc/space/memory_tool_settings.h"
 #include "rosalloc_space.h"
 #include "thread.h"
 
@@ -28,26 +27,26 @@
 namespace gc {
 namespace space {
 
-template<bool kMaybeRunningOnValgrind>
+template<bool kMaybeIsRunningOnMemoryTool>
 inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
   // obj is a valid object. Use its class in the header to get the size.
   // Don't use verification since the object may be dead if we are sweeping.
   size_t size = obj->SizeOf<kVerifyNone>();
-  bool running_on_valgrind = false;
-  if (kMaybeRunningOnValgrind) {
-    running_on_valgrind = RUNNING_ON_VALGRIND != 0;
-    if (running_on_valgrind) {
-      size += 2 * kDefaultValgrindRedZoneBytes;
+  bool add_redzones = false;
+  if (kMaybeIsRunningOnMemoryTool) {
+    add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : 0;
+    if (add_redzones) {
+      size += 2 * kDefaultMemoryToolRedZoneBytes;
     }
   } else {
-    DCHECK_EQ(RUNNING_ON_VALGRIND, 0U);
+    DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
   }
   size_t size_by_size = rosalloc_->UsableSize(size);
   if (kIsDebugBuild) {
-    // On valgrind, the red zone has an impact...
+    // When running on a memory tool, the red zone has an impact...
     const uint8_t* obj_ptr = reinterpret_cast<const uint8_t*>(obj);
     size_t size_by_ptr = rosalloc_->UsableSize(
-        obj_ptr - (running_on_valgrind ? kDefaultValgrindRedZoneBytes : 0));
+        obj_ptr - (add_redzones ? kDefaultMemoryToolRedZoneBytes : 0));
     if (size_by_size != size_by_ptr) {
       LOG(INFO) << "Found a bad sized obj of size " << size
                 << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index bc4414d..1a193c3 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -30,7 +30,7 @@
 #include "thread.h"
 #include "thread_list.h"
 #include "utils.h"
-#include "valgrind_malloc_space-inl.h"
+#include "memory_tool_malloc_space-inl.h"
 
 namespace art {
 namespace gc {
@@ -43,7 +43,7 @@
 static constexpr bool kVerifyFreedBytes = false;
 
 // TODO: Fix
-// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
+// template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
 
 RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                              art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
@@ -61,10 +61,10 @@
                                                bool low_memory_mode, bool can_move_objects) {
   DCHECK(mem_map != nullptr);
 
-  bool running_on_valgrind = Runtime::Current()->RunningOnValgrind();
+  bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool();
 
   allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
-                                                 capacity, low_memory_mode, running_on_valgrind);
+                                                 capacity, low_memory_mode, running_on_memory_tool);
   if (rosalloc == nullptr) {
     LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
     return nullptr;
@@ -78,10 +78,10 @@
 
   // Everything is set so record in immutable structure and leave
   uint8_t* begin = mem_map->Begin();
-  // TODO: Fix RosAllocSpace to support valgrind. There is currently some issues with
+  // TODO: Fix RosAllocSpace to support Valgrind/ASan. There are currently some issues with
   // AllocationSize caused by redzones. b/12944686
-  if (running_on_valgrind) {
-    return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
+  if (running_on_memory_tool) {
+    return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
         mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
         can_move_objects, starting_size, low_memory_mode);
   } else {
@@ -134,7 +134,7 @@
 allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
                                                    size_t initial_size,
                                                    size_t maximum_size, bool low_memory_mode,
-                                                   bool running_on_valgrind) {
+                                                   bool running_on_memory_tool) {
   // clear errno to allow PLOG on error
   errno = 0;
   // create rosalloc using our backing storage starting at begin and
@@ -145,7 +145,7 @@
       low_memory_mode ?
           art::gc::allocator::RosAlloc::kPageReleaseModeAll :
           art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
-      running_on_valgrind);
+      running_on_memory_tool);
   if (rosalloc != nullptr) {
     rosalloc->SetFootprintLimit(initial_size);
   } else {
@@ -180,8 +180,8 @@
                                            void* allocator, uint8_t* begin, uint8_t* end,
                                            uint8_t* limit, size_t growth_limit,
                                            bool can_move_objects) {
-  if (Runtime::Current()->RunningOnValgrind()) {
-    return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
+  if (Runtime::Current()->IsRunningOnMemoryTool()) {
+    return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
         mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
         limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
   } else {
@@ -370,7 +370,7 @@
   delete rosalloc_;
   rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
                              NonGrowthLimitCapacity(), low_memory_mode_,
-                             Runtime::Current()->RunningOnValgrind());
+                             Runtime::Current()->IsRunningOnMemoryTool());
   SetFootprintLimit(footprint_limit);
 }
 
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 36268f7..9dc6f31 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -31,7 +31,7 @@
 namespace space {
 
 // An alloc space implemented using a runs-of-slots memory allocator. Not final as may be
-// overridden by a ValgrindMallocSpace.
+// overridden by a MemoryToolMallocSpace.
 class RosAllocSpace : public MallocSpace {
  public:
   // Create a RosAllocSpace with the requested sizes. The requested
@@ -95,7 +95,7 @@
   ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
 
   // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
-  template<bool kMaybeRunningOnValgrind>
+  template<bool kMaybeIsRunningOnMemoryTool>
   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
       NO_THREAD_SAFETY_ANALYSIS;
 
@@ -158,11 +158,11 @@
   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                         size_t maximum_size, bool low_memory_mode) OVERRIDE {
     return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
-                          RUNNING_ON_VALGRIND != 0);
+                          RUNNING_ON_MEMORY_TOOL != 0);
   }
   static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
                                              size_t maximum_size, bool low_memory_mode,
-                                             bool running_on_valgrind);
+                                             bool running_on_memory_tool);
 
   void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                           void* arg, bool do_null_callback_at_end)
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 7e640c6..dbae7f8 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -16,6 +16,7 @@
 
 #include "mem_map.h"
 
+#include "base/memory_tool.h"
 #include <backtrace/BacktraceMap.h>
 #include <inttypes.h>
 
@@ -481,6 +482,12 @@
   uint8_t* page_aligned_expected =
       (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
 
+  size_t redzone_size = 0;
+  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
+    redzone_size = kPageSize;
+    page_aligned_byte_count += redzone_size;
+  }
+
   uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
                                               page_aligned_byte_count,
                                               prot,
@@ -503,15 +510,35 @@
   if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
     return nullptr;
   }
+  if (redzone_size != 0) {
+    const uint8_t *real_start = actual + page_offset;
+    const uint8_t *real_end = actual + page_offset + byte_count;
+    const uint8_t *mapping_end = actual + page_aligned_byte_count;
+
+    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
+    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
+    page_aligned_byte_count -= redzone_size;
+  }
+
   return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
-                    prot, reuse);
+                    prot, reuse, redzone_size);
 }
 
 MemMap::~MemMap() {
   if (base_begin_ == nullptr && base_size_ == 0) {
     return;
   }
+
+  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory be unpoisoned
+  // before it is returned to the system.
+  if (redzone_size_ != 0) {
+    MEMORY_TOOL_MAKE_UNDEFINED(
+        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
+        redzone_size_);
+  }
+
   if (!reuse_) {
+    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
     int result = munmap(base_begin_, base_size_);
     if (result == -1) {
       PLOG(FATAL) << "munmap failed";
@@ -534,9 +561,9 @@
 }
 
 MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
-               size_t base_size, int prot, bool reuse)
+               size_t base_size, int prot, bool reuse, size_t redzone_size)
     : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
-      prot_(prot), reuse_(reuse) {
+      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
   if (size_ == 0) {
     CHECK(begin_ == nullptr);
     CHECK(base_begin_ == nullptr);
@@ -595,6 +622,8 @@
   int flags = MAP_PRIVATE | MAP_ANONYMOUS;
 #endif
 
+
+  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
   // Unmap/map the tail region.
   int result = munmap(tail_base_begin, tail_base_size);
   if (result == -1) {
@@ -778,6 +807,10 @@
   CHECK_ALIGNED(new_size, kPageSize);
   CHECK_EQ(base_size_, size_) << "Unsupported";
   CHECK_LE(new_size, base_size_);
+  MEMORY_TOOL_MAKE_UNDEFINED(
+      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
+                              new_size),
+      base_size_ - new_size);
   CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                   base_size_ - new_size), 0) << new_size << " " << base_size_;
   base_size_ = new_size;
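
Taken together, the mem_map.cc changes give every non-fixed anonymous mapping one extra page, poison the slop beyond the caller-visible byte_count, and unpoison the whole mapping again right before munmap, because AddressSanitizer keeps manual poisoning in its shadow even after the pages are gone. A self-contained sketch of that lifecycle using ASan's public interface (not ART code; assumes a 4 KB page and a hypothetical 10000-byte request):

// Sketch only -- build with: clang++ -fsanitize=address redzone_sketch.cc
#include <sanitizer/asan_interface.h>
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kPageSize = 4096;  // Assumed page size for this sketch.

int main() {
  const size_t byte_count = 10000;  // What the caller asked for.
  size_t page_aligned_byte_count = (byte_count + kPageSize - 1) & ~(kPageSize - 1);
  const size_t redzone_size = kPageSize;  // One extra page, as in MapAnonymous above.
  page_aligned_byte_count += redzone_size;

  void* raw = mmap(nullptr, page_aligned_byte_count, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(raw != MAP_FAILED);
  uint8_t* actual = static_cast<uint8_t*>(raw);

  // The caller-visible range is [actual, actual + byte_count); everything past
  // it, including the extra page, becomes an inaccessible redzone.
  uint8_t* real_end = actual + byte_count;
  uint8_t* mapping_end = actual + page_aligned_byte_count;
  ASAN_POISON_MEMORY_REGION(real_end, mapping_end - real_end);

  actual[0] = 1;              // Fine: inside the usable range.
  // actual[byte_count] = 1;  // ASan would flag this as a use of poisoned memory.

  // Unpoison before handing the pages back, as MemMap::~MemMap() now does;
  // otherwise stale redzones would linger in the shadow for later mappings.
  ASAN_UNPOISON_MEMORY_REGION(actual, page_aligned_byte_count);
  munmap(actual, page_aligned_byte_count);
  return 0;
}
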
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 14387ee..01e29c9 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -153,7 +153,7 @@
 
  private:
   MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
-         int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+         int prot, bool reuse, size_t redzone_size = 0) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
 
   static void DumpMapsLocked(std::ostream& os, bool terse)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
@@ -175,6 +175,8 @@
   // unmapping.
   const bool reuse_;
 
+  const size_t redzone_size_;
+
 #if USE_ART_LOW_4G_ALLOCATOR
   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
 #endif
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index f635b5d..13bf5b7 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -18,7 +18,7 @@
 
 #include <memory>
 
-#include <valgrind.h>
+#include "base/memory_tool.h"
 
 #include "gtest/gtest.h"
 
@@ -216,7 +216,7 @@
 TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
   CommonInit();
   // This test may not work under valgrind.
-  if (RUNNING_ON_VALGRIND == 0) {
+  if (RUNNING_ON_MEMORY_TOOL == 0) {
     uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
     std::string error_msg;
     std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 884662d..a0dcbbc 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -26,7 +26,7 @@
 #include <cutils/trace.h>
 #include <signal.h>
 #include <sys/syscall.h>
-#include <valgrind.h>
+#include "base/memory_tool.h"
 
 #include <cstdio>
 #include <cstdlib>
@@ -177,7 +177,7 @@
       exit_(nullptr),
       abort_(nullptr),
       stats_enabled_(false),
-      running_on_valgrind_(RUNNING_ON_VALGRIND > 0),
+      is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
       profiler_started_(false),
       instrumentation_(),
       main_thread_group_(nullptr),
@@ -938,7 +938,7 @@
     case kMips64:
       implicit_null_checks_ = true;
       // Installing stack protection does not play well with valgrind.
-      implicit_so_checks_ = (RUNNING_ON_VALGRIND == 0);
+      implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
       break;
     default:
       // Keep the defaults.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6fd1b07..70e62f8 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -511,8 +511,8 @@
     return cpu_abilist_;
   }
 
-  bool RunningOnValgrind() const {
-    return running_on_valgrind_;
+  bool IsRunningOnMemoryTool() const {
+    return is_running_on_memory_tool_;
   }
 
   void SetTargetSdkVersion(int32_t version) {
@@ -677,7 +677,7 @@
   bool stats_enabled_;
   RuntimeStats stats_;
 
-  const bool running_on_valgrind_;
+  const bool is_running_on_memory_tool_;
 
   std::string profile_output_filename_;
   ProfilerOptions profiler_options_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6656fe5..75fff43 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -35,6 +35,7 @@
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/bit_utils.h"
+#include "base/memory_tool.h"
 #include "base/mutex.h"
 #include "base/timing_logger.h"
 #include "base/to_str.h"
@@ -81,6 +82,12 @@
 ConditionVariable* Thread::resume_cond_ = nullptr;
 const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
 
+// For implicit overflow checks we reserve an extra piece of memory at the bottom
+// of the stack (lowest memory).  The higher portion of the memory
+// is protected against reads and the lower is available for use while
+// throwing the StackOverflow exception.
+constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;
+
 static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
 
 void Thread::InitCardTable() {
diff --git a/runtime/thread.h b/runtime/thread.h
index 0e71c08..6a2f17c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -138,11 +138,6 @@
 
 class Thread {
  public:
-  // For implicit overflow checks we reserve an extra piece of memory at the bottom
-  // of the stack (lowest memory).  The higher portion of the memory
-  // is protected against reads and the lower is available for use while
-  // throwing the StackOverflow exception.
-  static constexpr size_t kStackOverflowProtectedSize = 4 * KB;
   static const size_t kStackOverflowImplicitCheckSize;
 
   // Creates a new native thread corresponding to the given managed peer.
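
Moving kStackOverflowProtectedSize out of thread.h lets it scale with kMemoryToolStackGuardSizeScale: instrumented code has larger frames and redzones, so the implicit stack-overflow probe can land further from the stack limit than a fixed 4 KB guard covers. A worked example of the arithmetic, with illustrative scale values rather than the ones defined in base/memory_tool.h:

// Illustrative only: how the protected region grows with the memory-tool scale.
#include <cstddef>

static constexpr size_t KB = 1024;
static constexpr size_t kScalePlain = 1;  // Hypothetical: no memory tool.
static constexpr size_t kScaleAsan = 2;   // Hypothetical: ASan-instrumented build.

static_assert(4 * kScalePlain * KB == 4 * 1024, "plain build keeps the old 4 KB guard");
static_assert(4 * kScaleAsan * KB == 8 * 1024, "instrumented build gets a larger guard");
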
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 4923342..194d9fe 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1094,7 +1094,7 @@
     ArtMethod* current_method, void* ucontext_ptr) {
 #if __linux__
   // b/18119146
-  if (RUNNING_ON_VALGRIND != 0) {
+  if (RUNNING_ON_MEMORY_TOOL != 0) {
     return;
   }
 
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 66e38b1..f00edff 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -26,7 +26,7 @@
 #include "scoped_thread_state_change.h"
 #include "handle_scope-inl.h"
 
-#include <valgrind.h>
+#include "base/memory_tool.h"
 
 namespace art {
 
@@ -358,7 +358,7 @@
     command.push_back("/usr/bin/id");
   }
   std::string error_msg;
-  if (RUNNING_ON_VALGRIND == 0) {
+  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
     // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
     EXPECT_TRUE(Exec(command, &error_msg));
   }
@@ -372,7 +372,7 @@
   std::vector<std::string> command;
   command.push_back("bogus");
   std::string error_msg;
-  if (RUNNING_ON_VALGRIND == 0) {
+  if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
     // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
     EXPECT_FALSE(Exec(command, &error_msg));
     EXPECT_NE(0U, error_msg.size());
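
The leak-sensitive Exec() tests stay gated by a runtime expression rather than an #ifdef because the two tools are detected differently: Valgrind is only known at runtime (RUNNING_ON_VALGRIND is a client request), whereas an ASan build is known at compile time, where the whole condition folds to a constant. A hypothetical helper expressing the same guard, assuming the sketched memory_tool.h above:

// Hypothetical helper (the tests above keep the condition inline).
static inline bool MemoryToolReportsLeaks() {
  return RUNNING_ON_MEMORY_TOOL != 0 && kMemoryToolDetectsLeaks;
}
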