Handle segfaults while walking native heap

Vendor blobs on ryu mprotect heap pages, causing segfaults when dumping
unreachable memory.  Handle segfaults within HeapWalker by mapping a
zero page over any unreadable pages.  HeapWalker runs in the forked
process, so the mapping will not affect the original process.

Bug: 28269332
Change-Id: I16245af722123f2ad467cbc6f245a70666c55544
(cherry picked from commit ba5d9ff6d9674a0f1e985b49f53863045aff558d)
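
The recovery relies on the kernel restarting the faulting load after the signal handler returns: once a readable zero page is mapped MAP_FIXED over the bad address, the retried read yields zeroes, which never fall inside the valid allocation range, so the walk simply continues. A minimal standalone sketch of that mechanism (all names here are illustrative; this program is not part of the change):

    // Build as a normal C++ program.  The handler replaces the faulting page
    // with a readable anonymous (zero-filled) mapping; when it returns, the
    // faulting instruction restarts and now reads zero.
    #include <signal.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void segv_handler(int, siginfo_t* si, void*) {
      const size_t page_size = sysconf(_SC_PAGE_SIZE);
      void* page = reinterpret_cast<void*>(
          reinterpret_cast<uintptr_t>(si->si_addr) & ~(page_size - 1));
      if (mmap(page, page_size, PROT_READ,
               MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0) == MAP_FAILED) {
        _exit(1);  // cannot recover; avoid faulting forever on the same address
      }
    }

    int main() {
      struct sigaction act = {};
      act.sa_sigaction = segv_handler;
      act.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &act, nullptr);

      // Simulate a vendor blob calling mprotect(PROT_NONE) on a heap page.
      const size_t page_size = sysconf(_SC_PAGE_SIZE);
      void* p = mmap(nullptr, page_size, PROT_NONE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (p == MAP_FAILED) return 1;

      uintptr_t value = *reinterpret_cast<volatile uintptr_t*>(p);
      printf("read %#zx after recovery\n", static_cast<size_t>(value));  // prints 0
      return 0;
    }
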
diff --git a/libmemunreachable/HeapWalker.cpp b/libmemunreachable/HeapWalker.cpp
index 19393ec..faa6fe2 100644
--- a/libmemunreachable/HeapWalker.cpp
+++ b/libmemunreachable/HeapWalker.cpp
@@ -14,7 +14,10 @@
  * limitations under the License.
  */
 
+#include <errno.h>
 #include <inttypes.h>
+#include <sys/mman.h>
+#include <unistd.h>
 
 #include <map>
 #include <utility>
@@ -22,6 +25,7 @@
 #include "Allocator.h"
 #include "HeapWalker.h"
 #include "LeakFolding.h"
+#include "ScopedSignalHandler.h"
 #include "log.h"
 
 bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
@@ -46,9 +50,15 @@
   }
 }
 
-bool HeapWalker::IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info) {
-  if (ptr >= valid_allocations_range_.begin && ptr < valid_allocations_range_.end) {
-    AllocationMap::iterator it = allocations_.find(Range{ptr, ptr + 1});
+bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range, AllocationInfo** info) {
+  walking_ptr_ = word_ptr;
+  // This access may segfault if the process under test has done something strange,
+  // for example mprotect(PROT_NONE) on a native heap page.  If so, it will be
+  // caught and handled by mmapping a zero page over the faulting page.
+  uintptr_t value = *reinterpret_cast<uintptr_t*>(word_ptr);
+  walking_ptr_ = 0;
+  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
+    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
     if (it != allocations_.end()) {
       *range = it->first;
       *info = &it->second;
@@ -135,3 +145,30 @@
 
   return true;
 }
+
+static bool MapOverPage(void* addr) {
+  const size_t page_size = sysconf(_SC_PAGE_SIZE);
+  void *page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size-1));
+
+  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
+  if (ret == MAP_FAILED) {
+    ALOGE("failed to map page at %p: %s", page, strerror(errno));
+    return false;
+  }
+
+  return true;
+}
+
+void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si, void* /*uctx*/) {
+  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
+  if (addr != walking_ptr_) {
+    handler.reset();
+    return;
+  }
+  ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
+  if (!MapOverPage(si->si_addr)) {
+    handler.reset();
+  }
+}
+
+ScopedSignalHandler::SignalFn ScopedSignalHandler::handler_;
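
HandleSegFault only recovers faults at exactly the word WordContainsAllocationPtr is about to dereference (walking_ptr_); for any other address it calls handler.reset() and returns, so the instruction restarts, faults again, and is handled by whatever disposition was in place before. A standalone sketch of that re-raise path (names are illustrative, not part of the change):

    #include <signal.h>

    static struct sigaction g_old_act;

    static void segv_handler(int, siginfo_t*, void*) {
      // Equivalent of handler.reset(): restore the previous disposition and
      // return.  The faulting access restarts, faults again, and is now
      // handled by the old disposition (here the default, which terminates).
      sigaction(SIGSEGV, &g_old_act, nullptr);
    }

    int main() {
      struct sigaction act = {};
      act.sa_sigaction = segv_handler;
      act.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &act, &g_old_act);

      *reinterpret_cast<volatile int*>(16) = 0;  // a fault the walker does not own
      return 0;
    }
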
diff --git a/libmemunreachable/HeapWalker.h b/libmemunreachable/HeapWalker.h
index 7b851c4..140f3ea 100644
--- a/libmemunreachable/HeapWalker.h
+++ b/libmemunreachable/HeapWalker.h
@@ -17,9 +17,12 @@
 #ifndef LIBMEMUNREACHABLE_HEAP_WALKER_H_
 #define LIBMEMUNREACHABLE_HEAP_WALKER_H_
 
+#include <signal.h>
+
 #include "android-base/macros.h"
 
 #include "Allocator.h"
+#include "ScopedSignalHandler.h"
 #include "Tarjan.h"
 
 // A range [begin, end)
@@ -41,10 +44,17 @@
  public:
   HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
     allocations_(allocator), allocation_bytes_(0),
-	roots_(allocator), root_vals_(allocator) {
+	roots_(allocator), root_vals_(allocator),
+	segv_handler_(allocator), walking_ptr_(0) {
     valid_allocations_range_.end = 0;
     valid_allocations_range_.begin = ~valid_allocations_range_.end;
+
+    segv_handler_.install(SIGSEGV,
+        [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
+          this->HandleSegFault(handler, signal, siginfo, uctx);
+      });
   }
+
   ~HeapWalker() {}
   bool Allocation(uintptr_t begin, uintptr_t end);
   void Root(uintptr_t begin, uintptr_t end);
@@ -70,7 +80,8 @@
  private:
 
   void RecurseRoot(const Range& root);
-  bool IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
+  bool WordContainsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
+  void HandleSegFault(ScopedSignalHandler&, int, siginfo_t*, void*);
 
   DISALLOW_COPY_AND_ASSIGN(HeapWalker);
   Allocator<HeapWalker> allocator_;
@@ -81,6 +92,9 @@
 
   allocator::vector<Range> roots_;
   allocator::vector<uintptr_t> root_vals_;
+
+  ScopedSignalHandler segv_handler_;
+  uintptr_t walking_ptr_;
 };
 
 template<class F>
@@ -92,7 +106,7 @@
   for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
     Range ref_range;
     AllocationInfo* ref_info;
-    if (IsAllocationPtr(*reinterpret_cast<uintptr_t*>(i), &ref_range, &ref_info)) {
+    if (WordContainsAllocationPtr(i, &ref_range, &ref_info)) {
       f(ref_range, ref_info);
     }
   }
diff --git a/libmemunreachable/ScopedSignalHandler.h b/libmemunreachable/ScopedSignalHandler.h
new file mode 100644
index 0000000..e006d43
--- /dev/null
+++ b/libmemunreachable/ScopedSignalHandler.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
+#define LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
+
+#include <errno.h>
+#include <signal.h>
+
+#include <functional>
+
+#include "android-base/macros.h"
+
+#include "log.h"
+
+class ScopedSignalHandler {
+ public:
+  using Fn = std::function<void(ScopedSignalHandler&, int, siginfo_t*, void*)>;
+
+  ScopedSignalHandler(Allocator<Fn> allocator) : allocator_(allocator), signal_(-1) {}
+  ~ScopedSignalHandler() {
+    reset();
+  }
+
+  template <class F>
+  void install(int signal, F&& f) {
+    LOG_ALWAYS_FATAL_IF(signal_ != -1, "ScopedSignalHandler already installed");
+
+    handler_ = SignalFn(std::allocator_arg, allocator_,
+        [=](int signal, siginfo_t* si, void* uctx) {
+          f(*this, signal, si, uctx);
+        });
+
+    struct sigaction act{};
+    act.sa_sigaction = [](int signal, siginfo_t* si, void* uctx) {
+      handler_(signal, si, uctx);
+    };
+    act.sa_flags = SA_SIGINFO;
+
+    int ret = sigaction(signal, &act, &old_act_);
+    if (ret < 0) {
+      LOG_ALWAYS_FATAL("failed to install segfault handler: %s", strerror(errno));
+    }
+
+    signal_ = signal;
+  }
+
+  void reset() {
+    if (signal_ != -1) {
+      int ret = sigaction(signal_, &old_act_, NULL);
+      if (ret < 0) {
+        ALOGE("failed to uninstall segfault handler");
+      }
+      handler_ = SignalFn{};
+      signal_ = -1;
+    }
+  }
+
+
+ private:
+  using SignalFn = std::function<void(int, siginfo_t*, void*)>;
+  DISALLOW_COPY_AND_ASSIGN(ScopedSignalHandler);
+  Allocator<Fn> allocator_;
+  int signal_;
+  struct sigaction old_act_;
+  // TODO(ccross): to support multiple ScopedSignalHandlers handler_ would need
+  // to be a static map of signals to handlers, but allocated with Allocator.
+  static SignalFn handler_;
+};
+
+#endif // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
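
For reference, this is how a caller is expected to drive the new class: install() registers a lambda for the lifetime of the object, the lambda receives the handler so it can reset() when it declines to handle a fault, and the destructor restores the previous disposition. The function below is hypothetical; ScopedSignalHandler, install() and reset() are from the header above, while Heap and Allocator come from libmemunreachable's existing Allocator.h.

    #include <signal.h>

    #include "Allocator.h"
    #include "ScopedSignalHandler.h"

    void GuardedWalk(Heap& heap) {
      ScopedSignalHandler segv_handler(heap);  // Heap converts to Allocator<Fn>
      segv_handler.install(SIGSEGV,
          [](ScopedSignalHandler& handler, int /*signal*/, siginfo_t* /*si*/, void* /*uctx*/) {
            // A real consumer (e.g. HeapWalker::HandleSegFault) would attempt
            // recovery first; declining just restores the old disposition so
            // the fault is re-delivered rather than silently swallowed.
            handler.reset();
          });
      // ... reads that may fault are routed through the lambda ...
    }  // ~ScopedSignalHandler() also calls reset()
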
diff --git a/libmemunreachable/tests/HeapWalker_test.cpp b/libmemunreachable/tests/HeapWalker_test.cpp
index c3e1c4d..98e4aa1 100644
--- a/libmemunreachable/tests/HeapWalker_test.cpp
+++ b/libmemunreachable/tests/HeapWalker_test.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+#include <sys/mman.h>
+#include <unistd.h>
+
 #include "HeapWalker.h"
 
 #include <gtest/gtest.h>
@@ -172,3 +175,25 @@
   EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
 }
+
+TEST_F(HeapWalkerTest, segv) {
+  const size_t page_size = sysconf(_SC_PAGE_SIZE);
+  void* buffer1 = mmap(NULL, page_size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+  ASSERT_NE(buffer1, nullptr);
+  void* buffer2[1] = {buffer1};
+
+  HeapWalker heap_walker(heap_);
+  heap_walker.Allocation(buffer_begin(buffer1), buffer_begin(buffer1)+page_size);
+  heap_walker.Root(buffer_begin(buffer2), buffer_end(buffer2));
+
+  ASSERT_EQ(true, heap_walker.DetectLeaks());
+
+  allocator::vector<Range> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(0U, num_leaks);
+  EXPECT_EQ(0U, leaked_bytes);
+  ASSERT_EQ(0U, leaked.size());
+}