Merge "libmemunreachable: clang-format everything" am: 75752c1911
am: 8f596198ed

Change-Id: Id94a854054aa7837aced4b9560900c584e2566b9
diff --git a/Allocator.cpp b/Allocator.cpp
index 6fe67a4..da6db20 100644
--- a/Allocator.cpp
+++ b/Allocator.cpp
@@ -33,9 +33,9 @@
 
 #include "android-base/macros.h"
 
-#include "anon_vma_naming.h"
 #include "Allocator.h"
 #include "LinkedList.h"
+#include "anon_vma_naming.h"
 
 // runtime interfaces used:
 // abort
@@ -57,10 +57,9 @@
 static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
 static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
 static constexpr size_t kMinBucketAllocationSize = 8;
-static constexpr unsigned int kNumBuckets = const_log2(kMaxBucketAllocationSize)
-    - const_log2(kMinBucketAllocationSize) + 1;
-static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize
-    / kPageSize;
+static constexpr unsigned int kNumBuckets =
+    const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
+static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;
 
 std::atomic<int> heap_count;
 
@@ -93,7 +92,7 @@
   void FreeLocked(void* ptr);
 
   struct MapAllocation {
-    void *ptr;
+    void* ptr;
     size_t size;
     MapAllocation* next;
   };
@@ -107,8 +106,7 @@
 }
 
 static inline unsigned int size_to_bucket(size_t size) {
-  if (size < kMinBucketAllocationSize)
-    return kMinBucketAllocationSize;
+  if (size < kMinBucketAllocationSize) return kMinBucketAllocationSize;
   return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
 }
 
@@ -140,8 +138,7 @@
 
   // Trim beginning
   if (aligned_ptr != ptr) {
-    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr)
-        - reinterpret_cast<uintptr_t>(ptr);
+    ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
     munmap(ptr, extra);
     map_size -= extra;
     ptr = aligned_ptr;
@@ -151,14 +148,13 @@
   if (map_size != size) {
     assert(map_size > size);
     assert(ptr != NULL);
-    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size),
-        map_size - size);
+    munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
   }
 
-#define PR_SET_VMA   0x53564d41
-#define PR_SET_VMA_ANON_NAME    0
-  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
-      reinterpret_cast<uintptr_t>(ptr), size, "leak_detector_malloc");
+#define PR_SET_VMA 0x53564d41
+#define PR_SET_VMA_ANON_NAME 0
+  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
+        "leak_detector_malloc");
 
   return ptr;
 }
@@ -170,36 +166,31 @@
   Chunk(HeapImpl* heap, int bucket);
   ~Chunk() {}
 
-  void *Alloc();
+  void* Alloc();
   void Free(void* ptr);
   void Purge();
   bool Empty();
 
   static Chunk* ptr_to_chunk(void* ptr) {
-    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr)
-        & ~(kChunkSize - 1));
+    return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
   }
   static bool is_chunk(void* ptr) {
     return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
   }
 
-  unsigned int free_count() {
-    return free_count_;
-  }
-  HeapImpl* heap() {
-    return heap_;
-  }
-  LinkedList<Chunk*> node_; // linked list sorted by minimum free count
+  unsigned int free_count() { return free_count_; }
+  HeapImpl* heap() { return heap_; }
+  LinkedList<Chunk*> node_;  // linked list sorted by minimum free count
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Chunk);
   HeapImpl* heap_;
   unsigned int bucket_;
-  unsigned int allocation_size_; // size of allocations in chunk, min 8 bytes
-  unsigned int max_allocations_; // maximum number of allocations in the chunk
-  unsigned int first_free_bitmap_; // index into bitmap for first non-full entry
-  unsigned int free_count_; // number of available allocations
-  unsigned int frees_since_purge_; // number of calls to Free since last Purge
+  unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
+  unsigned int max_allocations_;    // maximum number of allocations in the chunk
+  unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
+  unsigned int free_count_;         // number of available allocations
+  unsigned int frees_since_purge_;  // number of calls to Free since last Purge
 
   // bitmap of pages that have been dirtied
   uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];
@@ -210,13 +201,10 @@
   char data_[0];
 
   unsigned int ptr_to_n(void* ptr) {
-    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr)
-        - reinterpret_cast<uintptr_t>(data_);
+    ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
     return offset / allocation_size_;
   }
-  void* n_to_ptr(unsigned int n) {
-    return data_ + n * allocation_size_;
-  }
+  void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
 };
 static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
 
@@ -225,23 +213,27 @@
   assert(count == sizeof(Chunk));
   void* mem = MapAligned(kChunkSize, kChunkSize);
   if (!mem) {
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }
 
   return mem;
 }
 
 // Override delete operator on chunk to munmap the kChunkSize mapping
-void Chunk::operator delete(void *ptr) {
+void Chunk::operator delete(void* ptr) {
   assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
   munmap(ptr, kChunkSize);
 }
 
-Chunk::Chunk(HeapImpl* heap, int bucket) :
-    node_(this), heap_(heap), bucket_(bucket), allocation_size_(
-        bucket_to_size(bucket)), max_allocations_(
-        kUsableChunkSize / allocation_size_), first_free_bitmap_(0), free_count_(
-        max_allocations_), frees_since_purge_(0) {
+Chunk::Chunk(HeapImpl* heap, int bucket)
+    : node_(this),
+      heap_(heap),
+      bucket_(bucket),
+      allocation_size_(bucket_to_size(bucket)),
+      max_allocations_(kUsableChunkSize / allocation_size_),
+      first_free_bitmap_(0),
+      free_count_(max_allocations_),
+      frees_since_purge_(0) {
   memset(dirty_pages_, 0, sizeof(dirty_pages_));
   memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
 }
@@ -254,8 +246,7 @@
   assert(free_count_ > 0);
 
   unsigned int i = first_free_bitmap_;
-  while (free_bitmap_[i] == 0)
-    i++;
+  while (free_bitmap_[i] == 0) i++;
   assert(i < arraysize(free_bitmap_));
   unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
   assert(free_bitmap_[i] & (1U << bit));
@@ -306,38 +297,35 @@
 void Chunk::Purge() {
   frees_since_purge_ = 0;
 
-  //unsigned int allocsPerPage = kPageSize / allocation_size_;
+  // unsigned int allocsPerPage = kPageSize / allocation_size_;
 }
 
 // Override new operator on HeapImpl to use mmap to allocate a page
-void* HeapImpl::operator new(std::size_t count __attribute__((unused)))
-    noexcept {
+void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
   assert(count == sizeof(HeapImpl));
   void* mem = MapAligned(kPageSize, kPageSize);
   if (!mem) {
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }
 
   heap_count++;
   return mem;
 }
 
-void HeapImpl::operator delete(void *ptr) {
+void HeapImpl::operator delete(void* ptr) {
   munmap(ptr, kPageSize);
 }
 
-HeapImpl::HeapImpl() :
-    free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {
-}
+HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}
 
 bool HeapImpl::Empty() {
   for (unsigned int i = 0; i < kNumBuckets; i++) {
-    for (LinkedList<Chunk*> *it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
+    for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
       if (!it->data()->Empty()) {
         return false;
       }
     }
-    for (LinkedList<Chunk*> *it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
+    for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
       if (!it->data()->Empty()) {
         return false;
       }
@@ -350,12 +338,12 @@
 HeapImpl::~HeapImpl() {
   for (unsigned int i = 0; i < kNumBuckets; i++) {
     while (!free_chunks_[i].empty()) {
-      Chunk *chunk = free_chunks_[i].next()->data();
+      Chunk* chunk = free_chunks_[i].next()->data();
       chunk->node_.remove();
       delete chunk;
     }
     while (!full_chunks_[i].empty()) {
-      Chunk *chunk = full_chunks_[i].next()->data();
+      Chunk* chunk = full_chunks_[i].next()->data();
       chunk->node_.remove();
       delete chunk;
     }
@@ -373,18 +361,18 @@
   }
   int bucket = size_to_bucket(size);
   if (free_chunks_[bucket].empty()) {
-    Chunk *chunk = new Chunk(this, bucket);
+    Chunk* chunk = new Chunk(this, bucket);
     free_chunks_[bucket].insert(chunk->node_);
   }
   return free_chunks_[bucket].next()->data()->Alloc();
 }
 
-void HeapImpl::Free(void *ptr) {
+void HeapImpl::Free(void* ptr) {
   std::lock_guard<std::mutex> lk(m_);
   FreeLocked(ptr);
 }
 
-void HeapImpl::FreeLocked(void *ptr) {
+void HeapImpl::FreeLocked(void* ptr) {
   if (!Chunk::is_chunk(ptr)) {
     HeapImpl::MapFree(ptr);
   } else {
@@ -397,12 +385,11 @@
 void* HeapImpl::MapAlloc(size_t size) {
   size = (size + kPageSize - 1) & ~(kPageSize - 1);
 
-  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(
-      sizeof(MapAllocation)));
+  MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
   void* ptr = MapAligned(size, kChunkSize);
   if (!ptr) {
     FreeLocked(allocation);
-    abort(); //throw std::bad_alloc;
+    abort();  // throw std::bad_alloc;
   }
   allocation->ptr = ptr;
   allocation->size = size;
@@ -412,10 +399,9 @@
   return ptr;
 }
 
-void HeapImpl::MapFree(void *ptr) {
-  MapAllocation **allocation = &map_allocation_list_;
-  while (*allocation && (*allocation)->ptr != ptr)
-    allocation = &(*allocation)->next;
+void HeapImpl::MapFree(void* ptr) {
+  MapAllocation** allocation = &map_allocation_list_;
+  while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;
 
   assert(*allocation != nullptr);
 
@@ -425,22 +411,22 @@
   *allocation = (*allocation)->next;
 }
 
-void HeapImpl::MoveToFreeList(Chunk *chunk, int bucket) {
+void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
   MoveToList(chunk, &free_chunks_[bucket]);
 }
 
-void HeapImpl::MoveToFullList(Chunk *chunk, int bucket) {
+void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
   MoveToList(chunk, &full_chunks_[bucket]);
 }
 
-void HeapImpl::MoveToList(Chunk *chunk, LinkedList<Chunk*>* head) {
+void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
   // Remove from old list
   chunk->node_.remove();
 
-  LinkedList<Chunk*> *node = head;
+  LinkedList<Chunk*>* node = head;
   // Insert into new list, sorted by lowest free count
-  while (node->next() != head && node->data() != nullptr
-      && node->data()->free_count() < chunk->free_count())
+  while (node->next() != head && node->data() != nullptr &&
+         node->data()->free_count() < chunk->free_count())
     node = node->next();
 
   node->insert(chunk->node_);
@@ -469,7 +455,7 @@
   impl_->Free(ptr);
 }
 
-void Heap::deallocate(HeapImpl*impl, void* ptr) {
+void Heap::deallocate(HeapImpl* impl, void* ptr) {
   impl->Free(ptr);
 }
 
diff --git a/Allocator.h b/Allocator.h
index 5390739..67a068f 100644
--- a/Allocator.h
+++ b/Allocator.h
@@ -31,14 +31,13 @@
 
 class HeapImpl;
 
-template<typename T>
+template <typename T>
 class Allocator;
 
-
 // Non-templated class that wraps HeapImpl to keep
 // implementation out of the header file
 class Heap {
-public:
+ public:
   Heap();
   ~Heap();
 
@@ -59,110 +58,99 @@
   static void deallocate(HeapImpl* impl, void* ptr);
 
   // Allocate a class of type T
-  template<class T>
+  template <class T>
   T* allocate() {
     return reinterpret_cast<T*>(allocate(sizeof(T)));
   }
 
   // Comparators, copied objects will be equal
-  bool operator ==(const Heap& other) const {
-    return impl_ == other.impl_;
-  }
-  bool operator !=(const Heap& other) const {
-    return !(*this == other);
-  }
+  bool operator==(const Heap& other) const { return impl_ == other.impl_; }
+  bool operator!=(const Heap& other) const { return !(*this == other); }
 
   // std::unique_ptr wrapper that allocates using allocate and deletes using
   // deallocate
-  template<class T>
+  template <class T>
   using unique_ptr = std::unique_ptr<T, std::function<void(void*)>>;
 
-  template<class T, class... Args>
+  template <class T, class... Args>
   unique_ptr<T> make_unique(Args&&... args) {
     HeapImpl* impl = impl_;
-    return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...),
-        [impl](void* ptr) {
-          reinterpret_cast<T*>(ptr)->~T();
-          deallocate(impl, ptr);
-        });
+    return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...), [impl](void* ptr) {
+      reinterpret_cast<T*>(ptr)->~T();
+      deallocate(impl, ptr);
+    });
   }
 
   // std::shared_ptr wrapper that allocates using allocate and deletes using
   // deallocate
-  template<class T>
+  template <class T>
   using shared_ptr = std::shared_ptr<T>;
 
-  template<class T, class... Args>
+  template <class T, class... Args>
   shared_ptr<T> make_shared(Args&&... args);
 
-protected:
+ protected:
   HeapImpl* impl_;
   bool owns_impl_;
 };
 
 // STLAllocator implements the std allocator interface on top of a Heap
-template<typename T>
+template <typename T>
 class STLAllocator {
-public:
+ public:
   using value_type = T;
-  ~STLAllocator() {
-  }
+  ~STLAllocator() {}
 
   // Construct an STLAllocator on top of a Heap
-  STLAllocator(const Heap& heap) :  // NOLINT, implicit
-      heap_(heap) {
-  }
+  STLAllocator(const Heap& heap)
+      :  // NOLINT, implicit
+        heap_(heap) {}
 
   // Rebind an STLAllocator from an another STLAllocator
-  template<typename U>
-  STLAllocator(const STLAllocator<U>& other) :  // NOLINT, implicit
-      heap_(other.heap_) {
-  }
+  template <typename U>
+  STLAllocator(const STLAllocator<U>& other)
+      :  // NOLINT, implicit
+        heap_(other.heap_) {}
 
   STLAllocator(const STLAllocator&) = default;
   STLAllocator<T>& operator=(const STLAllocator<T>&) = default;
 
-  T* allocate(std::size_t n) {
-    return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T)));
-  }
+  T* allocate(std::size_t n) { return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T))); }
 
-  void deallocate(T* ptr, std::size_t) {
-    heap_.deallocate(ptr);
-  }
+  void deallocate(T* ptr, std::size_t) { heap_.deallocate(ptr); }
 
-  template<typename U>
-  bool operator ==(const STLAllocator<U>& other) const {
+  template <typename U>
+  bool operator==(const STLAllocator<U>& other) const {
     return heap_ == other.heap_;
   }
-  template<typename U>
-  inline bool operator !=(const STLAllocator<U>& other) const {
+  template <typename U>
+  inline bool operator!=(const STLAllocator<U>& other) const {
     return !(*this == other);
   }
 
-  template<typename U>
+  template <typename U>
   friend class STLAllocator;
 
-protected:
+ protected:
   Heap heap_;
 };
 
-
 // Allocator extends STLAllocator with some convenience methods for allocating
 // a single object and for constructing unique_ptr and shared_ptr objects with
 // appropriate deleters.
-template<class T>
+template <class T>
 class Allocator : public STLAllocator<T> {
  public:
   ~Allocator() {}
 
-  Allocator(const Heap& other) : // NOLINT, implicit
-      STLAllocator<T>(other) {
-  }
+  Allocator(const Heap& other)
+      :  // NOLINT, implicit
+        STLAllocator<T>(other) {}
 
-  template<typename U>
-  Allocator(const STLAllocator<U>& other) :  // NOLINT, implicit
-      STLAllocator<T>(other) {
-  }
+  template <typename U>
+  Allocator(const STLAllocator<U>& other)
+      :  // NOLINT, implicit
+        STLAllocator<T>(other) {}
 
   Allocator(const Allocator&) = default;
   Allocator<T>& operator=(const Allocator<T>&) = default;
@@ -171,24 +159,20 @@
   using STLAllocator<T>::deallocate;
   using STLAllocator<T>::heap_;
 
-  T* allocate() {
-    return STLAllocator<T>::allocate(1);
-  }
-  void deallocate(void* ptr) {
-    heap_.deallocate(ptr);
-  }
+  T* allocate() { return STLAllocator<T>::allocate(1); }
+  void deallocate(void* ptr) { heap_.deallocate(ptr); }
 
   using shared_ptr = Heap::shared_ptr<T>;
 
-  template<class... Args>
-  shared_ptr make_shared(Args&& ...args) {
+  template <class... Args>
+  shared_ptr make_shared(Args&&... args) {
     return heap_.template make_shared<T>(std::forward<Args>(args)...);
   }
 
   using unique_ptr = Heap::unique_ptr<T>;
 
-  template<class... Args>
-  unique_ptr make_unique(Args&& ...args) {
+  template <class... Args>
+  unique_ptr make_unique(Args&&... args) {
     return heap_.template make_unique<T>(std::forward<Args>(args)...);
   }
 };
@@ -196,30 +180,31 @@
 // std::shared_ptr wrapper that allocates using allocate and deletes using
 // deallocate.  Implemented outside class definition in order to pass
 // Allocator<T> to shared_ptr.
-template<class T, class... Args>
+template <class T, class... Args>
 inline Heap::shared_ptr<T> Heap::make_shared(Args&&... args) {
   return std::allocate_shared<T, Allocator<T>, Args...>(Allocator<T>(*this),
-      std::forward<Args>(args)...);
+                                                        std::forward<Args>(args)...);
 }
 
 namespace allocator {
 
-template<class T>
+template <class T>
 using vector = std::vector<T, Allocator<T>>;
 
-template<class T>
+template <class T>
 using list = std::list<T, Allocator<T>>;
 
-template<class Key, class T, class Compare = std::less<Key>>
+template <class Key, class T, class Compare = std::less<Key>>
 using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;
 
-template<class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
-using unordered_map = std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
+template <class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
+using unordered_map =
+    std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
 
-template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
+template <class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
 using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;
 
-template<class Key, class Compare = std::less<Key>>
+template <class Key, class Compare = std::less<Key>>
 using set = std::set<Key, Compare, Allocator<Key>>;
 
 using string = std::basic_string<char, std::char_traits<char>, Allocator<char>>;
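For orientation, a minimal usage sketch of the Heap/Allocator interfaces formatted above (illustrative only; "example" is a hypothetical function name, and the header is assumed to pull in its own STL dependencies):

    #include "Allocator.h"

    void example() {
      Heap heap;                          // default Heap owns its HeapImpl
      Allocator<int> alloc(heap);         // implicit Heap -> Allocator conversion
      allocator::vector<int> v(alloc);    // std::vector backed by the Heap
      v.push_back(42);
      auto p = heap.make_unique<int>(7);  // deleter destroys and frees via the Heap
    }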
diff --git a/HeapWalker.cpp b/HeapWalker.cpp
index c365ae5..df16f40 100644
--- a/HeapWalker.cpp
+++ b/HeapWalker.cpp
@@ -114,8 +114,8 @@
   return true;
 }
 
-bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
-    size_t* num_leaks_out, size_t* leak_bytes_out) {
+bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
+                        size_t* leak_bytes_out) {
   leaked.clear();
 
   size_t num_leaks = 0;
@@ -148,9 +148,9 @@
 
 static bool MapOverPage(void* addr) {
   const size_t page_size = sysconf(_SC_PAGE_SIZE);
-  void *page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size-1));
+  void* page = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));
 
-  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
+  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
   if (ret == MAP_FAILED) {
     MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
     return false;
@@ -159,7 +159,8 @@
   return true;
 }
 
-void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si, void* /*uctx*/) {
+void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
+                                void* /*uctx*/) {
   uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
   if (addr != walking_ptr_) {
     handler.reset();
diff --git a/HeapWalker.h b/HeapWalker.h
index b25696f..865965a 100644
--- a/HeapWalker.h
+++ b/HeapWalker.h
@@ -34,31 +34,31 @@
   bool operator==(const Range& other) const {
     return this->begin == other.begin && this->end == other.end;
   }
-  bool operator!=(const Range& other) const {
-    return !(*this == other);
-  }
+  bool operator!=(const Range& other) const { return !(*this == other); }
 };
 
 // Comparator for Ranges that returns equivalence for overlapping ranges
 struct compare_range {
-  bool operator()(const Range& a, const Range& b) const {
-    return a.end <= b.begin;
-  }
+  bool operator()(const Range& a, const Range& b) const { return a.end <= b.begin; }
 };
 
 class HeapWalker {
  public:
-  explicit HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
-    allocations_(allocator), allocation_bytes_(0),
-	roots_(allocator), root_vals_(allocator),
-	segv_handler_(allocator), walking_ptr_(0) {
+  explicit HeapWalker(Allocator<HeapWalker> allocator)
+      : allocator_(allocator),
+        allocations_(allocator),
+        allocation_bytes_(0),
+        roots_(allocator),
+        root_vals_(allocator),
+        segv_handler_(allocator),
+        walking_ptr_(0) {
     valid_allocations_range_.end = 0;
     valid_allocations_range_.begin = ~valid_allocations_range_.end;
 
-    segv_handler_.install(SIGSEGV,
-        [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
+    segv_handler_.install(
+        SIGSEGV, [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
           this->HandleSegFault(handler, signal, siginfo, uctx);
-      });
+        });
   }
 
   ~HeapWalker() {}
@@ -68,15 +68,14 @@
 
   bool DetectLeaks();
 
-  bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks,
-      size_t* leak_bytes);
+  bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks, size_t* leak_bytes);
   size_t Allocations();
   size_t AllocationBytes();
 
-  template<class F>
+  template <class F>
   void ForEachPtrInRange(const Range& range, F&& f);
 
-  template<class F>
+  template <class F>
   void ForEachAllocation(F&& f);
 
   struct AllocationInfo {
@@ -84,7 +83,6 @@
   };
 
  private:
-
   void RecurseRoot(const Range& root);
   bool WordContainsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
   void HandleSegFault(ScopedSignalHandler&, int, siginfo_t*, void*);
@@ -103,7 +101,7 @@
   uintptr_t walking_ptr_;
 };
 
-template<class F>
+template <class F>
 inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
   uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
   // TODO(ccross): we might need to consider a pointer to the end of a buffer
@@ -118,7 +116,7 @@
   }
 }
 
-template<class F>
+template <class F>
 inline void HeapWalker::ForEachAllocation(F&& f) {
   for (auto& it : allocations_) {
     const Range& range = it.first;
diff --git a/Leak.h b/Leak.h
index eaeeea7..db88e29 100644
--- a/Leak.h
+++ b/Leak.h
@@ -26,7 +26,7 @@
 // as a key in std::unordered_map.
 namespace std {
 
-template<>
+template <>
 struct hash<Leak::Backtrace> {
   std::size_t operator()(const Leak::Backtrace& key) const {
     std::size_t seed = 0;
@@ -40,7 +40,7 @@
   }
 
  private:
-  template<typename T>
+  template <typename T>
   inline void hash_combine(std::size_t& seed, const T& v) const {
     std::hash<T> hasher;
     seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
@@ -51,7 +51,7 @@
 
 static bool operator==(const Leak::Backtrace& lhs, const Leak::Backtrace& rhs) {
   return (lhs.num_frames == rhs.num_frames) &&
-      memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
+         memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
 }
 
 #endif
diff --git a/LeakFolding.cpp b/LeakFolding.cpp
index be4d20c..2dff672 100644
--- a/LeakFolding.cpp
+++ b/LeakFolding.cpp
@@ -31,11 +31,11 @@
 
   Allocator<SCCInfo> scc_allocator = allocator_;
 
-  for (auto& scc_nodes: scc_list) {
+  for (auto& scc_nodes : scc_list) {
     Allocator<SCCInfo>::unique_ptr leak_scc;
     leak_scc = scc_allocator.make_unique(scc_allocator);
 
-    for (auto& node: scc_nodes) {
+    for (auto& node : scc_nodes) {
       node->ptr->scc = leak_scc.get();
       leak_scc->count++;
       leak_scc->size += node->ptr->range.size();
@@ -46,7 +46,7 @@
 
   for (auto& it : leak_map_) {
     LeakInfo& leak = it.second;
-    for (auto& ref: leak.node.references_out) {
+    for (auto& ref : leak.node.references_out) {
       if (leak.scc != ref->ptr->scc) {
         leak.scc->node.Edge(&ref->ptr->scc->node);
       }
@@ -55,17 +55,14 @@
 }
 
 void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
-  std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_,
-      [&](SCCInfo* scc) {
-        if (scc->accumulator != dominator) {
-          scc->accumulator = dominator;
-          dominator->cuumulative_size += scc->size;
-          dominator->cuumulative_count += scc->count;
-          scc->node.Foreach([&](SCCInfo* ref) {
-            walk(ref);
-          });
-        }
-      });
+  std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_, [&](SCCInfo* scc) {
+    if (scc->accumulator != dominator) {
+      scc->accumulator = dominator;
+      dominator->cuumulative_size += scc->size;
+      dominator->cuumulative_count += scc->count;
+      scc->node.Foreach([&](SCCInfo* ref) { walk(ref); });
+    }
+  });
   walk(dominator);
 }
 
@@ -73,27 +70,25 @@
   Allocator<LeakInfo> leak_allocator = allocator_;
 
   // Find all leaked allocations and insert them into leak_map_ and leak_graph_
-  heap_walker_.ForEachAllocation(
-      [&](const Range& range, HeapWalker::AllocationInfo& allocation) {
-        if (!allocation.referenced_from_root) {
-          auto it = leak_map_.emplace(std::piecewise_construct,
-              std::forward_as_tuple(range),
-              std::forward_as_tuple(range, allocator_));
-          LeakInfo& leak = it.first->second;
-          leak_graph_.push_back(&leak.node);
-        }
-      });
+  heap_walker_.ForEachAllocation([&](const Range& range, HeapWalker::AllocationInfo& allocation) {
+    if (!allocation.referenced_from_root) {
+      auto it = leak_map_.emplace(std::piecewise_construct, std::forward_as_tuple(range),
+                                  std::forward_as_tuple(range, allocator_));
+      LeakInfo& leak = it.first->second;
+      leak_graph_.push_back(&leak.node);
+    }
+  });
 
   // Find references between leaked allocations and connect them in leak_graph_
   for (auto& it : leak_map_) {
     LeakInfo& leak = it.second;
     heap_walker_.ForEachPtrInRange(leak.range,
-        [&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
-          if (!ptr_info->referenced_from_root) {
-            LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
-            leak.node.Edge(&ptr_leak->node);
-          }
-        });
+                                   [&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
+                                     if (!ptr_info->referenced_from_root) {
+                                       LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
+                                       leak.node.Edge(&ptr_leak->node);
+                                     }
+                                   });
   }
 
   // Convert the cyclic graph to a DAG by grouping strongly connected components
@@ -110,8 +105,8 @@
   return true;
 }
 
-bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
-    size_t* num_leaks_out, size_t* leak_bytes_out) {
+bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked, size_t* num_leaks_out,
+                         size_t* leak_bytes_out) {
   size_t num_leaks = 0;
   size_t leak_bytes = 0;
   for (auto& it : leak_map_) {
@@ -123,9 +118,8 @@
   for (auto& it : leak_map_) {
     const LeakInfo& leak = it.second;
     if (leak.scc->dominator) {
-      leaked.emplace_back(Leak{leak.range,
-        leak.scc->cuumulative_count - 1,
-        leak.scc->cuumulative_size - leak.range.size()});
+      leaked.emplace_back(Leak{leak.range, leak.scc->cuumulative_count - 1,
+                               leak.scc->cuumulative_size - leak.range.size()});
     }
   }
 
diff --git a/LeakFolding.h b/LeakFolding.h
index 9c6a525..740b54f 100644
--- a/LeakFolding.h
+++ b/LeakFolding.h
@@ -22,8 +22,11 @@
 class LeakFolding {
  public:
   LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
-   : allocator_(allocator), heap_walker_(heap_walker),
-     leak_map_(allocator), leak_graph_(allocator), leak_scc_(allocator) {}
+      : allocator_(allocator),
+        heap_walker_(heap_walker),
+        leak_map_(allocator),
+        leak_graph_(allocator),
+        leak_scc_(allocator) {}
 
   bool FoldLeaks();
 
@@ -33,8 +36,7 @@
     size_t referenced_size;
   };
 
-  bool Leaked(allocator::vector<Leak>& leaked,
-      size_t* num_leaks_out, size_t* leak_bytes_out);
+  bool Leaked(allocator::vector<Leak>& leaked, size_t* num_leaks_out, size_t* leak_bytes_out);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(LeakFolding);
@@ -54,9 +56,15 @@
     bool dominator;
     SCCInfo* accumulator;
 
-    explicit SCCInfo(Allocator<SCCInfo> allocator) : node(this, allocator),
-        count(0), size(0), cuumulative_count(0), cuumulative_size(0),
-        dominator(false), accumulator(nullptr) {}
+    explicit SCCInfo(Allocator<SCCInfo> allocator)
+        : node(this, allocator),
+          count(0),
+          size(0),
+          cuumulative_count(0),
+          cuumulative_size(0),
+          dominator(false),
+          accumulator(nullptr) {}
+
    private:
     SCCInfo(SCCInfo&&) = delete;
     DISALLOW_COPY_AND_ASSIGN(SCCInfo);
@@ -71,8 +79,7 @@
     SCCInfo* scc;
 
     LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
-        : node(this, allocator), range(range),
-          scc(nullptr) {}
+        : node(this, allocator), range(range), scc(nullptr) {}
 
    private:
     DISALLOW_COPY_AND_ASSIGN(LeakInfo);
@@ -86,4 +93,4 @@
   allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
 };
 
-#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
+#endif  // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
diff --git a/LeakPipe.cpp b/LeakPipe.cpp
index 78117e2..aac5701 100644
--- a/LeakPipe.cpp
+++ b/LeakPipe.cpp
@@ -22,8 +22,8 @@
 #include "log.h"
 
 bool LeakPipe::SendFd(int sock, int fd) {
-  struct msghdr hdr{};
-  struct iovec iov{};
+  struct msghdr hdr {};
+  struct iovec iov {};
   unsigned int data = 0xfdfdfdfd;
   alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
 
@@ -56,8 +56,8 @@
 }
 
 int LeakPipe::ReceiveFd(int sock) {
-  struct msghdr hdr{};
-  struct iovec iov{};
+  struct msghdr hdr {};
+  struct iovec iov {};
   unsigned int data;
   alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
 
diff --git a/LeakPipe.h b/LeakPipe.h
index 3ea2d8f..e6aee5f 100644
--- a/LeakPipe.h
+++ b/LeakPipe.h
@@ -34,15 +34,13 @@
 class LeakPipe {
  public:
   LeakPipe() {
-    int ret = socketpair(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0, sv_);
+    int ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sv_);
     if (ret < 0) {
       MEM_LOG_ALWAYS_FATAL("failed to create socketpair: %s", strerror(errno));
     }
   }
 
-  ~LeakPipe() {
-    Close();
-  }
+  ~LeakPipe() { Close(); }
 
   void Close() {
     close(sv_[0]);
@@ -77,13 +75,9 @@
    public:
     LeakPipeBase() : fd_(-1) {}
 
-    ~LeakPipeBase() {
-      Close();
-    }
+    ~LeakPipeBase() { Close(); }
 
-    void SetFd(int fd) {
-      fd_ = fd;
-    }
+    void SetFd(int fd) { fd_ = fd; }
 
     void Close() {
       close(fd_);
@@ -101,7 +95,7 @@
    public:
     using LeakPipeBase::LeakPipeBase;
 
-    template<typename T>
+    template <typename T>
     bool Send(const T& value) {
       ssize_t ret = TEMP_FAILURE_RETRY(write(fd_, &value, sizeof(T)));
       if (ret < 0) {
@@ -115,7 +109,7 @@
       return true;
     }
 
-    template<class T, class Alloc = std::allocator<T>>
+    template <class T, class Alloc = std::allocator<T>>
     bool SendVector(const std::vector<T, Alloc>& vector) {
       size_t size = vector.size() * sizeof(T);
       if (!Send(size)) {
@@ -139,7 +133,7 @@
    public:
     using LeakPipeBase::LeakPipeBase;
 
-    template<typename T>
+    template <typename T>
     bool Receive(T* value) {
       ssize_t ret = TEMP_FAILURE_RETRY(read(fd_, reinterpret_cast<void*>(value), sizeof(T)));
       if (ret < 0) {
@@ -153,7 +147,7 @@
       return true;
     }
 
-    template<class T, class Alloc = std::allocator<T>>
+    template <class T, class Alloc = std::allocator<T>>
     bool ReceiveVector(std::vector<T, Alloc>& vector) {
       size_t size = 0;
       if (!Receive(&size)) {
@@ -178,16 +172,11 @@
 
       return true;
     }
-
   };
 
-  LeakPipeReceiver& Receiver() {
-    return receiver_;
-  }
+  LeakPipeReceiver& Receiver() { return receiver_; }
 
-  LeakPipeSender& Sender() {
-    return sender_;
-  }
+  LeakPipeSender& Sender() { return sender_; }
 
  private:
   LeakPipeReceiver receiver_;
@@ -198,4 +187,4 @@
   int sv_[2];
 };
 
-#endif // LIBMEMUNREACHABLE_LEAK_PIPE_H_
+#endif  // LIBMEMUNREACHABLE_LEAK_PIPE_H_
diff --git a/LineBuffer.cpp b/LineBuffer.cpp
index d3580c0..0709fdd 100644
--- a/LineBuffer.cpp
+++ b/LineBuffer.cpp
@@ -23,8 +23,8 @@
 
 #include "LineBuffer.h"
 
-LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len) : fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {
-}
+LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len)
+    : fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {}
 
 bool LineBuffer::GetLine(char** line, size_t* line_len) {
   while (true) {
diff --git a/LineBuffer.h b/LineBuffer.h
index a015c46..604836b 100644
--- a/LineBuffer.h
+++ b/LineBuffer.h
@@ -33,4 +33,4 @@
   size_t bytes_ = 0;
 };
 
-#endif // _LIBMEMUNREACHABLE_LINE_BUFFER_H
+#endif  // _LIBMEMUNREACHABLE_LINE_BUFFER_H
diff --git a/LinkedList.h b/LinkedList.h
index 132842d..f1b8a17 100644
--- a/LinkedList.h
+++ b/LinkedList.h
@@ -17,44 +17,43 @@
 #ifndef LIBMEMUNREACHABLE_LINKED_LIST_H_
 #define LIBMEMUNREACHABLE_LINKED_LIST_H_
 
-template<class T>
+template <class T>
 class LinkedList {
-public:
-    LinkedList() : next_(this), prev_(this), data_() {}
-    explicit LinkedList(T data) : LinkedList() {
-        data_ = data;
-    }
-    ~LinkedList() {}
-    void insert(LinkedList<T>& node) {
-        assert(node.empty());
-        node.next_ = this->next_;
-        node.next_->prev_ = &node;
-        this->next_ = &node;
-        node.prev_ = this;
-    }
-    void remove() {
-        this->next_->prev_ = this->prev_;
-        this->prev_->next_ = this->next_;
-        this->next_ = this;
-        this->prev_ = this;
-    }
-    T data() { return data_; }
-    bool empty() { return next_ == this && prev_ == this; }
-    LinkedList<T> *next() { return next_; }
-private:
-    LinkedList<T> *next_;
-    LinkedList<T> *prev_;
-    T data_;
+ public:
+  LinkedList() : next_(this), prev_(this), data_() {}
+  explicit LinkedList(T data) : LinkedList() { data_ = data; }
+  ~LinkedList() {}
+  void insert(LinkedList<T>& node) {
+    assert(node.empty());
+    node.next_ = this->next_;
+    node.next_->prev_ = &node;
+    this->next_ = &node;
+    node.prev_ = this;
+  }
+  void remove() {
+    this->next_->prev_ = this->prev_;
+    this->prev_->next_ = this->next_;
+    this->next_ = this;
+    this->prev_ = this;
+  }
+  T data() { return data_; }
+  bool empty() { return next_ == this && prev_ == this; }
+  LinkedList<T>* next() { return next_; }
+
+ private:
+  LinkedList<T>* next_;
+  LinkedList<T>* prev_;
+  T data_;
 };
 
-template<class T>
+template <class T>
 class LinkedListHead {
-public:
-    LinkedListHead() : node_() {}
-    ~LinkedListHead() {}
+ public:
+  LinkedListHead() : node_() {}
+  ~LinkedListHead() {}
 
-private:
-    LinkedList<T> node_;
+ private:
+  LinkedList<T> node_;
 };
 
 #endif
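The class above is an intrusive list, the same pattern Chunk::node_ relies on in Allocator.cpp; a short sketch of that usage (Item and sketch are hypothetical names):

    #include "LinkedList.h"

    struct Item {
      explicit Item(int v) : node_(this), value_(v) {}
      LinkedList<Item*> node_;  // embedded node; data() points back at the owner
      int value_;
    };

    void sketch() {
      LinkedList<Item*> head;  // sentinel; data() == nullptr marks the end
      Item a(1), b(2);
      head.insert(a.node_);
      head.insert(b.node_);  // insert() links the node directly after *this
      for (LinkedList<Item*>* it = head.next(); it->data() != nullptr; it = it->next()) {
        // visit it->data()->value_
      }
      b.node_.remove();  // unlink before the Items go out of scope
      a.node_.remove();
    }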
diff --git a/MemUnreachable.cpp b/MemUnreachable.cpp
index 1c84744..870cd1d 100644
--- a/MemUnreachable.cpp
+++ b/MemUnreachable.cpp
@@ -19,12 +19,12 @@
 #include <functional>
 #include <iomanip>
 #include <mutex>
-#include <string>
 #include <sstream>
+#include <string>
 #include <unordered_map>
 
-#include <backtrace.h>
 #include <android-base/macros.h>
+#include <backtrace.h>
 
 #include "Allocator.h"
 #include "HeapWalker.h"
@@ -37,9 +37,9 @@
 #include "Semaphore.h"
 #include "ThreadCapture.h"
 
-#include "memunreachable/memunreachable.h"
 #include "bionic.h"
 #include "log.h"
+#include "memunreachable/memunreachable.h"
 
 const size_t Leak::contents_length;
 
@@ -47,20 +47,21 @@
 
 class MemUnreachable {
  public:
-  MemUnreachable(pid_t pid, Allocator<void> allocator) : pid_(pid), allocator_(allocator),
-      heap_walker_(allocator_) {}
+  MemUnreachable(pid_t pid, Allocator<void> allocator)
+      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
   bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
-      const allocator::vector<Mapping>& mappings);
-  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
-      size_t* num_leaks, size_t* leak_bytes);
+                          const allocator::vector<Mapping>& mappings);
+  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit, size_t* num_leaks,
+                            size_t* leak_bytes);
   size_t Allocations() { return heap_walker_.Allocations(); }
   size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }
+
  private:
   bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
-      allocator::vector<Mapping>& heap_mappings,
-      allocator::vector<Mapping>& anon_mappings,
-      allocator::vector<Mapping>& globals_mappings,
-      allocator::vector<Mapping>& stack_mappings);
+                        allocator::vector<Mapping>& heap_mappings,
+                        allocator::vector<Mapping>& anon_mappings,
+                        allocator::vector<Mapping>& globals_mappings,
+                        allocator::vector<Mapping>& stack_mappings);
   DISALLOW_COPY_AND_ASSIGN(MemUnreachable);
   pid_t pid_;
   Allocator<void> allocator_;
@@ -68,16 +69,17 @@
 };
 
 static void HeapIterate(const Mapping& heap_mapping,
-    const std::function<void(uintptr_t, size_t)>& func) {
+                        const std::function<void(uintptr_t, size_t)>& func) {
   malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
-      [](uintptr_t base, size_t size, void* arg) {
-    auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
-    (*f)(base, size);
-  }, const_cast<void*>(reinterpret_cast<const void*>(&func)));
+                 [](uintptr_t base, size_t size, void* arg) {
+                   auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
+                   (*f)(base, size);
+                 },
+                 const_cast<void*>(reinterpret_cast<const void*>(&func)));
 }
 
 bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
-    const allocator::vector<Mapping>& mappings) {
+                                        const allocator::vector<Mapping>& mappings) {
   MEM_ALOGI("searching process %d for allocations", pid_);
   allocator::vector<Mapping> heap_mappings{mappings};
   allocator::vector<Mapping> anon_mappings{mappings};
@@ -118,8 +120,8 @@
   return true;
 }
 
-bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
-    size_t limit, size_t* num_leaks, size_t* leak_bytes) {
+bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
+                                          size_t* num_leaks, size_t* leak_bytes) {
   MEM_ALOGI("sweeping process %d for unreachable memory", pid_);
   leaks.clear();
 
@@ -127,7 +129,6 @@
     return false;
   }
 
-
   allocator::vector<Range> leaked1{allocator_};
   heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);
 
@@ -152,12 +153,12 @@
   // in backtrace_map.
   leaks.reserve(leaked.size());
 
-  for (auto& it: leaked) {
+  for (auto& it : leaked) {
     leaks.emplace_back();
     Leak* leak = &leaks.back();
 
-    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
-        leak->backtrace.frames, leak->backtrace.max_frames);
+    ssize_t num_backtrace_frames = malloc_backtrace(
+        reinterpret_cast<void*>(it.range.begin), leak->backtrace.frames, leak->backtrace.max_frames);
     if (num_backtrace_frames > 0) {
       leak->backtrace.num_frames = num_backtrace_frames;
 
@@ -183,14 +184,13 @@
     leak->referenced_size = it.referenced_size;
     leak->total_size = leak->size + leak->referenced_size;
     memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
-        std::min(leak->size, Leak::contents_length));
+           std::min(leak->size, Leak::contents_length));
   }
 
   MEM_ALOGI("folding done");
 
-  std::sort(leaks.begin(), leaks.end(), [](const Leak& a, const Leak& b) {
-    return a.total_size > b.total_size;
-  });
+  std::sort(leaks.begin(), leaks.end(),
+            [](const Leak& a, const Leak& b) { return a.total_size > b.total_size; });
 
   if (leaks.size() > limit) {
     leaks.resize(limit);
@@ -205,11 +205,10 @@
 }
 
 bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
-    allocator::vector<Mapping>& heap_mappings,
-    allocator::vector<Mapping>& anon_mappings,
-    allocator::vector<Mapping>& globals_mappings,
-    allocator::vector<Mapping>& stack_mappings)
-{
+                                      allocator::vector<Mapping>& heap_mappings,
+                                      allocator::vector<Mapping>& anon_mappings,
+                                      allocator::vector<Mapping>& globals_mappings,
+                                      allocator::vector<Mapping>& stack_mappings) {
   heap_mappings.clear();
   anon_mappings.clear();
   globals_mappings.clear();
@@ -245,7 +244,8 @@
       stack_mappings.emplace_back(*it);
     } else if (mapping_name.size() == 0) {
       globals_mappings.emplace_back(*it);
-    } else if (has_prefix(mapping_name, "[anon:") && mapping_name != "[anon:leak_detector_malloc]") {
+    } else if (has_prefix(mapping_name, "[anon:") &&
+               mapping_name != "[anon:leak_detector_malloc]") {
       // TODO(ccross): it would be nice to treat named anonymous mappings as
       // possible leaks, but naming something in a .bss or .data section makes
       // it impossible to distinguish them from mmaped and then named mappings.
@@ -256,7 +256,7 @@
   return true;
 }
 
-template<typename T>
+template <typename T>
 static inline const char* plural(T val) {
   return (val == 1) ? "" : "s";
 }
@@ -403,7 +403,6 @@
 }
 
 std::string Leak::ToString(bool log_contents) const {
-
   std::ostringstream oss;
 
   oss << "  " << std::dec << size;
@@ -492,8 +491,8 @@
   oss << std::endl;
 
   for (auto it = leaks.begin(); it != leaks.end(); it++) {
-      oss << it->ToString(log_contents);
-      oss << std::endl;
+    oss << it->ToString(log_contents);
+    oss << std::endl;
   }
 
   return oss.str();
@@ -523,7 +522,6 @@
   return true;
 }
 
-
 bool NoLeaks() {
   UnreachableMemoryInfo info;
   if (!GetUnreachableMemory(info, 0)) {
diff --git a/ProcessMappings.cpp b/ProcessMappings.cpp
index 57b2321..42e5326 100644
--- a/ProcessMappings.cpp
+++ b/ProcessMappings.cpp
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#include <inttypes.h>
 #include <fcntl.h>
+#include <inttypes.h>
 #include <string.h>
 #include <unistd.h>
 
@@ -42,8 +42,8 @@
     int name_pos;
     char perms[5];
     Mapping mapping{};
-    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n",
-        &mapping.begin, &mapping.end, perms, &name_pos) == 3) {
+    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n", &mapping.begin,
+               &mapping.end, perms, &name_pos) == 3) {
       if (perms[0] == 'r') {
         mapping.read = true;
       }
diff --git a/ProcessMappings.h b/ProcessMappings.h
index d3b7496..81b33dc 100644
--- a/ProcessMappings.h
+++ b/ProcessMappings.h
@@ -33,4 +33,4 @@
 // the line data.
 bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings);
 
-#endif // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
+#endif  // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
diff --git a/PtracerThread.cpp b/PtracerThread.cpp
index 73b0493..41efa9c 100644
--- a/PtracerThread.cpp
+++ b/PtracerThread.cpp
@@ -23,17 +23,17 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <unistd.h>
 #include <sys/mman.h>
 #include <sys/syscall.h>
 #include <sys/types.h>
 #include <sys/wait.h>
+#include <unistd.h>
 
 #include "android-base/macros.h"
 
+#include "PtracerThread.h"
 #include "anon_vma_naming.h"
 #include "log.h"
-#include "PtracerThread.h"
 
 class Stack {
  public:
@@ -41,7 +41,7 @@
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
     page_size_ = sysconf(_SC_PAGE_SIZE);
-    size_ += page_size_*2; // guard pages
+    size_ += page_size_ * 2;  // guard pages
     base_ = mmap(NULL, size_, prot, flags, -1, 0);
     if (base_ == MAP_FAILED) {
       base_ = NULL;
@@ -52,22 +52,20 @@
     mprotect(base_, page_size_, PROT_NONE);
     mprotect(top(), page_size_, PROT_NONE);
   };
-  ~Stack() {
-    munmap(base_, size_);
-  };
+  ~Stack() { munmap(base_, size_); };
   void* top() {
     return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base_) + size_ - page_size_);
   };
+
  private:
   DISALLOW_COPY_AND_ASSIGN(Stack);
 
-  void *base_;
+  void* base_;
   size_t size_;
   size_t page_size_;
 };
 
-PtracerThread::PtracerThread(const std::function<int()>& func) :
-    child_pid_(0) {
+PtracerThread::PtracerThread(const std::function<int()>& func) : child_pid_(0) {
   stack_ = std::make_unique<Stack>(PTHREAD_STACK_MIN);
   if (stack_->top() == nullptr) {
     MEM_LOG_ALWAYS_FATAL("failed to mmap child stack: %s", strerror(errno));
@@ -93,14 +91,13 @@
   std::unique_lock<std::mutex> lk(m_);
 
   // Adapt the capturing std::function to the plain int (*)(void*) entry point clone() expects
-  auto proxy = [](void *arg) -> int {
+  auto proxy = [](void* arg) -> int {
     prctl(PR_SET_NAME, "libmemunreachable ptrace thread");
     return (*reinterpret_cast<std::function<int()>*>(arg))();
   };
 
-  child_pid_ = clone(proxy, stack_->top(),
-       CLONE_VM|CLONE_FS|CLONE_FILES/*|CLONE_UNTRACED*/,
-       reinterpret_cast<void*>(&func_));
+  child_pid_ = clone(proxy, stack_->top(), CLONE_VM | CLONE_FS | CLONE_FILES /*|CLONE_UNTRACED*/,
+                     reinterpret_cast<void*>(&func_));
   if (child_pid_ < 0) {
     MEM_ALOGE("failed to clone child: %s", strerror(errno));
     return false;
diff --git a/PtracerThread.h b/PtracerThread.h
index f88b599..ddf9026 100644
--- a/PtracerThread.h
+++ b/PtracerThread.h
@@ -36,6 +36,7 @@
   ~PtracerThread();
   bool Start();
   int Join();
+
  private:
   void SetTracer(pid_t);
   void ClearTracer();
@@ -47,4 +48,4 @@
   pid_t child_pid_;
 };
 
-#endif // LIBMEMUNREACHABLE_PTRACER_THREAD_H_
+#endif  // LIBMEMUNREACHABLE_PTRACER_THREAD_H_
diff --git a/ScopedAlarm.h b/ScopedAlarm.h
index 287f479..53ea112 100644
--- a/ScopedAlarm.h
+++ b/ScopedAlarm.h
@@ -27,11 +27,9 @@
  public:
   ScopedAlarm(std::chrono::microseconds us, std::function<void()> func) {
     func_ = func;
-    struct sigaction oldact{};
-    struct sigaction act{};
-    act.sa_handler = [](int) {
-      ScopedAlarm::func_();
-    };
+    struct sigaction oldact {};
+    struct sigaction act {};
+    act.sa_handler = [](int) { ScopedAlarm::func_(); };
     sigaction(SIGALRM, &act, &oldact);
 
     std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
@@ -43,10 +41,11 @@
   ~ScopedAlarm() {
     itimerval t = itimerval{};
     setitimer(ITIMER_REAL, &t, NULL);
-    struct sigaction act{};
+    struct sigaction act {};
     act.sa_handler = SIG_DFL;
     sigaction(SIGALRM, &act, NULL);
   }
+
  private:
   static std::function<void()> func_;
 };
diff --git a/ScopedDisableMalloc.h b/ScopedDisableMalloc.h
index 758d317..7d2f630 100644
--- a/ScopedDisableMalloc.h
+++ b/ScopedDisableMalloc.h
@@ -21,16 +21,14 @@
 
 #include "android-base/macros.h"
 
+#include "ScopedAlarm.h"
 #include "bionic.h"
 #include "log.h"
-#include "ScopedAlarm.h"
 
-class DisableMallocGuard{
+class DisableMallocGuard {
  public:
-  DisableMallocGuard() : disabled_(false){}
-  ~DisableMallocGuard() {
-    Enable();
-  }
+  DisableMallocGuard() : disabled_(false) {}
+  ~DisableMallocGuard() { Enable(); }
 
   void Disable() {
     if (!disabled_) {
@@ -45,6 +43,7 @@
       disabled_ = false;
     }
   }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(DisableMallocGuard);
   bool disabled_;
@@ -59,13 +58,9 @@
 // here.
 class ScopedDisableMalloc {
  public:
-  ScopedDisableMalloc() {
-    disable_malloc_.Disable();
-  }
+  ScopedDisableMalloc() { disable_malloc_.Disable(); }
 
-  ~ScopedDisableMalloc() {
-    disable_malloc_.Enable();
-  }
+  ~ScopedDisableMalloc() { disable_malloc_.Enable(); }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ScopedDisableMalloc);
@@ -74,18 +69,15 @@
 
 class ScopedDisableMallocTimeout {
  public:
-  explicit ScopedDisableMallocTimeout(std::chrono::milliseconds timeout = std::chrono::milliseconds(2000)) :
-    timeout_(timeout), timed_out_(false), disable_malloc_() {
+  explicit ScopedDisableMallocTimeout(
+      std::chrono::milliseconds timeout = std::chrono::milliseconds(2000))
+      : timeout_(timeout), timed_out_(false), disable_malloc_() {
     Disable();
   }
 
-  ~ScopedDisableMallocTimeout() {
-    Enable();
-  }
+  ~ScopedDisableMallocTimeout() { Enable(); }
 
-  bool timed_out() {
-    return timed_out_;
-  }
+  bool timed_out() { return timed_out_; }
 
   void Enable() {
     disable_malloc_.Enable();
@@ -110,4 +102,4 @@
   DisableMallocGuard disable_malloc_;
 };
 
-#endif // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
+#endif  // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
diff --git a/ScopedPipe.h b/ScopedPipe.h
index 7f44953..76bd974 100644
--- a/ScopedPipe.h
+++ b/ScopedPipe.h
@@ -29,28 +29,22 @@
       MEM_LOG_ALWAYS_FATAL("failed to open pipe");
     }
   }
-  ~ScopedPipe() {
-    Close();
-  }
+  ~ScopedPipe() { Close(); }
 
   ScopedPipe(ScopedPipe&& other) {
     SetReceiver(other.ReleaseReceiver());
     SetSender(other.ReleaseSender());
   }
 
-  ScopedPipe& operator = (ScopedPipe&& other) {
+  ScopedPipe& operator=(ScopedPipe&& other) {
     SetReceiver(other.ReleaseReceiver());
     SetSender(other.ReleaseSender());
     return *this;
   }
 
-  void CloseReceiver() {
-    close(ReleaseReceiver());
-  }
+  void CloseReceiver() { close(ReleaseReceiver()); }
 
-  void CloseSender() {
-    close(ReleaseSender());
-  }
+  void CloseSender() { close(ReleaseSender()); }
 
   void Close() {
     CloseReceiver();
diff --git a/ScopedSignalHandler.h b/ScopedSignalHandler.h
index fab38ed..58ac2aa 100644
--- a/ScopedSignalHandler.h
+++ b/ScopedSignalHandler.h
@@ -31,9 +31,7 @@
   using Fn = std::function<void(ScopedSignalHandler&, int, siginfo_t*, void*)>;
 
   explicit ScopedSignalHandler(Allocator<Fn> allocator) : allocator_(allocator), signal_(-1) {}
-  ~ScopedSignalHandler() {
-    reset();
-  }
+  ~ScopedSignalHandler() { reset(); }
 
   template <class F>
   void install(int signal, F&& f) {
@@ -65,7 +63,6 @@
     }
   }
 
-
  private:
   using SignalFn = std::function<void(int, siginfo_t*, void*)>;
   DISALLOW_COPY_AND_ASSIGN(ScopedSignalHandler);
@@ -77,4 +74,4 @@
   static SignalFn handler_;
 };
 
-#endif // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
+#endif  // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
diff --git a/Semaphore.h b/Semaphore.h
index 6bcf4ea..6d39a93 100644
--- a/Semaphore.h
+++ b/Semaphore.h
@@ -29,7 +29,7 @@
 
   void Wait(std::chrono::milliseconds ms) {
     std::unique_lock<std::mutex> lk(m_);
-    cv_.wait_for(lk, ms, [&]{
+    cv_.wait_for(lk, ms, [&] {
       if (count_ > 0) {
         count_--;
         return true;
@@ -44,6 +44,7 @@
     }
     cv_.notify_one();
   }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(Semaphore);
 
@@ -52,5 +53,4 @@
   std::condition_variable cv_;
 };
 
-
-#endif // LIBMEMUNREACHABLE_SEMAPHORE_H_
+#endif  // LIBMEMUNREACHABLE_SEMAPHORE_H_
diff --git a/Tarjan.h b/Tarjan.h
index 2546341..86c7360 100644
--- a/Tarjan.h
+++ b/Tarjan.h
@@ -24,7 +24,7 @@
 
 #include "Allocator.h"
 
-template<class T>
+template <class T>
 class Node {
  public:
   allocator::set<Node<T>*> references_in;
@@ -34,39 +34,41 @@
 
   T* ptr;
 
-  Node(T* ptr, Allocator<Node> allocator) : references_in(allocator), references_out(allocator),
-      ptr(ptr) {};
+  Node(T* ptr, Allocator<Node> allocator)
+      : references_in(allocator), references_out(allocator), ptr(ptr){};
   Node(Node&& rhs) = default;
   void Edge(Node<T>* ref) {
     references_out.emplace(ref);
     ref->references_in.emplace(this);
   }
-  template<class F>
+  template <class F>
   void Foreach(F&& f) {
-    for (auto& node: references_out) {
+    for (auto& node : references_out) {
       f(node->ptr);
     }
   }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(Node<T>);
 };
 
-template<class T>
+template <class T>
 using Graph = allocator::vector<Node<T>*>;
 
-template<class T>
+template <class T>
 using SCC = allocator::vector<Node<T>*>;
 
-template<class T>
+template <class T>
 using SCCList = allocator::vector<SCC<T>>;
 
-template<class T>
+template <class T>
 class TarjanAlgorithm {
  public:
-  explicit TarjanAlgorithm(Allocator<void> allocator) : index_(0),
-    stack_(allocator), components_(allocator) {}
+  explicit TarjanAlgorithm(Allocator<void> allocator)
+      : index_(0), stack_(allocator), components_(allocator) {}
 
   void Execute(Graph<T>& graph, SCCList<T>& out);
+
  private:
   static constexpr size_t UNDEFINED_INDEX = static_cast<size_t>(-1);
   void Tarjan(Node<T>* vertex, Graph<T>& graph);
@@ -76,17 +78,17 @@
   SCCList<T> components_;
 };
 
-template<class T>
+template <class T>
 void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
   stack_.clear();
   components_.clear();
   index_ = 0;
-  for (auto& it: graph) {
+  for (auto& it : graph) {
     it->index = UNDEFINED_INDEX;
     it->lowlink = UNDEFINED_INDEX;
   }
 
-  for (auto& it: graph) {
+  for (auto& it : graph) {
     if (it->index == UNDEFINED_INDEX) {
       Tarjan(it, graph);
     }
@@ -94,14 +96,14 @@
   out.swap(components_);
 }
 
-template<class T>
+template <class T>
 void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
   assert(vertex->index == UNDEFINED_INDEX);
   vertex->index = index_;
   vertex->lowlink = index_;
   index_++;
   stack_.push_back(vertex);
-  for (auto& it: vertex->references_out) {
+  for (auto& it : vertex->references_out) {
     Node<T>* vertex_next = it;
     if (vertex_next->index == UNDEFINED_INDEX) {
       Tarjan(vertex_next, graph);
@@ -123,10 +125,10 @@
   }
 }
 
-template<class T>
+template <class T>
 void Tarjan(Graph<T>& graph, SCCList<T>& out) {
   TarjanAlgorithm<T> tarjan{graph.get_allocator()};
   tarjan.Execute(graph, out);
 }
 
-#endif // LIBMEMUNREACHABLE_TARJAN_H_
+#endif  // LIBMEMUNREACHABLE_TARJAN_H_
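To show how the entry point above is driven (LeakFolding.cpp does the same with leak nodes), a hedged sketch; Payload and BuildSketch are hypothetical:

    #include "Allocator.h"
    #include "Tarjan.h"

    struct Payload {};

    void BuildSketch() {
      Heap heap;
      Allocator<void> alloc(heap);

      Payload p1, p2;
      Node<Payload> n1(&p1, alloc), n2(&p2, alloc);
      n1.Edge(&n2);  // directed edge n1 -> n2
      n2.Edge(&n1);  // back edge closes a cycle, so n1 and n2 share an SCC

      Graph<Payload> graph(alloc);
      graph.push_back(&n1);
      graph.push_back(&n2);

      SCCList<Payload> sccs(alloc);
      Tarjan(graph, sccs);  // sccs now holds one component with both nodes
    }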
diff --git a/ThreadCapture.cpp b/ThreadCapture.cpp
index 3891f2d..a7bd91c 100644
--- a/ThreadCapture.cpp
+++ b/ThreadCapture.cpp
@@ -21,13 +21,13 @@
 #include <fcntl.h>
 #include <limits.h>
 #include <stdlib.h>
-#include <unistd.h>
 #include <sys/ptrace.h>
 #include <sys/stat.h>
 #include <sys/syscall.h>
 #include <sys/types.h>
 #include <sys/uio.h>
 #include <sys/wait.h>
+#include <unistd.h>
 
 #include <map>
 #include <memory>
@@ -50,12 +50,12 @@
 // Convert a pid > 0 to a string.  sprintf might allocate, so we can't use it.
 // Returns a pointer somewhere in buf to a null terminated string, or NULL
 // on error.
-static char *pid_to_str(char *buf, size_t len, pid_t pid) {
+static char* pid_to_str(char* buf, size_t len, pid_t pid) {
   if (pid <= 0) {
     return nullptr;
   }
 
-  char *ptr = buf + len - 1;
+  char* ptr = buf + len - 1;
   *ptr = 0;
   while (pid > 0) {
     ptr--;
@@ -79,6 +79,7 @@
   bool ReleaseThread(pid_t tid);
   bool CapturedThreadInfo(ThreadInfoList& threads);
   void InjectTestFunc(std::function<void(pid_t)>&& f) { inject_test_func_ = f; }
+
  private:
   int CaptureThread(pid_t tid);
   bool ReleaseThread(pid_t tid, unsigned int signal);
@@ -92,9 +93,8 @@
   std::function<void(pid_t)> inject_test_func_;
 };
 
-ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator) :
-    captured_threads_(allocator), allocator_(allocator), pid_(pid) {
-}
+ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator)
+    : captured_threads_(allocator), allocator_(allocator), pid_(pid) {}
 
 bool ThreadCaptureImpl::ListThreads(TidList& tids) {
   tids.clear();
@@ -115,11 +115,11 @@
   }
 
   struct linux_dirent64 {
-    uint64_t  d_ino;
-    int64_t   d_off;
-    uint16_t  d_reclen;
-    char      d_type;
-    char      d_name[];
+    uint64_t d_ino;
+    int64_t d_off;
+    uint16_t d_reclen;
+    char d_type;
+    char d_name[];
   } __attribute((packed));
   char dirent_buf[4096];
   ssize_t nread;
@@ -209,7 +209,7 @@
 bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {
   thread_info.tid = tid;
 
-  const unsigned int max_num_regs = 128; // larger than number of registers on any device
+  const unsigned int max_num_regs = 128;  // larger than number of registers on any device
   uintptr_t regs[max_num_regs];
   struct iovec iovec;
   iovec.iov_base = &regs;
@@ -243,7 +243,7 @@
 
   thread_info.stack = std::pair<uintptr_t, uintptr_t>(regs[sp], 0);
 
-   return true;
+  return true;
 }
 
 int ThreadCaptureImpl::CaptureThread(pid_t tid) {
@@ -266,7 +266,7 @@
 
   unsigned int resume_signal = 0;
 
-  unsigned int signal =  WSTOPSIG(status);
+  unsigned int signal = WSTOPSIG(status);
   if ((status >> 16) == PTRACE_EVENT_STOP) {
     switch (signal) {
       case SIGSTOP:
@@ -307,7 +307,7 @@
 
 bool ThreadCaptureImpl::ReleaseThreads() {
   bool ret = true;
-  for (auto it = captured_threads_.begin(); it != captured_threads_.end(); ) {
+  for (auto it = captured_threads_.begin(); it != captured_threads_.end();) {
     if (ReleaseThread(it->first, it->second)) {
       it = captured_threads_.erase(it);
     } else {
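
The PtraceThreadInfo hunk above sizes one register buffer for every architecture by letting
the kernel report how much it wrote. A hedged standalone sketch of that pattern follows;
the helper name is illustrative, not part of ThreadCapture's interface, and the tracee must
already be in a ptrace stop for the call to succeed.

    #include <elf.h>         // NT_PRSTATUS
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>     // struct iovec
    #include <cstdint>
    #include <cstdio>

    // Returns the number of word-sized registers read, or 0 on failure.
    static size_t ReadRegisterWords(pid_t tid, uintptr_t* regs, size_t max_regs) {
      struct iovec iovec;
      iovec.iov_base = regs;
      iovec.iov_len = max_regs * sizeof(uintptr_t);
      if (ptrace(PTRACE_GETREGSET, tid, reinterpret_cast<void*>(NT_PRSTATUS), &iovec)) {
        perror("ptrace(PTRACE_GETREGSET)");
        return 0;
      }
      // The kernel shrinks iov_len to the number of bytes actually written,
      // which is how one generous buffer size serves every architecture.
      return iovec.iov_len / sizeof(uintptr_t);
    }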
diff --git a/ThreadCapture.h b/ThreadCapture.h
index 1022cad..d209660 100644
--- a/ThreadCapture.h
+++ b/ThreadCapture.h
@@ -33,7 +33,7 @@
 class ThreadCaptureImpl;
 
 class ThreadCapture {
-public:
+ public:
   ThreadCapture(pid_t pid, Allocator<ThreadCapture> allocator);
   ~ThreadCapture();
 
@@ -44,7 +44,7 @@
   bool CapturedThreadInfo(ThreadInfoList& threads);
   void InjectTestFunc(std::function<void(pid_t)>&& f);
 
-private:
+ private:
   ThreadCapture(const ThreadCapture&) = delete;
   void operator=(const ThreadCapture&) = delete;
 
diff --git a/anon_vma_naming.h b/anon_vma_naming.h
index 1e4ade1..fb31e41 100644
--- a/anon_vma_naming.h
+++ b/anon_vma_naming.h
@@ -19,7 +19,7 @@
 
 #include <sys/prctl.h>
 
-#define PR_SET_VMA            0x53564d41
-#define  PR_SET_VMA_ANON_NAME 0
+#define PR_SET_VMA 0x53564d41
+#define PR_SET_VMA_ANON_NAME 0
 
-#endif // LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_
+#endif  // LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_
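
For context, these constants name an anonymous mapping so it shows up with a label in
/proc/<pid>/maps. A sketch of typical use follows; PR_SET_VMA is honored by kernels with
CONFIG_ANON_VMA_NAME (mainline since Linux 5.17) or Android's older out-of-tree patch, and
fails with EINVAL elsewhere, so callers treat it as best-effort.

    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <cstddef>
    #include <cstdint>

    #define PR_SET_VMA 0x53564d41
    #define PR_SET_VMA_ANON_NAME 0

    void* NamedAnonMap(size_t size, const char* name) {
      void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (ptr == MAP_FAILED) return nullptr;
      // Best-effort: ignore failure on kernels without anonymous VMA naming.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size, name);
      return ptr;
    }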
diff --git a/bionic.h b/bionic.h
index 83d07a8..dd1ec79 100644
--- a/bionic.h
+++ b/bionic.h
@@ -17,9 +17,9 @@
 #ifndef LIBMEMUNREACHABLE_BIONIC_H_
 #define LIBMEMUNREACHABLE_BIONIC_H_
 
-#include <sys/cdefs.h>
 #include <stdint.h>
 #include <stdlib.h>
+#include <sys/cdefs.h>
 
 __BEGIN_DECLS
 
@@ -27,9 +27,9 @@
 extern void malloc_disable();
 extern void malloc_enable();
 extern int malloc_iterate(uintptr_t base, size_t size,
-    void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
+                          void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
 extern ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
 
 __END_DECLS
 
-#endif // LIBMEMUNREACHABLE_BIONIC_H_
+#endif  // LIBMEMUNREACHABLE_BIONIC_H_
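
A sketch of how a caller drives the hooks declared above: freeze all other allocation,
walk every live chunk in a range via malloc_iterate(), then thaw. The heap range arguments
here are placeholders; the real caller derives them from /proc/self/maps.

    #include <cstddef>
    #include <cstdint>

    extern "C" {
    void malloc_disable();
    void malloc_enable();
    int malloc_iterate(uintptr_t base, size_t size,
                       void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
    }

    static void CountChunk(uintptr_t /*base*/, size_t size, void* arg) {
      // Runs while the heap is locked: must not allocate or take heap locks itself.
      *static_cast<size_t*>(arg) += size;
    }

    size_t TotalLiveBytes(uintptr_t heap_base, size_t heap_size) {
      size_t total = 0;
      malloc_disable();  // freeze all other allocator activity
      malloc_iterate(heap_base, heap_size, CountChunk, &total);
      malloc_enable();
      return total;
    }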
diff --git a/include/memunreachable/memunreachable.h b/include/memunreachable/memunreachable.h
index 9b227fd..f6249e3 100644
--- a/include/memunreachable/memunreachable.h
+++ b/include/memunreachable/memunreachable.h
@@ -21,8 +21,8 @@
 
 #ifdef __cplusplus
 
-#include <vector>
 #include <string>
+#include <vector>
 
 struct Leak {
   uintptr_t begin;
@@ -83,4 +83,4 @@
 
 __END_DECLS
 
-#endif // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
+#endif  // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
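
Typical use of this public header, sketched under the assumption that it declares its usual
C++ entry point bool GetUnreachableMemory(UnreachableMemoryInfo&, size_t limit) and that
UnreachableMemoryInfo holds a vector of the Leak records defined above; treat the field
names as illustrative if your revision differs.

    #include <cinttypes>
    #include <cstdio>

    #include <memunreachable/memunreachable.h>

    void DumpLeaks() {
      UnreachableMemoryInfo info;
      // Collection forks a child to scan this process while its heap is frozen.
      if (!GetUnreachableMemory(info, 100 /* max leaks to report */)) {
        fprintf(stderr, "leak detection failed\n");
        return;
      }
      for (const Leak& leak : info.leaks) {
        fprintf(stderr, "unreachable allocation at 0x%" PRIxPTR ", %zu bytes\n",
                leak.begin, leak.size);
      }
    }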
diff --git a/log.h b/log.h
index 10b83db..0f1bb8a 100644
--- a/log.h
+++ b/log.h
@@ -43,4 +43,4 @@
 
 #endif
 
-#endif // LIBMEMUNREACHABLE_LOG_H_
+#endif  // LIBMEMUNREACHABLE_LOG_H_
diff --git a/tests/Allocator_test.cpp b/tests/Allocator_test.cpp
index 21c8218..0bb4f31 100644
--- a/tests/Allocator_test.cpp
+++ b/tests/Allocator_test.cpp
@@ -16,44 +16,42 @@
 
 #include <Allocator.h>
 
-#include <gtest/gtest.h>
 #include <ScopedDisableMalloc.h>
-
+#include <gtest/gtest.h>
 
 std::function<void()> ScopedAlarm::func_;
 
 class AllocatorTest : public testing::Test {
  protected:
   AllocatorTest() : heap(), disable_malloc_() {}
-  virtual void SetUp() {
-    heap_count = 0;
-  }
+  virtual void SetUp() { heap_count = 0; }
   virtual void TearDown() {
     ASSERT_EQ(heap_count, 0);
     ASSERT_TRUE(heap.empty());
     ASSERT_FALSE(disable_malloc_.timed_out());
   }
   Heap heap;
+
  private:
   ScopedDisableMallocTimeout disable_malloc_;
 };
 
 TEST_F(AllocatorTest, simple) {
   Allocator<char[100]> allocator(heap);
-  void *ptr = allocator.allocate();
+  void* ptr = allocator.allocate();
   ASSERT_TRUE(ptr != NULL);
   allocator.deallocate(ptr);
 }
 
 TEST_F(AllocatorTest, multiple) {
   Allocator<char[100]> allocator(heap);
-  void *ptr1 = allocator.allocate();
+  void* ptr1 = allocator.allocate();
   ASSERT_TRUE(ptr1 != NULL);
-  void *ptr2 = allocator.allocate();
+  void* ptr2 = allocator.allocate();
   ASSERT_TRUE(ptr2 != NULL);
   ASSERT_NE(ptr1, ptr2);
   allocator.deallocate(ptr1);
-  void *ptr3 = allocator.allocate();
+  void* ptr3 = allocator.allocate();
   ASSERT_EQ(ptr1, ptr3);
   allocator.deallocate(ptr3);
   allocator.deallocate(ptr2);
@@ -63,7 +61,7 @@
   const int num = 4096;
   const int size = 128;
   Allocator<char[size]> allocator(heap);
-  void *ptr[num];
+  void* ptr[num];
   for (int i = 0; i < num; i++) {
     ptr[i] = allocator.allocate();
     memset(ptr[i], 0xaa, size);
@@ -87,7 +85,7 @@
 TEST_F(AllocatorTest, large) {
   const size_t size = 1024 * 1024;
   Allocator<char[size]> allocator(heap);
-  void *ptr = allocator.allocate();
+  void* ptr = allocator.allocate();
   memset(ptr, 0xaa, size);
   allocator.deallocate(ptr);
 }
@@ -96,7 +94,7 @@
   const int num = 128;
   const int size = 1024 * 1024;
   Allocator<char[size]> allocator(heap);
-  void *ptr[num];
+  void* ptr[num];
   for (int i = 0; i < num; i++) {
     ptr[i] = allocator.allocate();
     memset(ptr[i], 0xaa, size);
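
The pattern these tests exercise, in isolation: a Heap owns the pages, and Allocator<T> is
an STL-compatible adapter over it, so raw objects and whole containers can live on the leak
checker's private heap. The calls below mirror the ones the tests make; this is a sketch,
not canonical API documentation.

    #include <Allocator.h>

    void Example() {
      Heap heap;
      Allocator<int> int_allocator(heap);
      int* p = int_allocator.allocate();  // space for one T, as in the tests
      *p = 42;
      int_allocator.deallocate(p);

      // The same heap can back standard containers via the adapter:
      auto values = allocator::vector<int>(heap);
      values.push_back(1);
    }  // everything is returned, so heap.empty() holds afterwards, as TearDown asserts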
diff --git a/tests/DisableMalloc_test.cpp b/tests/DisableMalloc_test.cpp
index 4e6155b..2db5848 100644
--- a/tests/DisableMalloc_test.cpp
+++ b/tests/DisableMalloc_test.cpp
@@ -19,8 +19,8 @@
 #include <chrono>
 #include <functional>
 
-#include <gtest/gtest.h>
 #include <ScopedDisableMalloc.h>
+#include <gtest/gtest.h>
 
 using namespace std::chrono_literals;
 
@@ -36,75 +36,83 @@
 };
 
 TEST_F(DisableMallocTest, reenable) {
-  ASSERT_EXIT({
-    alarm(100ms);
-    void *ptr1 = malloc(128);
-    ASSERT_NE(ptr1, nullptr);
-    free(ptr1);
-    {
-      ScopedDisableMalloc disable_malloc;
-    }
-    void *ptr2 = malloc(128);
-    ASSERT_NE(ptr2, nullptr);
-    free(ptr2);
-    _exit(1);
-  }, ::testing::ExitedWithCode(1), "");
+  ASSERT_EXIT(
+      {
+        alarm(100ms);
+        void* ptr1 = malloc(128);
+        ASSERT_NE(ptr1, nullptr);
+        free(ptr1);
+        { ScopedDisableMalloc disable_malloc; }
+        void* ptr2 = malloc(128);
+        ASSERT_NE(ptr2, nullptr);
+        free(ptr2);
+        _exit(1);
+      },
+      ::testing::ExitedWithCode(1), "");
 }
 
 TEST_F(DisableMallocTest, deadlock_allocate) {
-  ASSERT_DEATH({
-    void *ptr = malloc(128);
-    ASSERT_NE(ptr, nullptr);
-    free(ptr);
-    {
-      alarm(100ms);
-      ScopedDisableMalloc disable_malloc;
-      void* ptr = malloc(128);
-      ASSERT_NE(ptr, nullptr);
-      free(ptr);
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        void* ptr = malloc(128);
+        ASSERT_NE(ptr, nullptr);
+        free(ptr);
+        {
+          alarm(100ms);
+          ScopedDisableMalloc disable_malloc;
+          void* ptr = malloc(128);
+          ASSERT_NE(ptr, nullptr);
+          free(ptr);
+        }
+      },
+      "");
 }
 
 TEST_F(DisableMallocTest, deadlock_new) {
-  ASSERT_DEATH({
-    char* ptr = new(char);
-    ASSERT_NE(ptr, nullptr);
-    delete(ptr);
-    {
-      alarm(100ms);
-      ScopedDisableMalloc disable_malloc;
-      char* ptr = new (std::nothrow)(char);
-      ASSERT_NE(ptr, nullptr);
-      delete(ptr);
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        char* ptr = new (char);
+        ASSERT_NE(ptr, nullptr);
+        delete (ptr);
+        {
+          alarm(100ms);
+          ScopedDisableMalloc disable_malloc;
+          char* ptr = new (std::nothrow)(char);
+          ASSERT_NE(ptr, nullptr);
+          delete (ptr);
+        }
+      },
+      "");
 }
 
 TEST_F(DisableMallocTest, deadlock_delete) {
-  ASSERT_DEATH({
-    char* ptr = new(char);
-    ASSERT_NE(ptr, nullptr);
-    {
-      alarm(250ms);
-      ScopedDisableMalloc disable_malloc;
-      delete(ptr);
-      // Force ptr usage or this code gets optimized away by the arm64 compiler.
-      ASSERT_NE(ptr, nullptr);
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        char* ptr = new (char);
+        ASSERT_NE(ptr, nullptr);
+        {
+          alarm(250ms);
+          ScopedDisableMalloc disable_malloc;
+          delete (ptr);
+          // Force ptr usage or this code gets optimized away by the arm64 compiler.
+          ASSERT_NE(ptr, nullptr);
+        }
+      },
+      "");
 }
 
 TEST_F(DisableMallocTest, deadlock_free) {
-  ASSERT_DEATH({
-    void *ptr = malloc(128);
-    ASSERT_NE(ptr, nullptr);
-    {
-      alarm(100ms);
-      ScopedDisableMalloc disable_malloc;
-      free(ptr);
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        void* ptr = malloc(128);
+        ASSERT_NE(ptr, nullptr);
+        {
+          alarm(100ms);
+          ScopedDisableMalloc disable_malloc;
+          free(ptr);
+        }
+      },
+      "");
 }
 
 TEST_F(DisableMallocTest, deadlock_fork) {
@@ -111,8 +119,10 @@
-  ASSERT_DEATH({
-    {
-      alarm(100ms);
-      ScopedDisableMalloc disable_malloc;
-      fork();
-    }
-  }, "");
+  ASSERT_DEATH(
+      {
+        {
+          alarm(100ms);
+          ScopedDisableMalloc disable_malloc;
+          fork();
+        }
+      },
+      "");
 }
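
The shape shared by all of these tests: while a ScopedDisableMalloc is live, any
malloc/free/new/delete deadlocks against the frozen heap, so each test arms a watchdog
first and asserts that the process dies. Note that alarm(100ms) resolves to a test-local
chrono overload built on ScopedAlarm, not to libc alarm(2). A minimal sketch of the RAII
wrapper itself, built on the bionic.h hooks, with the caveat that the real class also
handles signal masking and provides the timeout variant used here:

    extern "C" void malloc_disable();
    extern "C" void malloc_enable();

    class ScopedDisableMallocSketch {
     public:
      ScopedDisableMallocSketch() { malloc_disable(); }  // lock out all allocation
      ~ScopedDisableMallocSketch() { malloc_enable(); }  // restore it on scope exit

     private:
      ScopedDisableMallocSketch(const ScopedDisableMallocSketch&) = delete;
      void operator=(const ScopedDisableMallocSketch&) = delete;
    };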
diff --git a/tests/HeapWalker_test.cpp b/tests/HeapWalker_test.cpp
index 98e4aa1..1b258ee 100644
--- a/tests/HeapWalker_test.cpp
+++ b/tests/HeapWalker_test.cpp
@@ -19,8 +19,8 @@
 
 #include "HeapWalker.h"
 
-#include <gtest/gtest.h>
 #include <ScopedDisableMalloc.h>
+#include <gtest/gtest.h>
 #include "Allocator.h"
 
 class HeapWalkerTest : public ::testing::Test {
@@ -172,20 +172,20 @@
   ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(2U, num_leaks);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
 }
 
 TEST_F(HeapWalkerTest, segv) {
   const size_t page_size = sysconf(_SC_PAGE_SIZE);
-  void* buffer1 = mmap(NULL, page_size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+  void* buffer1 = mmap(NULL, page_size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
   ASSERT_NE(buffer1, nullptr);
   void* buffer2;
 
   buffer2 = &buffer1;
 
   HeapWalker heap_walker(heap_);
-  heap_walker.Allocation(buffer_begin(buffer1), buffer_begin(buffer1)+page_size);
+  heap_walker.Allocation(buffer_begin(buffer1), buffer_begin(buffer1) + page_size);
   heap_walker.Root(buffer_begin(buffer2), buffer_end(buffer2));
 
   ASSERT_EQ(true, heap_walker.DetectLeaks());
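
End-to-end shape of the walker protocol these tests use: register every live allocation,
register root ranges (stacks, globals, registers), run the mark phase, then collect
whatever was never reached. The calls below are taken from the tests; the Range element
type name is an assumption about HeapWalker.h, so treat this as a sketch.

    #include "Allocator.h"
    #include "HeapWalker.h"

    void WalkOnce(Heap& heap) {
      HeapWalker walker(heap);

      static uintptr_t alloc[4];         // stand-in for a heap allocation
      static uintptr_t* root = nullptr;  // stand-in for a global root

      walker.Allocation(reinterpret_cast<uintptr_t>(alloc),
                        reinterpret_cast<uintptr_t>(alloc + 4));
      walker.Root(reinterpret_cast<uintptr_t>(&root),
                  reinterpret_cast<uintptr_t>(&root + 1));

      walker.DetectLeaks();  // mark phase; survives SEGV on unreadable ranges

      auto leaked = allocator::vector<Range>(heap);  // Range name assumed
      size_t num_leaks = 0;
      size_t leaked_bytes = 0;
      walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes);
      // With root not pointing at alloc, the range comes back as leaked.
    }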
diff --git a/tests/HostMallocStub.cpp b/tests/HostMallocStub.cpp
index a7e3f07..0ef0487 100644
--- a/tests/HostMallocStub.cpp
+++ b/tests/HostMallocStub.cpp
@@ -16,8 +16,6 @@
 
 #include "bionic.h"
 
-void malloc_disable() {
-}
+void malloc_disable() {}
 
-void malloc_enable() {
-}
+void malloc_enable() {}
diff --git a/tests/LeakFolding_test.cpp b/tests/LeakFolding_test.cpp
index e85df5f..7ae7f76 100644
--- a/tests/LeakFolding_test.cpp
+++ b/tests/LeakFolding_test.cpp
@@ -14,11 +14,11 @@
  * limitations under the License.
  */
 
-#include "HeapWalker.h"
 #include "LeakFolding.h"
+#include "HeapWalker.h"
 
-#include <gtest/gtest.h>
 #include <ScopedDisableMalloc.h>
+#include <gtest/gtest.h>
 #include "Allocator.h"
 
 class LeakFoldingTest : public ::testing::Test {
@@ -84,7 +84,7 @@
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(2U, num_leaks);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
   EXPECT_EQ(0U, leaked[0].referenced_count);
   EXPECT_EQ(0U, leaked[0].referenced_size);
@@ -113,7 +113,7 @@
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(2U, num_leaks);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(1U, leaked.size());
   EXPECT_EQ(1U, leaked[0].referenced_count);
   EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
@@ -144,10 +144,10 @@
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(3U, num_leaks);
-  EXPECT_EQ(3*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(3 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(1U, leaked.size());
   EXPECT_EQ(2U, leaked[0].referenced_count);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[0].referenced_size);
 }
 
 TEST_F(LeakFoldingTest, dominator_cycle) {
@@ -175,13 +175,13 @@
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(3U, num_leaks);
-  EXPECT_EQ(5*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(5 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
 
   EXPECT_EQ(2U, leaked[0].referenced_count);
-  EXPECT_EQ(3*sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[0].referenced_size);
   EXPECT_EQ(2U, leaked[1].referenced_count);
-  EXPECT_EQ(3*sizeof(uintptr_t), leaked[1].referenced_size);
+  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[1].referenced_size);
 }
 
 TEST_F(LeakFoldingTest, two_cycles) {
@@ -218,12 +218,12 @@
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(6U, num_leaks);
-  EXPECT_EQ(6*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(6 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(2U, leaked.size());
   EXPECT_EQ(2U, leaked[0].referenced_count);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[0].referenced_size);
   EXPECT_EQ(2U, leaked[1].referenced_count);
-  EXPECT_EQ(2*sizeof(uintptr_t), leaked[1].referenced_size);
+  EXPECT_EQ(2 * sizeof(uintptr_t), leaked[1].referenced_size);
 }
 
 TEST_F(LeakFoldingTest, two_dominator_cycles) {
@@ -254,7 +254,7 @@
   ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
 
   EXPECT_EQ(4U, num_leaks);
-  EXPECT_EQ(4*sizeof(uintptr_t), leaked_bytes);
+  EXPECT_EQ(4 * sizeof(uintptr_t), leaked_bytes);
   ASSERT_EQ(4U, leaked.size());
   EXPECT_EQ(1U, leaked[0].referenced_count);
   EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
@@ -272,13 +272,13 @@
 
   HeapWalker heap_walker(heap_);
 
-  for (size_t i = 0; i < n; i ++) {
+  for (size_t i = 0; i < n; i++) {
     ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
-        reinterpret_cast<uintptr_t>(&buffer[i+1])));
+                                       reinterpret_cast<uintptr_t>(&buffer[i + 1])));
   }
 
   for (size_t i = 0; i < n - 1; i++) {
-    buffer[i] = &buffer[i+1];
+    buffer[i] = &buffer[i + 1];
   }
   buffer[n - 1] = &buffer[0];
 
@@ -306,15 +306,15 @@
   HeapWalker heap_walker(heap_);
 
   for (size_t i = 0; i < n - 1; i++) {
-    buffer[i] = &buffer[i+1];
+    buffer[i] = &buffer[i + 1];
   }
   buffer[n - 1] = &buffer[0];
 
   buffer1[0] = &buffer[0];
 
-  for (size_t i = 0; i < n; i ++) {
+  for (size_t i = 0; i < n; i++) {
     ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
-        reinterpret_cast<uintptr_t>(&buffer[i+1])));
+                                       reinterpret_cast<uintptr_t>(&buffer[i + 1])));
   }
 
   ALLOCATION(heap_walker, buffer1);
diff --git a/tests/MemUnreachable_test.cpp b/tests/MemUnreachable_test.cpp
index 71da365..a231f4f 100644
--- a/tests/MemUnreachable_test.cpp
+++ b/tests/MemUnreachable_test.cpp
@@ -16,8 +16,8 @@
 
 #include <fcntl.h>
 #include <stdlib.h>
-#include <unistd.h>
 #include <sys/prctl.h>
+#include <unistd.h>
 
 #include <gtest/gtest.h>
 
@@ -25,23 +25,16 @@
 
 class HiddenPointer {
  public:
-  explicit HiddenPointer(size_t size = 256) {
-    Set(malloc(size));
-  }
-  ~HiddenPointer() {
-    Free();
-  }
-  void* Get() {
-    return reinterpret_cast<void*>(~ptr_);
-  }
+  explicit HiddenPointer(size_t size = 256) { Set(malloc(size)); }
+  ~HiddenPointer() { Free(); }
+  void* Get() { return reinterpret_cast<void*>(~ptr_); }
   void Free() {
     free(Get());
     Set(nullptr);
   }
+
  private:
-  void Set(void* ptr) {
-    ptr_ = ~reinterpret_cast<uintptr_t>(ptr);
-  }
+  void Set(void* ptr) { ptr_ = ~reinterpret_cast<uintptr_t>(ptr); }
   volatile uintptr_t ptr_;
 };
 
diff --git a/tests/ThreadCapture_test.cpp b/tests/ThreadCapture_test.cpp
index 44aabd7..5bcb79e 100644
--- a/tests/ThreadCapture_test.cpp
+++ b/tests/ThreadCapture_test.cpp
@@ -45,12 +45,10 @@
     WaitForThreads();
   }
 
-  virtual void TearDown() {
-    ASSERT_TRUE(heap.empty());
-  }
+  virtual void TearDown() { ASSERT_TRUE(heap.empty()); }
 
  protected:
-  template<class Function>
+  template <class Function>
   void StartThreads(unsigned int threads, Function&& func) {
     threads_.reserve(threads);
     tids_.reserve(threads);
@@ -68,14 +66,14 @@
 
         {
           std::unique_lock<std::mutex> lk(m_);
-          cv_stop_.wait(lk, [&] {return stop_;});
+          cv_stop_.wait(lk, [&] { return stop_; });
         }
       });
     }
 
     {
       std::unique_lock<std::mutex> lk(m_);
-      cv_start_.wait(lk, [&]{ return tids_.size() == threads; });
+      cv_start_.wait(lk, [&] { return tids_.size() == threads; });
     }
   }
 
@@ -93,9 +91,7 @@
     tids_.clear();
   }
 
-  std::vector<pid_t>& tids() {
-    return tids_;
-  }
+  std::vector<pid_t>& tids() { return tids_; }
 
   Heap heap;
 
@@ -143,7 +139,7 @@
 TEST_P(ThreadListTest, list_some) {
   const unsigned int threads = GetParam() - 1;
 
-  StartThreads(threads, [](){});
+  StartThreads(threads, []() {});
   std::vector<pid_t> expected_tids = tids();
   expected_tids.push_back(getpid());
 
@@ -176,10 +172,8 @@
  public:
   ThreadCaptureTest() {}
   ~ThreadCaptureTest() {}
-  void Fork(std::function<void()>&& child_init,
-      std::function<void()>&& child_cleanup,
-      std::function<void(pid_t)>&& parent) {
-
+  void Fork(std::function<void()>&& child_init, std::function<void()>&& child_cleanup,
+            std::function<void(pid_t)>&& parent) {
     ScopedPipe start_pipe;
     ScopedPipe stop_pipe;
 
@@ -211,39 +205,40 @@
 TEST_P(ThreadCaptureTest, capture_some) {
   const unsigned int threads = GetParam();
 
-  Fork([&](){
-    // child init
-    StartThreads(threads - 1, [](){});
-  },
-  [&](){
-    // child cleanup
-    StopThreads();
-  },
-  [&](pid_t child){
-    // parent
-    ASSERT_GT(child, 0);
+  Fork(
+      [&]() {
+        // child init
+        StartThreads(threads - 1, []() {});
+      },
+      [&]() {
+        // child cleanup
+        StopThreads();
+      },
+      [&](pid_t child) {
+        // parent
+        ASSERT_GT(child, 0);
 
-    {
-      ScopedDisableMallocTimeout disable_malloc;
+        {
+          ScopedDisableMallocTimeout disable_malloc;
 
-      ThreadCapture thread_capture(child, heap);
-      auto list_tids = allocator::vector<pid_t>(heap);
+          ThreadCapture thread_capture(child, heap);
+          auto list_tids = allocator::vector<pid_t>(heap);
 
-      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
-      ASSERT_EQ(threads, list_tids.size());
+          ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+          ASSERT_EQ(threads, list_tids.size());
 
-      ASSERT_TRUE(thread_capture.CaptureThreads());
+          ASSERT_TRUE(thread_capture.CaptureThreads());
 
-      auto thread_info = allocator::vector<ThreadInfo>(heap);
-      ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
-      ASSERT_EQ(threads, thread_info.size());
-      ASSERT_TRUE(thread_capture.ReleaseThreads());
+          auto thread_info = allocator::vector<ThreadInfo>(heap);
+          ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
+          ASSERT_EQ(threads, thread_info.size());
+          ASSERT_TRUE(thread_capture.ReleaseThreads());
 
-      if (!HasFailure()) {
-        ASSERT_FALSE(disable_malloc.timed_out());
-      }
-}
-  });
+          if (!HasFailure()) {
+            ASSERT_FALSE(disable_malloc.timed_out());
+          }
+        }
+      });
 }
 
 INSTANTIATE_TEST_CASE_P(ThreadCaptureTest, ThreadCaptureTest, ::testing::Values(1, 2, 10, 1024));
@@ -262,7 +257,7 @@
       ScopedDisableMallocTimeout disable_malloc;
 
       ThreadCapture thread_capture(ret, heap);
-      thread_capture.InjectTestFunc([&](pid_t tid){
+      thread_capture.InjectTestFunc([&](pid_t tid) {
         syscall(SYS_tgkill, ret, tid, SIGKILL);
         usleep(10000);
       });
@@ -288,62 +283,63 @@
   // For signal handler
   static ScopedPipe* g_pipe;
 
-  Fork([&](){
-    // child init
-    pipe.CloseReceiver();
+  Fork(
+      [&]() {
+        // child init
+        pipe.CloseReceiver();
 
-    g_pipe = &pipe;
+        g_pipe = &pipe;
 
-    struct sigaction act{};
-    act.sa_handler = [](int){
-      char buf = '+';
-      write(g_pipe->Sender(), &buf, 1);
-      g_pipe->CloseSender();
-    };
-    sigaction(sig, &act, NULL);
-    sigset_t set;
-    sigemptyset(&set);
-    sigaddset(&set, sig);
-    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-  },
-  [&](){
-    // child cleanup
-    g_pipe = nullptr;
-    pipe.Close();
-  },
-  [&](pid_t child){
-    // parent
-    ASSERT_GT(child, 0);
-    pipe.CloseSender();
+        struct sigaction act {};
+        act.sa_handler = [](int) {
+          char buf = '+';
+          write(g_pipe->Sender(), &buf, 1);
+          g_pipe->CloseSender();
+        };
+        sigaction(sig, &act, NULL);
+        sigset_t set;
+        sigemptyset(&set);
+        sigaddset(&set, sig);
+        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
+      },
+      [&]() {
+        // child cleanup
+        g_pipe = nullptr;
+        pipe.Close();
+      },
+      [&](pid_t child) {
+        // parent
+        ASSERT_GT(child, 0);
+        pipe.CloseSender();
 
-    {
-      ScopedDisableMallocTimeout disable_malloc;
+        {
+          ScopedDisableMallocTimeout disable_malloc;
 
-      ThreadCapture thread_capture(child, heap);
-      thread_capture.InjectTestFunc([&](pid_t tid){
-        syscall(SYS_tgkill, child, tid, sig);
-        usleep(10000);
+          ThreadCapture thread_capture(child, heap);
+          thread_capture.InjectTestFunc([&](pid_t tid) {
+            syscall(SYS_tgkill, child, tid, sig);
+            usleep(10000);
+          });
+          auto list_tids = allocator::vector<pid_t>(heap);
+
+          ASSERT_TRUE(thread_capture.ListThreads(list_tids));
+          ASSERT_EQ(1U, list_tids.size());
+
+          ASSERT_TRUE(thread_capture.CaptureThreads());
+
+          auto thread_info = allocator::vector<ThreadInfo>(heap);
+          ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
+          ASSERT_EQ(1U, thread_info.size());
+          ASSERT_TRUE(thread_capture.ReleaseThreads());
+
+          usleep(100000);
+          char buf;
+          ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
+          ASSERT_EQ(buf, '+');
+
+          if (!HasFailure()) {
+            ASSERT_FALSE(disable_malloc.timed_out());
+          }
+        }
       });
-      auto list_tids = allocator::vector<pid_t>(heap);
-
-      ASSERT_TRUE(thread_capture.ListThreads(list_tids));
-      ASSERT_EQ(1U, list_tids.size());
-
-      ASSERT_TRUE(thread_capture.CaptureThreads());
-
-      auto thread_info = allocator::vector<ThreadInfo>(heap);
-      ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
-      ASSERT_EQ(1U, thread_info.size());
-      ASSERT_TRUE(thread_capture.ReleaseThreads());
-
-      usleep(100000);
-      char buf;
-      ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
-      ASSERT_EQ(buf, '+');
-
-      if (!HasFailure()) {
-        ASSERT_FALSE(disable_malloc.timed_out());
-      }
-    }
-  });
 }
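
The final test above uses the classic self-pipe trick: a signal handler may only call
async-signal-safe functions, so it write()s one byte that the parent later read()s to
prove the pending signal was redelivered after ReleaseThreads(). A stripped-down sketch
of that handler setup; TEMP_FAILURE_RETRY is the bionic/glibc retry macro the test itself
relies on.

    #include <signal.h>
    #include <unistd.h>

    static int g_write_fd = -1;  // sender end of a pipe, set before sigaction()

    static void OnSignal(int) {
      char buf = '+';
      // write() is async-signal-safe; almost nothing else is.
      TEMP_FAILURE_RETRY(write(g_write_fd, &buf, 1));
    }

    void InstallHandler(int sig, int write_fd) {
      g_write_fd = write_fd;
      struct sigaction act = {};
      act.sa_handler = OnSignal;
      sigaction(sig, &act, nullptr);
    }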