test: check unbounded memory leak in pthread detach

In the pthread_leak.detach test, compare the memory usage after two
identical scenarios in which all child threads have exited. Thus, a test
emulated by NativeBridge, which may reserve a memory pool, will pass as
long as the pool is not an unbounded leak.

Bug: https://issuetracker.google.com/37920774
Test: CtsBionicTestCases

(cherry picked from commit cf17b487ef60aa00ff9b6873cfed50ebde7864c4)

Change-Id: Ib47b26975489b397783e29d65f67fe775b060589
diff --git a/tests/leak_test.cpp b/tests/leak_test.cpp
index 9ddb2ff..a356640 100644
--- a/tests/leak_test.cpp
+++ b/tests/leak_test.cpp
@@ -49,28 +49,61 @@
   return result;
 }
 
-#define LEAK_TEST(test_case_name, test_name)                                                 \
-  static void __leak_test__##test_case_name##__##test_name();                                \
-  TEST(test_case_name, test_name) {                                                          \
-    auto previous_size = GetMappingSize();                                                   \
-    __leak_test__##test_case_name##__##test_name();                                          \
-    auto current_size = GetMappingSize();                                                    \
-    if (current_size > previous_size) {                                                      \
-      FAIL() << "increase in process map size: " << previous_size << " -> " << current_size; \
-    }                                                                                        \
-  }                                                                                          \
-  static void __leak_test__##test_case_name##__##test_name()
-
-LEAK_TEST(leak, smoke) {
-  // Do nothing.
+static void WaitUntilAllExited(pid_t* pids, size_t pid_count) {
+  // Wait until all children have exited.
+  bool alive = true;
+  while (alive) {
+    alive = false;
+    for (size_t i = 0; i < pid_count; ++i) {
+      if (pids[i] != 0) {
+        if (kill(pids[i], 0) == 0) {
+          alive = true;
+        } else {
+          EXPECT_EQ(errno, ESRCH);
+          pids[i] = 0;  // Skip in next loop.
+        }
+      }
+    }
+  }
 }
 
-LEAK_TEST(leak, xfail) {
-  UNUSED(mmap(nullptr, PAGE_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
+class LeakChecker {
+ public:
+  LeakChecker() {
+    Reset();
+  }
+
+  ~LeakChecker() {
+    Check();
+  }
+
+  void Reset() {
+    previous_size_ = GetMappingSize();
+  }
+
+  void DumpTo(std::ostream& os) const {
+    os << previous_size_;
+  }
+
+ private:
+  size_t previous_size_;
+
+  void Check() {
+    auto current_size = GetMappingSize();
+    if (current_size > previous_size_) {
+      FAIL() << "increase in process map size: " << previous_size_ << " -> " << current_size;
+    }
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const LeakChecker& lc) {
+  lc.DumpTo(os);
+  return os;
 }
 
 // http://b/36045112
-LEAK_TEST(pthread_leak, join) {
+TEST(pthread_leak, join) {
+  LeakChecker lc;
   for (int i = 0; i < 100; ++i) {
     pthread_t thread;
     ASSERT_EQ(0, pthread_create(&thread, nullptr, [](void*) -> void* { return nullptr; }, nullptr));
@@ -79,22 +112,42 @@
 }
 
 // http://b/36045112
-LEAK_TEST(pthread_leak, detach) {
-  pthread_barrier_t barrier;
-  constexpr int thread_count = 100;
-  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, thread_count + 1));
-  for (int i = 0; i < thread_count; ++i) {
-    pthread_t thread;
-    const auto thread_function = +[](void* barrier) -> void* {
-      pthread_barrier_wait(static_cast<pthread_barrier_t*>(barrier));
-      return nullptr;
-    };
-    ASSERT_EQ(0, pthread_create(&thread, nullptr, thread_function, &barrier));
-    ASSERT_EQ(0, pthread_detach(thread));
+TEST(pthread_leak, detach) {
+  LeakChecker lc;
+
+  for (size_t pass = 0; pass < 2; ++pass) {
+    pthread_barrier_t barrier;
+    constexpr int thread_count = 100;
+    ASSERT_EQ(pthread_barrier_init(&barrier, nullptr, thread_count + 1), 0);
+
+    // Start child threads.
+    struct thread_data { pthread_barrier_t* barrier; pid_t* tid; };
+    pid_t tids[thread_count];
+    for (int i = 0; i < thread_count; ++i) {
+      thread_data* td = new thread_data{&barrier, &tids[i]};
+      const auto thread_function = +[](void* ptr) -> void* {
+        thread_data* data = static_cast<thread_data*>(ptr);
+        *data->tid = gettid();
+        pthread_barrier_wait(data->barrier);
+        // Doing this delete allocates new VMAs for jemalloc bookkeeping,
+        // but the two-pass nature of this test means we can check that
+        // it's a pool rather than an unbounded leak.
+        delete data;
+        return nullptr;
+      };
+      pthread_t thread;
+      ASSERT_EQ(0, pthread_create(&thread, nullptr, thread_function, td));
+      ASSERT_EQ(0, pthread_detach(thread));
+    }
+
+    pthread_barrier_wait(&barrier);
+    ASSERT_EQ(pthread_barrier_destroy(&barrier), 0);
+
+    WaitUntilAllExited(tids, arraysize(tids));
+
+    // houdini keeps a thread pool, so we ignore the first pass while the
+    // pool fills, but then on the second pass require that the "pool" isn't
+    // actually an unbounded leak. https://issuetracker.google.com/37920774.
+    if (pass == 0) lc.Reset();
   }
-
-  pthread_barrier_wait(&barrier);
-
-  // Give the threads some time to exit.
-  std::this_thread::sleep_for(100ms);
 }