Revert "Uprev the library to r462023 from Chromium"

This reverts commit bf8c17f71511c1e90cd8cccfe71f0852c566bd3b.

Reason for revert: https://buganizer.corp.google.com/issues/70858501

Change-Id: Iedb1193d46ea2211f8b6fdace41902ad8df6d754
(cherry picked from commit 70cd4fac31a9b0865dab6574540f70cc103337dc)
diff --git a/Android.bp b/Android.bp
index 3aba3c1..fdbcd93 100644
--- a/Android.bp
+++ b/Android.bp
@@ -239,7 +239,6 @@
     "base/trace_event/memory_allocator_dump.cc",
     "base/trace_event/memory_allocator_dump_guid.cc",
     "base/trace_event/memory_dump_manager.cc",
-    "base/trace_event/memory_dump_provider_info.cc",
     "base/trace_event/memory_dump_request_args.cc",
     "base/trace_event/memory_dump_scheduler.cc",
     "base/trace_event/memory_dump_session_state.cc",
@@ -250,7 +249,6 @@
     "base/trace_event/process_memory_totals.cc",
     "base/trace_event/trace_buffer.cc",
     "base/trace_event/trace_config.cc",
-    "base/trace_event/trace_config_category_filter.cc",
     "base/trace_event/trace_event_argument.cc",
     "base/trace_event/trace_event_filter.cc",
     "base/trace_event/trace_event_impl.cc",
diff --git a/base/BUILD.gn b/base/BUILD.gn
index b4a5c47..f84856d 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -276,9 +276,7 @@
     "command_line.h",
     "compiler_specific.h",
     "containers/adapters.h",
-    "containers/flat_map.h",
     "containers/flat_set.h",
-    "containers/flat_tree.h",
     "containers/hash_tables.h",
     "containers/linked_list.h",
     "containers/mru_cache.h",
@@ -474,7 +472,6 @@
     "mac/scoped_aedesc.h",
     "mac/scoped_authorizationref.h",
     "mac/scoped_block.h",
-    "mac/scoped_cffiledescriptorref.h",
     "mac/scoped_cftyperef.h",
     "mac/scoped_dispatch_object.h",
     "mac/scoped_ionotificationportref.h",
@@ -857,7 +854,6 @@
     "task_scheduler/scheduler_single_thread_task_runner_manager.h",
     "task_scheduler/scheduler_worker.cc",
     "task_scheduler/scheduler_worker.h",
-    "task_scheduler/scheduler_worker_params.h",
     "task_scheduler/scheduler_worker_pool.h",
     "task_scheduler/scheduler_worker_pool_impl.cc",
     "task_scheduler/scheduler_worker_pool_impl.h",
@@ -997,8 +993,6 @@
     "trace_event/memory_dump_manager.cc",
     "trace_event/memory_dump_manager.h",
     "trace_event/memory_dump_provider.h",
-    "trace_event/memory_dump_provider_info.cc",
-    "trace_event/memory_dump_provider_info.h",
     "trace_event/memory_dump_request_args.cc",
     "trace_event/memory_dump_request_args.h",
     "trace_event/memory_dump_scheduler.cc",
@@ -1007,8 +1001,6 @@
     "trace_event/memory_dump_session_state.h",
     "trace_event/memory_infra_background_whitelist.cc",
     "trace_event/memory_infra_background_whitelist.h",
-    "trace_event/memory_peak_detector.cc",
-    "trace_event/memory_peak_detector.h",
     "trace_event/memory_usage_estimator.cc",
     "trace_event/memory_usage_estimator.h",
     "trace_event/process_memory_dump.cc",
@@ -1022,8 +1014,6 @@
     "trace_event/trace_category.h",
     "trace_event/trace_config.cc",
     "trace_event/trace_config.h",
-    "trace_event/trace_config_category_filter.cc",
-    "trace_event/trace_config_category_filter.h",
     "trace_event/trace_event.h",
     "trace_event/trace_event_android.cc",
     "trace_event/trace_event_argument.cc",
@@ -1060,7 +1050,6 @@
     "version.h",
     "vlog.cc",
     "vlog.h",
-    "win/current_module.h",
     "win/enum_variant.cc",
     "win/enum_variant.h",
     "win/event_trace_consumer.h",
@@ -1205,7 +1194,6 @@
       "process/internal_linux.cc",
       "process/memory_linux.cc",
       "process/process_handle_linux.cc",
-      "process/process_info_linux.cc",
       "process/process_iterator_linux.cc",
       "process/process_metrics_linux.cc",
       "sys_info_linux.cc",
@@ -1712,7 +1700,6 @@
     "i18n/time_formatting.h",
     "i18n/timezone.cc",
     "i18n/timezone.h",
-    "i18n/unicodestring.h",
     "i18n/utf8_validator_tables.cc",
     "i18n/utf8_validator_tables.h",
   ]
@@ -1906,7 +1893,6 @@
 
 test("base_unittests") {
   sources = [
-    "allocator/allocator_interception_mac_unittest.mm",
     "allocator/malloc_zone_functions_mac_unittest.cc",
     "allocator/tcmalloc_unittest.cc",
     "android/application_status_listener_unittest.cc",
@@ -1936,10 +1922,7 @@
     "cancelable_callback_unittest.cc",
     "command_line_unittest.cc",
     "containers/adapters_unittest.cc",
-    "containers/container_test_utils.h",
-    "containers/flat_map_unittest.cc",
     "containers/flat_set_unittest.cc",
-    "containers/flat_tree_unittest.cc",
     "containers/hash_tables_unittest.cc",
     "containers/linked_list_unittest.cc",
     "containers/mru_cache_unittest.cc",
@@ -1989,7 +1972,6 @@
     "i18n/time_formatting_unittest.cc",
     "i18n/timezone_unittest.cc",
     "id_map_unittest.cc",
-    "ios/crb_protocol_observers_unittest.mm",
     "ios/device_util_unittest.mm",
     "ios/weak_nsobject_unittest.mm",
     "json/json_parser_unittest.cc",
@@ -2067,7 +2049,6 @@
     "process/memory_unittest.cc",
     "process/memory_unittest_mac.h",
     "process/memory_unittest_mac.mm",
-    "process/process_info_unittest.cc",
     "process/process_metrics_unittest.cc",
     "process/process_metrics_unittest_ios.cc",
     "process/process_unittest.cc",
@@ -2173,8 +2154,6 @@
     "trace_event/java_heap_dump_provider_android_unittest.cc",
     "trace_event/memory_allocator_dump_unittest.cc",
     "trace_event/memory_dump_manager_unittest.cc",
-    "trace_event/memory_dump_scheduler_unittest.cc",
-    "trace_event/memory_peak_detector_unittest.cc",
     "trace_event/memory_usage_estimator_unittest.cc",
     "trace_event/process_memory_dump_unittest.cc",
     "trace_event/trace_category_unittest.cc",
@@ -2402,7 +2381,6 @@
       "bind_unittest.nc",
       "callback_list_unittest.nc",
       "callback_unittest.nc",
-      "memory/ref_counted_unittest.nc",
       "memory/weak_ptr_unittest.nc",
       "metrics/histogram_unittest.nc",
     ]
@@ -2468,7 +2446,6 @@
     srcjar_deps = [
       ":base_android_java_enums_srcjar",
       ":base_build_config_gen",
-      ":base_java_aidl",
       ":base_native_libraries_gen",
     ]
 
@@ -2547,8 +2524,6 @@
       "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
       "android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
       "android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
-      "android/java/src/org/chromium/base/process_launcher/ChildProcessCreationParams.java",
-      "android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java",
     ]
 
     # New versions of BuildConfig.java and NativeLibraries.java
@@ -2560,13 +2535,6 @@
     ]
   }
 
-  android_aidl("base_java_aidl") {
-    import_include = [ "android/java/src" ]
-    sources = [
-      "android/java/src/org/chromium/base/process_launcher/IChildProcessService.aidl",
-    ]
-  }
-
   android_library("base_javatests") {
     testonly = true
     deps = [
@@ -2693,7 +2661,6 @@
     ]
     java_files = [
       "test/android/java/src/org/chromium/base/ContentUriTestUtils.java",
-      "test/android/java/src/org/chromium/base/JavaHandlerThreadTest.java",
       "test/android/java/src/org/chromium/base/TestSystemMessageHandler.java",
     ]
   }
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index ac53481..8cdb061 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -65,10 +65,12 @@
       # tcmalloc contains some unused local template specializations.
       "-Wno-unused-function",
 
-      # tcmalloc uses COMPILE_ASSERT without static_assert but with typedefs.
+      # tcmalloc uses COMPILE_ASSERT without static_assert but with
+      # typedefs.
       "-Wno-unused-local-typedefs",
 
-      # for magic2_ in debugallocation.cc (only built in Debug builds) typedefs.
+      # for magic2_ in debugallocation.cc (only built in Debug builds)
+      # typedefs.
       "-Wno-unused-private-field",
     ]
   } else {
@@ -164,15 +166,12 @@
       "$tcmalloc_dir/src/heap-profile-table.cc",
       "$tcmalloc_dir/src/heap-profile-table.h",
       "$tcmalloc_dir/src/heap-profiler.cc",
-      "$tcmalloc_dir/src/heap-profiler.h",
       "$tcmalloc_dir/src/internal_logging.cc",
       "$tcmalloc_dir/src/internal_logging.h",
       "$tcmalloc_dir/src/linked_list.h",
       "$tcmalloc_dir/src/malloc_extension.cc",
-      "$tcmalloc_dir/src/malloc_extension.h",
       "$tcmalloc_dir/src/malloc_hook-inl.h",
       "$tcmalloc_dir/src/malloc_hook.cc",
-      "$tcmalloc_dir/src/malloc_hook.h",
       "$tcmalloc_dir/src/maybe_threads.cc",
       "$tcmalloc_dir/src/maybe_threads.h",
       "$tcmalloc_dir/src/memory_region_map.cc",
@@ -188,7 +187,6 @@
       "$tcmalloc_dir/src/stack_trace_table.cc",
       "$tcmalloc_dir/src/stack_trace_table.h",
       "$tcmalloc_dir/src/stacktrace.cc",
-      "$tcmalloc_dir/src/stacktrace.h",
       "$tcmalloc_dir/src/static_vars.cc",
       "$tcmalloc_dir/src/static_vars.h",
       "$tcmalloc_dir/src/symbolize.cc",
@@ -198,7 +196,6 @@
 
       # #included by debugallocation_shim.cc
       #"$tcmalloc_dir/src/tcmalloc.cc",
-      #"$tcmalloc_dir/src/tcmalloc.h",
       "$tcmalloc_dir/src/thread_cache.cc",
       "$tcmalloc_dir/src/thread_cache.h",
       "$tcmalloc_dir/src/windows/port.cc",
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
index 4887142..fbdbdfc 100644
--- a/base/allocator/allocator_shim.cc
+++ b/base/allocator/allocator_shim.cc
@@ -23,8 +23,6 @@
 
 #if defined(OS_MACOSX)
 #include <malloc/malloc.h>
-
-#include "base/allocator/allocator_interception_mac.h"
 #endif
 
 // No calls to malloc / new in this file. They would cause re-entrancy of
@@ -338,11 +336,9 @@
   // traversed the shim this will route them to the default malloc zone.
   InitializeDefaultDispatchToMacAllocator();
 
-  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();
-
   // This replaces the default malloc zone, causing calls to malloc & friends
   // from the codebase to be routed to ShimMalloc() above.
-  base::allocator::ReplaceFunctionsForStoredZones(&functions);
+  OverrideMacSymbols();
 }
 }  // namespace allocator
 }  // namespace base
diff --git a/base/allocator/allocator_shim_internals.h b/base/allocator/allocator_shim_internals.h
index 0196f89..82624ee 100644
--- a/base/allocator/allocator_shim_internals.h
+++ b/base/allocator/allocator_shim_internals.h
@@ -18,26 +18,7 @@
 #endif
 
 // Shim layer symbols need to be ALWAYS exported, regardless of component build.
-//
-// If an exported symbol is linked into a DSO, it may be preempted by a
-// definition in the main executable. If this happens to an allocator symbol, it
-// will mean that the DSO will use the main executable's allocator. This is
-// normally relatively harmless -- regular allocations should all use the same
-// allocator, but if the DSO tries to hook the allocator it will not see any
-// allocations.
-//
-// However, if LLVM LTO is enabled, the compiler may inline the shim layer
-// symbols into callers. The end result is that allocator calls in DSOs may use
-// either the main executable's allocator or the DSO's allocator, depending on
-// whether the call was inlined. This is arguably a bug in LLVM caused by its
-// somewhat irregular handling of symbol interposition (see llvm.org/PR23501).
-// To work around the bug we use noinline to prevent the symbols from being
-// inlined.
-//
-// In the long run we probably want to avoid linking the allocator bits into
-// DSOs altogether. This will save a little space and stop giving DSOs the false
-// impression that they can hook the allocator.
-#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default"), noinline))
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
 
 #endif  // __GNUC__
 
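For context: SHIM_ALWAYS_EXPORT decorates every allocator entry point the shim defines, keeping the symbols visible (and, before this revert, non-inlinable) across shared-library boundaries. A minimal sketch of how an override might use it, with ShimMalloc standing in for the shim's dispatcher (the exact signature here is an assumption, not taken from this patch):

    // Sketch of an overridden libc symbol; default visibility lets this
    // definition preempt the libc one at link/load time.
    SHIM_ALWAYS_EXPORT void* malloc(size_t size) {
      return ShimMalloc(size);  // Route into the shim dispatch chain.
    }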
diff --git a/base/at_exit.cc b/base/at_exit.cc
index e0025ea..5dcc83c 100644
--- a/base/at_exit.cc
+++ b/base/at_exit.cc
@@ -81,10 +81,6 @@
     g_top_manager->processing_callbacks_ = true;
   }
 
-  // Relax the cross-thread access restriction to non-thread-safe RefCount.
-  // It's safe since all other threads should be terminated at this point.
-  ScopedAllowCrossThreadRefCountAccess allow_cross_thread_ref_count_access;
-
   while (!tasks.empty()) {
     base::Closure task = tasks.top();
     task.Run();
diff --git a/base/base.isolate b/base/base.isolate
new file mode 100644
index 0000000..079d07d
--- /dev/null
+++ b/base/base.isolate
@@ -0,0 +1,60 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'includes': [
+    # While the target 'base' doesn't depend on ../third_party/icu/icu.gyp
+    # itself, virtually all targets using it have to include icu. The only
+    # exception is the Windows sandbox (?).
+    '../third_party/icu/icu.isolate',
+    # Sanitizer-instrumented third-party libraries (if enabled).
+    '../third_party/instrumented_libraries/instrumented_libraries.isolate',
+    # MSVS runtime libraries.
+    '../build/config/win/msvs_dependencies.isolate',
+  ],
+  'conditions': [
+    ['use_custom_libcxx==1', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/lib/libc++.so',
+        ],
+      },
+    }],
+    ['OS=="mac" and asan==1', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
+        ],
+      },
+    }],
+    ['OS=="win" and asan==1 and component=="shared_library"', {
+      'variables': {
+        'files': [
+          # We only need x.y.z/lib/windows/clang_rt.asan_dynamic-i386.dll,
+          # but since the version (x.y.z) changes, just grab the whole dir.
+          '../third_party/llvm-build/Release+Asserts/lib/clang/',
+        ],
+      },
+    }],
+    ['OS=="linux" and (asan==1 or lsan==1 or msan==1 or tsan==1)', {
+      'variables': {
+        'files': [
+          # For llvm-symbolizer.
+          '../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
+        ],
+      },
+    }],
+    ['asan==1 or lsan==1 or msan==1 or tsan==1', {
+      'variables': {
+        'files': [
+          '../tools/valgrind/asan/',
+          '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
+        ],
+      },
+    }],
+    # Workaround for https://code.google.com/p/swarming/issues/detail?id=211
+    ['asan==0 or lsan==0 or msan==0 or tsan==0', {
+      'variables': {},
+    }],
+  ],
+}
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index 0de9294..a9ca9d2 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -1309,59 +1309,65 @@
   static_assert(std::is_constructible<
       RepeatingClosure, const RepeatingClosure&>::value,
       "RepeatingClosure should be copyable.");
-  static_assert(
-      std::is_assignable<RepeatingClosure, const RepeatingClosure&>::value,
+  static_assert(is_assignable<
+      RepeatingClosure, const RepeatingClosure&>::value,
       "RepeatingClosure should be copy-assignable.");
 
   // Move constructor and assignment of RepeatingCallback.
   static_assert(std::is_constructible<
       RepeatingClosure, RepeatingClosure&&>::value,
       "RepeatingClosure should be movable.");
-  static_assert(std::is_assignable<RepeatingClosure, RepeatingClosure&&>::value,
-                "RepeatingClosure should be move-assignable");
+  static_assert(is_assignable<
+      RepeatingClosure, RepeatingClosure&&>::value,
+      "RepeatingClosure should be move-assignable");
 
   // Conversions from OnceCallback to RepeatingCallback.
   static_assert(!std::is_constructible<
       RepeatingClosure, const OnceClosure&>::value,
       "OnceClosure should not be convertible to RepeatingClosure.");
-  static_assert(
-      !std::is_assignable<RepeatingClosure, const OnceClosure&>::value,
+  static_assert(!is_assignable<
+      RepeatingClosure, const OnceClosure&>::value,
       "OnceClosure should not be convertible to RepeatingClosure.");
 
   // Destructive conversions from OnceCallback to RepeatingCallback.
   static_assert(!std::is_constructible<
       RepeatingClosure, OnceClosure&&>::value,
       "OnceClosure should not be convertible to RepeatingClosure.");
-  static_assert(!std::is_assignable<RepeatingClosure, OnceClosure&&>::value,
-                "OnceClosure should not be convertible to RepeatingClosure.");
+  static_assert(!is_assignable<
+      RepeatingClosure, OnceClosure&&>::value,
+      "OnceClosure should not be convertible to RepeatingClosure.");
 
   // Copy constructor and assignment of OnceCallback.
   static_assert(!std::is_constructible<
       OnceClosure, const OnceClosure&>::value,
       "OnceClosure should not be copyable.");
-  static_assert(!std::is_assignable<OnceClosure, const OnceClosure&>::value,
-                "OnceClosure should not be copy-assignable");
+  static_assert(!is_assignable<
+      OnceClosure, const OnceClosure&>::value,
+      "OnceClosure should not be copy-assignable");
 
   // Move constructor and assignment of OnceCallback.
   static_assert(std::is_constructible<
       OnceClosure, OnceClosure&&>::value,
       "OnceClosure should be movable.");
-  static_assert(std::is_assignable<OnceClosure, OnceClosure&&>::value,
-                "OnceClosure should be move-assignable.");
+  static_assert(is_assignable<
+      OnceClosure, OnceClosure&&>::value,
+      "OnceClosure should be move-assignable.");
 
   // Conversions from RepeatingCallback to OnceCallback.
   static_assert(std::is_constructible<
       OnceClosure, const RepeatingClosure&>::value,
       "RepeatingClosure should be convertible to OnceClosure.");
-  static_assert(std::is_assignable<OnceClosure, const RepeatingClosure&>::value,
-                "RepeatingClosure should be convertible to OnceClosure.");
+  static_assert(is_assignable<
+      OnceClosure, const RepeatingClosure&>::value,
+      "RepeatingClosure should be convertible to OnceClosure.");
 
   // Destructive conversions from RepeatingCallback to OnceCallback.
   static_assert(std::is_constructible<
       OnceClosure, RepeatingClosure&&>::value,
       "RepeatingClosure should be convertible to OnceClosure.");
-  static_assert(std::is_assignable<OnceClosure, RepeatingClosure&&>::value,
-                "RepeatingClosure should be covretible to OnceClosure.");
+  static_assert(is_assignable<
+      OnceClosure, RepeatingClosure&&>::value,
+      "RepeatingClosure should be covretible to OnceClosure.");
 
   OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
   std::move(cb).Run();
diff --git a/base/callback.h b/base/callback.h
index c91e1a8..40bd520 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -21,6 +21,71 @@
 
 namespace internal {
 
+template <typename CallbackType>
+struct IsOnceCallback : std::false_type {};
+
+template <typename Signature>
+struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
+
+// RunMixin provides different variants of `Run()` function to `Callback<>`
+// based on the type of callback.
+template <typename CallbackType>
+class RunMixin;
+
+// Specialization for OnceCallback.
+template <typename R, typename... Args>
+class RunMixin<OnceCallback<R(Args...)>> {
+ private:
+  using CallbackType = OnceCallback<R(Args...)>;
+
+ public:
+  using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
+
+  R Run(Args... /* args */) const & {
+    // Note: even though this static_assert will trivially always fail, it
+    // cannot be simply replaced with static_assert(false, ...) because:
+    // - Per [dcl.dcl]/p4, a program is ill-formed if the constant-expression
+    //   argument does not evaluate to true.
+    // - Per [temp.res]/p8, if no valid specialization can be generated for a
+    //   template definition, and that template is not instantiated, the
+    //   template definition is ill-formed, no diagnostic required.
+    // These two clauses, taken together, would allow a conforming C++ compiler
+    // to immediately reject static_assert(false, ...), even inside an
+    // uninstantiated template.
+    static_assert(!IsOnceCallback<CallbackType>::value,
+                  "OnceCallback::Run() may only be invoked on a non-const "
+                  "rvalue, i.e. std::move(callback).Run().");
+  }
+
+  R Run(Args... args) && {
+    // Move the callback instance into a local variable before the invocation;
+    // that ensures the internal state is cleared after the invocation.
+    // It's not safe to touch |this| after the invocation, since running the
+    // bound function may destroy |this|.
+    CallbackType cb = static_cast<CallbackType&&>(*this);
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+    return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+  }
+};
+
+// Specialization for RepeatingCallback.
+template <typename R, typename... Args>
+class RunMixin<RepeatingCallback<R(Args...)>> {
+ private:
+  using CallbackType = RepeatingCallback<R(Args...)>;
+
+ public:
+  using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
+
+  R Run(Args... args) const {
+    const CallbackType& cb = static_cast<const CallbackType&>(*this);
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
+    return f(cb.bind_state_.get(), std::forward<Args>(args)...);
+  }
+};
+
 template <typename From, typename To>
 struct IsCallbackConvertible : std::false_type {};
 
@@ -35,14 +100,14 @@
           internal::CopyMode copy_mode,
           internal::RepeatMode repeat_mode>
 class Callback<R(Args...), copy_mode, repeat_mode>
-    : public internal::CallbackBase<copy_mode> {
+    : public internal::CallbackBase<copy_mode>,
+      public internal::RunMixin<Callback<R(Args...), copy_mode, repeat_mode>> {
  public:
   static_assert(repeat_mode != internal::RepeatMode::Once ||
                 copy_mode == internal::CopyMode::MoveOnly,
                 "OnceCallback must be MoveOnly.");
 
   using RunType = R(Args...);
-  using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
 
   Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
 
@@ -70,26 +135,7 @@
     return this->EqualsInternal(other);
   }
 
-  R Run(Args... args) const & {
-    static_assert(repeat_mode == internal::RepeatMode::Repeating,
-                  "OnceCallback::Run() may only be invoked on a non-const "
-                  "rvalue, i.e. std::move(callback).Run().");
-
-    PolymorphicInvoke f =
-        reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
-    return f(this->bind_state_.get(), std::forward<Args>(args)...);
-  }
-
-  R Run(Args... args) && {
-    // Move the callback instance into a local variable before the invocation;
-    // that ensures the internal state is cleared after the invocation.
-    // It's not safe to touch |this| after the invocation, since running the
-    // bound function may destroy |this|.
-    Callback cb = std::move(*this);
-    PolymorphicInvoke f =
-        reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
-    return f(cb.bind_state_.get(), std::forward<Args>(args)...);
-  }
+  friend class internal::RunMixin<Callback>;
 };
 
 }  // namespace base
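The ref-qualified overloads moved into RunMixin here keep the same calling convention: a OnceCallback must be run as an rvalue, while a RepeatingCallback may be run in place. A minimal usage sketch (SomeFunction is a hypothetical free function):

    #include "base/bind.h"
    #include "base/callback.h"

    void SomeFunction();  // Hypothetical.

    void RunSemanticsExample() {
      base::OnceClosure once = base::BindOnce(&SomeFunction);
      std::move(once).Run();  // Consumes the callback; |once| is null after.
      // once.Run();          // Ill-formed: trips the static_assert in the
      //                      // const-lvalue overload above.

      base::Closure repeating = base::Bind(&SomeFunction);
      repeating.Run();        // Const-lvalue overload; may run again later.
    }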
diff --git a/base/callback_helpers.h b/base/callback_helpers.h
index 6e0aee8..ec3d6cb 100644
--- a/base/callback_helpers.h
+++ b/base/callback_helpers.h
@@ -8,8 +8,8 @@
 // generated).  Instead, consider adding methods here.
 //
 // ResetAndReturn(&cb) is like cb.Reset() but allows executing a callback (via a
-// move or copy) after the original callback is Reset().  This can be handy if
-// Run() reads/writes the variable holding the Callback.
+// copy) after the original callback is Reset().  This can be handy if Run()
+// reads/writes the variable holding the Callback.
 
 #ifndef BASE_CALLBACK_HELPERS_H_
 #define BASE_CALLBACK_HELPERS_H_
diff --git a/base/callback_internal.cc b/base/callback_internal.cc
index a760f06..4330e9c 100644
--- a/base/callback_internal.cc
+++ b/base/callback_internal.cc
@@ -17,10 +17,6 @@
 
 }  // namespace
 
-void BindStateBaseRefCountTraits::Destruct(const BindStateBase* bind_state) {
-  bind_state->destructor_(bind_state);
-}
-
 BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
                              void (*destructor)(const BindStateBase*))
     : BindStateBase(polymorphic_invoke, destructor, &ReturnFalse) {
@@ -30,9 +26,19 @@
                              void (*destructor)(const BindStateBase*),
                              bool (*is_cancelled)(const BindStateBase*))
     : polymorphic_invoke_(polymorphic_invoke),
+      ref_count_(0),
       destructor_(destructor),
       is_cancelled_(is_cancelled) {}
 
+void BindStateBase::AddRef() const {
+  AtomicRefCountInc(&ref_count_);
+}
+
+void BindStateBase::Release() const {
+  if (!AtomicRefCountDec(&ref_count_))
+    destructor_(this);
+}
+
 CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c) = default;
 
 CallbackBase<CopyMode::MoveOnly>&
@@ -74,9 +80,10 @@
   return bind_state_ == other.bind_state_;
 }
 
-CallbackBase<CopyMode::MoveOnly>::CallbackBase(BindStateBase* bind_state)
-    : bind_state_(bind_state ? AdoptRef(bind_state) : nullptr) {
-  DCHECK(!bind_state_.get() || bind_state_->HasOneRef());
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(
+    BindStateBase* bind_state)
+    : bind_state_(bind_state) {
+  DCHECK(!bind_state_.get() || bind_state_->ref_count_ == 1);
 }
 
 CallbackBase<CopyMode::MoveOnly>::~CallbackBase() {}
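This revert replaces the RefCountedThreadSafe base with a hand-rolled atomic count plus a function-pointer destructor, avoiding a vtable per BindState instantiation (see the comment in callback_internal.h below). A self-contained sketch of that pattern, using std::atomic in place of base's AtomicRefCount:

    #include <atomic>

    // Sketch only: one atomic count and a stored destructor function
    // pointer, so destruction needs no virtual dispatch.
    struct StateBase {
      explicit StateBase(void (*dtor)(const StateBase*))
          : ref_count(0), destructor(dtor) {}
      void AddRef() const { ref_count.fetch_add(1, std::memory_order_relaxed); }
      void Release() const {
        if (ref_count.fetch_sub(1, std::memory_order_acq_rel) == 1)
          destructor(this);  // Count hit zero: destroy via stored pointer.
      }
      mutable std::atomic<int> ref_count;
      void (*destructor)(const StateBase*);
    };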
diff --git a/base/callback_internal.h b/base/callback_internal.h
index 29b07c2..d6dcfeb 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -8,29 +8,17 @@
 #ifndef BASE_CALLBACK_INTERNAL_H_
 #define BASE_CALLBACK_INTERNAL_H_
 
+#include "base/atomic_ref_count.h"
 #include "base/base_export.h"
 #include "base/callback_forward.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 
 namespace base {
-
-struct FakeBindState;
-
 namespace internal {
-
 template <CopyMode copy_mode>
 class CallbackBase;
 
-class BindStateBase;
-
-template <typename Functor, typename... BoundArgs>
-struct BindState;
-
-struct BindStateBaseRefCountTraits {
-  static void Destruct(const BindStateBase*);
-};
-
 // BindStateBase is used to provide an opaque handle that the Callback
 // class can use to represent a function object with bound arguments.  It
 // behaves as an existential type that is used by a corresponding
@@ -42,43 +30,38 @@
 // Creating a vtable for every BindState template instantiation results in a lot
 // of bloat. Its only task is to call the destructor which can be done with a
 // function pointer.
-class BASE_EXPORT BindStateBase
-    : public RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits> {
+class BASE_EXPORT BindStateBase {
  public:
-  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
-
   using InvokeFuncStorage = void(*)();
 
- private:
+ protected:
   BindStateBase(InvokeFuncStorage polymorphic_invoke,
                 void (*destructor)(const BindStateBase*));
   BindStateBase(InvokeFuncStorage polymorphic_invoke,
                 void (*destructor)(const BindStateBase*),
                 bool (*is_cancelled)(const BindStateBase*));
-
   ~BindStateBase() = default;
 
-  friend struct BindStateBaseRefCountTraits;
-  friend class RefCountedThreadSafe<BindStateBase, BindStateBaseRefCountTraits>;
-
+ private:
+  friend class scoped_refptr<BindStateBase>;
   template <CopyMode copy_mode>
   friend class CallbackBase;
 
-  // Whitelist subclasses that access the destructor of BindStateBase.
-  template <typename Functor, typename... BoundArgs>
-  friend struct BindState;
-  friend struct ::base::FakeBindState;
-
   bool IsCancelled() const {
     return is_cancelled_(this);
   }
 
+  void AddRef() const;
+  void Release() const;
+
   // In C++, it is safe to cast function pointers to function pointers of
   // another type. It is not okay to use void*. We create an InvokeFuncStorage
   // that can store our function pointer, and then cast it back to
   // the original type on usage.
   InvokeFuncStorage polymorphic_invoke_;
 
+  mutable AtomicRefCount ref_count_;
+
   // Pointer to a function that will properly destroy |this|.
   void (*destructor_)(const BindStateBase*);
   bool (*is_cancelled_)(const BindStateBase*);
@@ -103,7 +86,7 @@
   CallbackBase& operator=(CallbackBase<CopyMode::Copyable>&& c);
 
   // Returns true if Callback is null (doesn't refer to anything).
-  bool is_null() const { return !bind_state_; }
+  bool is_null() const { return bind_state_.get() == NULL; }
   explicit operator bool() const { return !is_null(); }
 
   // Returns true if the callback invocation will be a no-op due to a cancellation.
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
index f76adbc..a417369 100644
--- a/base/callback_unittest.cc
+++ b/base/callback_unittest.cc
@@ -21,13 +21,24 @@
 // based on a type we declared in the anonymous namespace above to remove any
 // chance of colliding with another instantiation and breaking the
 // one-definition-rule.
-struct FakeBindState : internal::BindStateBase {
-  FakeBindState() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
-
+struct FakeBindState1 : internal::BindStateBase {
+  FakeBindState1() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
  private:
-  ~FakeBindState() {}
+  ~FakeBindState1() {}
   static void Destroy(const internal::BindStateBase* self) {
-    delete static_cast<const FakeBindState*>(self);
+    delete static_cast<const FakeBindState1*>(self);
+  }
+  static bool IsCancelled(const internal::BindStateBase*) {
+    return false;
+  }
+};
+
+struct FakeBindState2 : internal::BindStateBase {
+  FakeBindState2() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
+ private:
+  ~FakeBindState2() {}
+  static void Destroy(const internal::BindStateBase* self) {
+    delete static_cast<const FakeBindState2*>(self);
   }
   static bool IsCancelled(const internal::BindStateBase*) {
     return false;
@@ -39,7 +50,9 @@
 class CallbackTest : public ::testing::Test {
  public:
   CallbackTest()
-      : callback_a_(new FakeBindState()), callback_b_(new FakeBindState()) {}
+      : callback_a_(new FakeBindState1()),
+        callback_b_(new FakeBindState2()) {
+  }
 
   ~CallbackTest() override {}
 
@@ -81,7 +94,7 @@
   EXPECT_FALSE(callback_b_.Equals(callback_a_));
 
   // We should compare based on instance, not type.
-  Callback<void()> callback_c(new FakeBindState());
+  Callback<void()> callback_c(new FakeBindState1());
   Callback<void()> callback_a2 = callback_a_;
   EXPECT_TRUE(callback_a_.Equals(callback_a2));
   EXPECT_FALSE(callback_a_.Equals(callback_c));
@@ -135,23 +148,6 @@
   ASSERT_TRUE(tfr.cb_already_run);
 }
 
-TEST_F(CallbackTest, NullAfterMoveRun) {
-  Closure cb = Bind([] {});
-  ASSERT_TRUE(cb);
-  std::move(cb).Run();
-  ASSERT_FALSE(cb);
-
-  const Closure cb2 = Bind([] {});
-  ASSERT_TRUE(cb2);
-  std::move(cb2).Run();
-  ASSERT_TRUE(cb2);
-
-  OnceClosure cb3 = BindOnce([] {});
-  ASSERT_TRUE(cb3);
-  std::move(cb3).Run();
-  ASSERT_FALSE(cb3);
-}
-
 class CallbackOwner : public base::RefCounted<CallbackOwner> {
  public:
   explicit CallbackOwner(bool* deleted) {
diff --git a/base/command_line.cc b/base/command_line.cc
index 137f966..3033fcf 100644
--- a/base/command_line.cc
+++ b/base/command_line.cc
@@ -11,7 +11,6 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
 #include "base/strings/string_util.h"
 #include "base/strings/utf_string_conversions.h"
 #include "build/build_config.h"
@@ -412,15 +411,11 @@
 void CommandLine::PrependWrapper(const CommandLine::StringType& wrapper) {
   if (wrapper.empty())
     return;
-  // Split the wrapper command based on whitespace (with quoting).
-  using CommandLineTokenizer =
-      StringTokenizerT<StringType, StringType::const_iterator>;
-  CommandLineTokenizer tokenizer(wrapper, FILE_PATH_LITERAL(" "));
-  tokenizer.set_quote_chars(FILE_PATH_LITERAL("'\""));
-  std::vector<StringType> wrapper_argv;
-  while (tokenizer.GetNext())
-    wrapper_argv.emplace_back(tokenizer.token());
-
+  // The wrapper may have embedded arguments (like "gdb --args"). In this case,
+  // we don't pretend to do anything fancy; we just split on spaces.
+  StringVector wrapper_argv = SplitString(
+      wrapper, FilePath::StringType(1, ' '), base::TRIM_WHITESPACE,
+      base::SPLIT_WANT_ALL);
   // Prepend the wrapper and update the switches/arguments |begin_args_|.
   argv_.insert(argv_.begin(), wrapper_argv.begin(), wrapper_argv.end());
   begin_args_ += wrapper_argv.size();
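With the tokenizer gone, PrependWrapper splits purely on whitespace, so quoted arguments are no longer kept together (the two unit tests removed below covered exactly that). A sketch of the resulting behavior:

    void WrapperSplitExample() {
      base::CommandLine cl(base::FilePath(FILE_PATH_LITERAL("Program")));
      cl.AppendSwitch("a");
      cl.PrependWrapper(FILE_PATH_LITERAL("gdb --args"));
      // cl.argv() is now {"gdb", "--args", "Program", "--a"}. A wrapper
      // such as --foo='hello world' would be split at the embedded space.
    }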
diff --git a/base/command_line_unittest.cc b/base/command_line_unittest.cc
index 79c9aec..bcfc6c5 100644
--- a/base/command_line_unittest.cc
+++ b/base/command_line_unittest.cc
@@ -406,35 +406,4 @@
     EXPECT_TRUE(assigned.HasSwitch(pair.first));
 }
 
-TEST(CommandLineTest, PrependSimpleWrapper) {
-  CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
-  cl.AppendSwitch("a");
-  cl.AppendSwitch("b");
-  cl.PrependWrapper(FILE_PATH_LITERAL("wrapper --foo --bar"));
-
-  EXPECT_EQ(6u, cl.argv().size());
-  EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
-  EXPECT_EQ(FILE_PATH_LITERAL("--foo"), cl.argv()[1]);
-  EXPECT_EQ(FILE_PATH_LITERAL("--bar"), cl.argv()[2]);
-  EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
-  EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
-  EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
-}
-
-TEST(CommandLineTest, PrependComplexWrapper) {
-  CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
-  cl.AppendSwitch("a");
-  cl.AppendSwitch("b");
-  cl.PrependWrapper(
-      FILE_PATH_LITERAL("wrapper --foo='hello world' --bar=\"let's go\""));
-
-  EXPECT_EQ(6u, cl.argv().size());
-  EXPECT_EQ(FILE_PATH_LITERAL("wrapper"), cl.argv()[0]);
-  EXPECT_EQ(FILE_PATH_LITERAL("--foo='hello world'"), cl.argv()[1]);
-  EXPECT_EQ(FILE_PATH_LITERAL("--bar=\"let's go\""), cl.argv()[2]);
-  EXPECT_EQ(FILE_PATH_LITERAL("Program"), cl.argv()[3]);
-  EXPECT_EQ(FILE_PATH_LITERAL("--a"), cl.argv()[4]);
-  EXPECT_EQ(FILE_PATH_LITERAL("--b"), cl.argv()[5]);
-}
-
 } // namespace base
diff --git a/base/containers/container_test_utils.h b/base/containers/container_test_utils.h
deleted file mode 100644
index e36b9f7..0000000
--- a/base/containers/container_test_utils.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
-#define BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
-
-// This file contains some helper classes for testing container behavior.
-
-#include "base/macros.h"
-
-namespace base {
-
-// A move-only class that holds an integer.
-class MoveOnlyInt {
- public:
-  explicit MoveOnlyInt(int data = 1) : data_(data) {}
-  MoveOnlyInt(MoveOnlyInt&& other) : data_(other.data_) { other.data_ = 0; }
-  MoveOnlyInt& operator=(MoveOnlyInt&& other) {
-    data_ = other.data_;
-    other.data_ = 0;
-    return *this;
-  }
-
-  friend bool operator<(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) {
-    return lhs.data_ < rhs.data_;
-  }
-
-  int data() const { return data_; }
-
- private:
-  int data_;
-
-  DISALLOW_COPY_AND_ASSIGN(MoveOnlyInt);
-};
-
-}  // namespace base
-
-#endif  // BASE_CONTAINERS_CONTAINER_TEST_UTILS_H_
diff --git a/base/containers/mru_cache.h b/base/containers/mru_cache.h
index 7c684a9..4005489 100644
--- a/base/containers/mru_cache.h
+++ b/base/containers/mru_cache.h
@@ -105,6 +105,8 @@
   // Retrieves the contents of the given key, or end() if not found. This method
   // has the side effect of moving the requested item to the front of the
   // recency list.
+  //
+  // TODO(brettw) We may want a const version of this function in the future.
   iterator Get(const KeyType& key) {
     typename KeyIndex::iterator index_iter = index_.find(key);
     if (index_iter == index_.end())
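Because Get() reorders the recency list, a lookup alone changes which entry is evicted next. A small usage sketch:

    #include <string>
    #include "base/containers/mru_cache.h"

    void MruCacheExample() {
      base::MRUCache<int, std::string> cache(2);  // At most two entries.
      cache.Put(1, "one");
      cache.Put(2, "two");
      cache.Get(1);           // Marks key 1 most-recently-used.
      cache.Put(3, "three");  // Evicts key 2, now the least-recently-used.
    }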
diff --git a/base/critical_closure.h b/base/critical_closure.h
index 94c618d..1b10cde 100644
--- a/base/critical_closure.h
+++ b/base/critical_closure.h
@@ -5,8 +5,6 @@
 #ifndef BASE_CRITICAL_CLOSURE_H_
 #define BASE_CRITICAL_CLOSURE_H_
 
-#include <utility>
-
 #include "base/callback.h"
 #include "base/macros.h"
 #include "build/build_config.h"
@@ -29,13 +27,13 @@
 // |ios::ScopedCriticalAction|.
 class CriticalClosure {
  public:
-  explicit CriticalClosure(OnceClosure closure);
+  explicit CriticalClosure(const Closure& closure);
   ~CriticalClosure();
   void Run();
 
  private:
   ios::ScopedCriticalAction critical_action_;
-  OnceClosure closure_;
+  Closure closure_;
 
   DISALLOW_COPY_AND_ASSIGN(CriticalClosure);
 };
@@ -57,14 +55,13 @@
 // background running time, |MakeCriticalClosure| should be applied on them
 // before posting.
 #if defined(OS_IOS)
-inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
+inline Closure MakeCriticalClosure(const Closure& closure) {
   DCHECK(internal::IsMultiTaskingSupported());
-  return base::BindOnce(
-      &internal::CriticalClosure::Run,
-      Owned(new internal::CriticalClosure(std::move(closure))));
+  return base::Bind(&internal::CriticalClosure::Run,
+                    Owned(new internal::CriticalClosure(closure)));
 }
 #else  // defined(OS_IOS)
-inline OnceClosure MakeCriticalClosure(OnceClosure closure) {
+inline Closure MakeCriticalClosure(const Closure& closure) {
   // No-op for platforms where the application does not need to acquire
   // background time for closures to finish when it goes into the background.
   return closure;
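Typical use, per the comment above: wrap a closure before posting it so iOS grants background time for it to finish. Illustrative only; SaveStateToDisk is a hypothetical function and |task_runner| any task runner:

    task_runner->PostTask(
        FROM_HERE,
        base::MakeCriticalClosure(base::Bind(&SaveStateToDisk)));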
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
index 5081c1c..40e9b95 100644
--- a/base/debug/activity_tracker.cc
+++ b/base/debug/activity_tracker.cc
@@ -23,7 +23,6 @@
 #include "base/process/process_handle.h"
 #include "base/stl_util.h"
 #include "base/strings/string_util.h"
-#include "base/strings/utf_string_conversions.h"
 #include "base/threading/platform_thread.h"
 
 namespace base {
@@ -31,13 +30,18 @@
 
 namespace {
 
+// A number that identifies the memory as having been initialized. It's
+// arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
+// A version number is added on so that major structure changes won't try to
+// read an older version (since the cookie won't match).
+const uint32_t kHeaderCookie = 0xC0029B24UL + 2;  // v2
+
 // The minimum depth a stack should support.
 const int kMinStackDepth = 2;
 
 // The amount of memory set aside for holding arbitrary user data (key/value
 // pairs) globally or associated with ActivityData entries.
 const size_t kUserDataSize = 1 << 10;     // 1 KiB
-const size_t kProcessDataSize = 4 << 10;  // 4 KiB
 const size_t kGlobalDataSize = 16 << 10;  // 16 KiB
 const size_t kMaxUserDataNameLength =
     static_cast<size_t>(std::numeric_limits<uint8_t>::max());
@@ -45,13 +49,6 @@
 // A constant used to indicate that module information is changing.
 const uint32_t kModuleInformationChanging = 0x80000000;
 
-// The key used to record process information.
-const char kProcessPhaseDataKey[] = "process-phase";
-
-// An atomically incrementing number, used to check for recreations of objects
-// in the same memory space.
-StaticAtomicSequenceNumber g_next_id;
-
 union ThreadRef {
   int64_t as_id;
 #if defined(OS_WIN)
@@ -67,43 +64,6 @@
 #endif
 };
 
-// Gets the next non-zero identifier. It is only unique within a process.
-uint32_t GetNextDataId() {
-  uint32_t id;
-  while ((id = g_next_id.GetNext()) == 0)
-    ;
-  return id;
-}
-
-// Gets the current process-id, either from the GlobalActivityTracker if it
-// exists (where the PID can be defined for testing) or from the system if
-// there isn't such.
-int64_t GetProcessId() {
-  GlobalActivityTracker* global = GlobalActivityTracker::Get();
-  if (global)
-    return global->process_id();
-  return GetCurrentProcId();
-}
-
-// Finds and reuses a specific allocation or creates a new one.
-PersistentMemoryAllocator::Reference AllocateFrom(
-    PersistentMemoryAllocator* allocator,
-    uint32_t from_type,
-    size_t size,
-    uint32_t to_type) {
-  PersistentMemoryAllocator::Iterator iter(allocator);
-  PersistentMemoryAllocator::Reference ref;
-  while ((ref = iter.GetNextOfType(from_type)) != 0) {
-    DCHECK_LE(size, allocator->GetAllocSize(ref));
-    // This can fail if another thread has just taken it. It is assumed that
-    // the memory is cleared during the "free" operation.
-    if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
-      return ref;
-  }
-
-  return allocator->Allocate(size, to_type);
-}
-
 // Determines the previous aligned index.
 size_t RoundDownToAlignment(size_t index, size_t alignment) {
   return index & (0 - alignment);
@@ -114,43 +74,8 @@
   return (index + (alignment - 1)) & (0 - alignment);
 }
 
-// Converts "tick" timing into wall time.
-Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
-  return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
-}
-
 }  // namespace
 
-OwningProcess::OwningProcess() {}
-OwningProcess::~OwningProcess() {}
-
-void OwningProcess::Release_Initialize(int64_t pid) {
-  uint32_t old_id = data_id.load(std::memory_order_acquire);
-  DCHECK_EQ(0U, old_id);
-  process_id = pid != 0 ? pid : GetProcessId();
-  create_stamp = Time::Now().ToInternalValue();
-  data_id.store(GetNextDataId(), std::memory_order_release);
-}
-
-void OwningProcess::SetOwningProcessIdForTesting(int64_t pid, int64_t stamp) {
-  DCHECK_NE(0U, data_id);
-  process_id = pid;
-  create_stamp = stamp;
-}
-
-// static
-bool OwningProcess::GetOwningProcessId(const void* memory,
-                                       int64_t* out_id,
-                                       int64_t* out_stamp) {
-  const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
-  uint32_t id = info->data_id.load(std::memory_order_acquire);
-  if (id == 0)
-    return false;
-
-  *out_id = info->process_id;
-  *out_stamp = info->create_stamp;
-  return id == info->data_id.load(std::memory_order_seq_cst);
-}
 
 // It doesn't matter what is contained in this (though it will be all zeros)
 // as only the address of it is important.
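The GetOwningProcessId() helper deleted above used a seqlock-style check to read fields from memory that another process may be reusing concurrently. The shape of that pattern, annotated (names as in the deleted code):

    uint32_t id = info->data_id.load(std::memory_order_acquire);
    if (id == 0)
      return false;                  // Block was never initialized.
    *out_id = info->process_id;      // Copy the payload fields...
    *out_stamp = info->create_stamp;
    // ...then re-read the id: if it changed, the block was reused mid-read
    // and the copied values must be discarded by the caller.
    return id == info->data_id.load(std::memory_order_seq_cst);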
@@ -321,42 +246,32 @@
   return ref_value_;
 }
 
-// These are required because std::atomic is (currently) not a POD type and
-// thus clang requires explicit out-of-line constructors and destructors even
-// when they do nothing.
 ActivityUserData::ValueInfo::ValueInfo() {}
 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
 ActivityUserData::ValueInfo::~ValueInfo() {}
-ActivityUserData::MemoryHeader::MemoryHeader() {}
-ActivityUserData::MemoryHeader::~MemoryHeader() {}
-ActivityUserData::FieldHeader::FieldHeader() {}
-ActivityUserData::FieldHeader::~FieldHeader() {}
 
-ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0, -1) {}
+StaticAtomicSequenceNumber ActivityUserData::next_id_;
 
-ActivityUserData::ActivityUserData(void* memory, size_t size, int64_t pid)
+ActivityUserData::ActivityUserData(void* memory, size_t size)
     : memory_(reinterpret_cast<char*>(memory)),
       available_(RoundDownToAlignment(size, kMemoryAlignment)),
-      header_(reinterpret_cast<MemoryHeader*>(memory)),
-      orig_data_id(0),
-      orig_process_id(0),
-      orig_create_stamp(0) {
+      id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) {
   // It's possible that no user data is being stored.
   if (!memory_)
     return;
 
-  static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
-  DCHECK_LT(sizeof(MemoryHeader), available_);
-  if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
-    header_->owner.Release_Initialize(pid);
-  memory_ += sizeof(MemoryHeader);
-  available_ -= sizeof(MemoryHeader);
-
-  // Make a copy of identifying information for later comparison.
-  *const_cast<uint32_t*>(&orig_data_id) =
-      header_->owner.data_id.load(std::memory_order_acquire);
-  *const_cast<int64_t*>(&orig_process_id) = header_->owner.process_id;
-  *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;
+  DCHECK_LT(kMemoryAlignment, available_);
+  if (id_->load(std::memory_order_relaxed) == 0) {
+    // Generate a new ID and store it in the first 32-bit word of memory_.
+    // |id_| must be non-zero for non-sink instances.
+    uint32_t id;
+    while ((id = next_id_.GetNext()) == 0)
+      ;
+    id_->store(id, std::memory_order_relaxed);
+    DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
+  }
+  memory_ += kMemoryAlignment;
+  available_ -= kMemoryAlignment;
 
   // If there is already data present, load that. This allows the same class
   // to be used for analysis through snapshots.
@@ -365,85 +280,6 @@
 
 ActivityUserData::~ActivityUserData() {}
 
-bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
-  DCHECK(output_snapshot);
-  DCHECK(output_snapshot->empty());
-
-  // Find any new data that may have been added by an active instance of this
-  // class that is adding records.
-  ImportExistingData();
-
-  // Add all the values to the snapshot.
-  for (const auto& entry : values_) {
-    TypedValue value;
-    const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
-    value.type_ = entry.second.type;
-    DCHECK_GE(entry.second.extent, size);
-
-    switch (entry.second.type) {
-      case RAW_VALUE:
-      case STRING_VALUE:
-        value.long_value_ =
-            std::string(reinterpret_cast<char*>(entry.second.memory), size);
-        break;
-      case RAW_VALUE_REFERENCE:
-      case STRING_VALUE_REFERENCE: {
-        ReferenceRecord* ref =
-            reinterpret_cast<ReferenceRecord*>(entry.second.memory);
-        value.ref_value_ = StringPiece(
-            reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
-            static_cast<size_t>(ref->size));
-      } break;
-      case BOOL_VALUE:
-      case CHAR_VALUE:
-        value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
-        break;
-      case SIGNED_VALUE:
-      case UNSIGNED_VALUE:
-        value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
-        break;
-      case END_OF_VALUES:  // Included for completeness purposes.
-        NOTREACHED();
-    }
-    auto inserted = output_snapshot->insert(
-        std::make_pair(entry.second.name.as_string(), std::move(value)));
-    DCHECK(inserted.second);  // True if inserted, false if existed.
-  }
-
-  // Another import attempt will validate that the underlying memory has not
-  // been reused for another purpose. Entries added since the first import
-  // will be ignored here but will be returned if another snapshot is created.
-  ImportExistingData();
-  if (!memory_) {
-    output_snapshot->clear();
-    return false;
-  }
-
-  // Successful snapshot.
-  return true;
-}
-
-const void* ActivityUserData::GetBaseAddress() const {
-  // The |memory_| pointer advances as elements are written but the |header_|
-  // value is always at the start of the block so just return that.
-  return header_;
-}
-
-void ActivityUserData::SetOwningProcessIdForTesting(int64_t pid,
-                                                    int64_t stamp) {
-  if (!header_)
-    return;
-  header_->owner.SetOwningProcessIdForTesting(pid, stamp);
-}
-
-// static
-bool ActivityUserData::GetOwningProcessId(const void* memory,
-                                          int64_t* out_id,
-                                          int64_t* out_stamp) {
-  const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
-  return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
-}
-
 void ActivityUserData::Set(StringPiece name,
                            ValueType type,
                            const void* memory,
@@ -472,13 +308,13 @@
     // following field will be aligned properly.
     size_t name_size = name.length();
     size_t name_extent =
-        RoundUpToAlignment(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
-        sizeof(FieldHeader);
+        RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
+        sizeof(Header);
     size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
 
     // The "base size" is the size of the header and (padded) string key. Stop
     // now if there's not room enough for even this.
-    size_t base_size = sizeof(FieldHeader) + name_extent;
+    size_t base_size = sizeof(Header) + name_extent;
     if (base_size > available_)
       return;
 
@@ -502,7 +338,7 @@
     }
 
     // Allocate a chunk of memory.
-    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
+    Header* header = reinterpret_cast<Header*>(memory_);
     memory_ += full_size;
     available_ -= full_size;
 
@@ -512,9 +348,9 @@
     DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
     header->name_size = static_cast<uint8_t>(name_size);
     header->record_size = full_size;
-    char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
+    char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
     void* value_memory =
-        reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
+        reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
     memcpy(name_memory, name.data(), name_size);
     header->type.store(type, std::memory_order_release);
 
@@ -528,7 +364,7 @@
     info->name = persistent_name;
     info->memory = value_memory;
     info->size_ptr = &header->value_size;
-    info->extent = full_size - sizeof(FieldHeader) - name_extent;
+    info->extent = full_size - sizeof(Header) - name_extent;
     info->type = type;
   }
 
@@ -553,12 +389,8 @@
 }
 
 void ActivityUserData::ImportExistingData() const {
-  // It's possible that no user data is being stored.
-  if (!memory_)
-    return;
-
-  while (available_ > sizeof(FieldHeader)) {
-    FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
+  while (available_ > sizeof(Header)) {
+    Header* header = reinterpret_cast<Header*>(memory_);
     ValueType type =
         static_cast<ValueType>(header->type.load(std::memory_order_acquire));
     if (type == END_OF_VALUES)
@@ -566,8 +398,8 @@
     if (header->record_size > available_)
       return;
 
-    size_t value_offset = RoundUpToAlignment(
-        sizeof(FieldHeader) + header->name_size, kMemoryAlignment);
+    size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size,
+                                             kMemoryAlignment);
     if (header->record_size == value_offset &&
         header->value_size.load(std::memory_order_relaxed) == 1) {
       value_offset -= 1;
@@ -576,7 +408,7 @@
       return;
 
     ValueInfo info;
-    info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
+    info.name = StringPiece(memory_ + sizeof(Header), header->name_size);
     info.type = type;
     info.memory = memory_ + value_offset;
     info.size_ptr = &header->value_size;
@@ -588,14 +420,60 @@
     memory_ += header->record_size;
     available_ -= header->record_size;
   }
+}
 
-  // Check if memory has been completely reused.
-  if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
-      header_->owner.process_id != orig_process_id ||
-      header_->owner.create_stamp != orig_create_stamp) {
-    memory_ = nullptr;
-    values_.clear();
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
+  DCHECK(output_snapshot);
+  DCHECK(output_snapshot->empty());
+
+  // Find any new data that may have been added by an active instance of this
+  // class that is adding records.
+  ImportExistingData();
+
+  for (const auto& entry : values_) {
+    TypedValue value;
+    value.type_ = entry.second.type;
+    DCHECK_GE(entry.second.extent,
+              entry.second.size_ptr->load(std::memory_order_relaxed));
+
+    switch (entry.second.type) {
+      case RAW_VALUE:
+      case STRING_VALUE:
+        value.long_value_ =
+            std::string(reinterpret_cast<char*>(entry.second.memory),
+                        entry.second.size_ptr->load(std::memory_order_relaxed));
+        break;
+      case RAW_VALUE_REFERENCE:
+      case STRING_VALUE_REFERENCE: {
+        ReferenceRecord* ref =
+            reinterpret_cast<ReferenceRecord*>(entry.second.memory);
+        value.ref_value_ = StringPiece(
+            reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
+            static_cast<size_t>(ref->size));
+      } break;
+      case BOOL_VALUE:
+      case CHAR_VALUE:
+        value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
+        break;
+      case SIGNED_VALUE:
+      case UNSIGNED_VALUE:
+        value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
+        break;
+      case END_OF_VALUES:  // Included for completeness purposes.
+        NOTREACHED();
+    }
+    auto inserted = output_snapshot->insert(
+        std::make_pair(entry.second.name.as_string(), std::move(value)));
+    DCHECK(inserted.second);  // True if inserted, false if existed.
   }
+
+  return true;
+}
+
+const void* ActivityUserData::GetBaseAddress() {
+  // The |memory_| pointer advances as elements are written but the |id_|
+  // value is always at the start of the block so just return that.
+  return id_;
 }
 
 // This information is kept for every thread that is tracked. It is filled
@@ -607,16 +485,27 @@
       GlobalActivityTracker::kTypeIdActivityTracker;
 
   // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize =
-      OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
-      72;
+  static constexpr size_t kExpectedInstanceSize = 80;
 
-  // This information uniquely identifies a process.
-  OwningProcess owner;
+  // This unique number indicates a valid initialization of the memory.
+  std::atomic<uint32_t> cookie;
 
-  // The thread-id (thread_ref.as_id) to which this data belongs. This number
-  // is not guaranteed to mean anything but combined with the process-id from
-  // OwningProcess is unique among all active trackers.
+  // The number of Activity slots (spaces that can hold an Activity) that
+  // immediately follow this structure in memory.
+  uint32_t stack_slots;
+
+  // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
+  // These identifiers are not guaranteed to mean anything but are unique, in
+  // combination, among all active trackers. It would be nice to always have
+  // the process_id be a 64-bit value but the necessity of having it atomic
+  // (for the memory barriers it provides) limits it to the natural word size
+  // of the machine.
+#ifdef ARCH_CPU_64_BITS
+  std::atomic<int64_t> process_id;
+#else
+  std::atomic<int32_t> process_id;
+  int32_t process_id_padding;
+#endif
   ThreadRef thread_ref;
 
   // The start-time and start-ticks when the data was created. Each activity
@@ -625,19 +514,12 @@
   int64_t start_time;
   int64_t start_ticks;
 
-  // The number of Activity slots (spaces that can hold an Activity) that
-  // immediately follow this structure in memory.
-  uint32_t stack_slots;
-
-  // Some padding to keep everything 64-bit aligned.
-  uint32_t padding;
-
   // The current depth of the stack. This may be greater than the number of
   // slots. If the depth exceeds the number of slots, the newest entries
   // won't be recorded.
   std::atomic<uint32_t> current_depth;
 
-  // A memory location used to indicate if changes have been made to the data
+  // A memory location used to indicate if changes have been made to the stack
   // that would invalidate an in-progress read of its contents. The active
   // tracker will zero the value whenever something gets popped from the
   // stack. A monitoring tracker can write a non-zero value here, copy the
@@ -645,11 +527,7 @@
   // the contents didn't change while being copied. This can handle concurrent
   // snapshot operations only if each snapshot writes a different bit (which
   // is not the current implementation so no parallel snapshots allowed).
-  std::atomic<uint32_t> data_unchanged;
-
-  // The last "exception" activity. This can't be stored on the stack because
-  // that could get popped as things unwind.
-  Activity last_exception;
+  std::atomic<uint32_t> stack_unchanged;
 
   // The name of the thread (up to a maximum length). Dynamic-length names
   // are not practical since the memory has to come from the same persistent
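The comment above describes a one-shot consistency flag; the monitoring side of that protocol looks roughly like this (CopyActivityStack is a hypothetical helper standing in for the real snapshot copy):

    header_->stack_unchanged.store(1, std::memory_order_relaxed);
    CopyActivityStack();  // Copy the Activity slots out of shared memory.
    if (header_->stack_unchanged.load(std::memory_order_acquire) == 0) {
      // A pop raced with the copy; discard the snapshot and try again.
    }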
@@ -718,16 +596,15 @@
                 "ActivityData.data is not 64-bit aligned");
 
   // Provided memory should either be completely initialized or all zeros.
-  if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
+  if (header_->cookie.load(std::memory_order_relaxed) == 0) {
     // This is a new file. Double-check other fields and then initialize.
-    DCHECK_EQ(0, header_->owner.process_id);
-    DCHECK_EQ(0, header_->owner.create_stamp);
+    DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed));
     DCHECK_EQ(0, header_->thread_ref.as_id);
     DCHECK_EQ(0, header_->start_time);
     DCHECK_EQ(0, header_->start_ticks);
     DCHECK_EQ(0U, header_->stack_slots);
     DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
-    DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed));
+    DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
     DCHECK_EQ(0, stack_[0].time_internal);
     DCHECK_EQ(0U, stack_[0].origin_address);
     DCHECK_EQ(0U, stack_[0].call_stack[0]);
@@ -739,6 +616,7 @@
     header_->thread_ref.as_handle =
         PlatformThread::CurrentHandle().platform_handle();
 #endif
+    header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
 
     header_->start_time = base::Time::Now().ToInternalValue();
     header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
@@ -748,7 +626,7 @@
 
     // This is done last so as to guarantee that everything above is "released"
     // by the time this value gets written.
-    header_->owner.Release_Initialize();
+    header_->cookie.store(kHeaderCookie, std::memory_order_release);
 
     valid_ = true;
     DCHECK(IsValid());
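
The cookie store above is the standard release-publish idiom: every plain
field is written first, then the cookie is released, and the acquire load in
IsValid() (further down) pairs with it. A compact sketch, with kCookie
standing in for kHeaderCookie:

#include <atomic>
#include <cstdint>

constexpr uint32_t kCookie = 0xABCD1234;  // Stand-in for kHeaderCookie.

struct Shared {
  std::atomic<uint32_t> cookie{0};
  int64_t start_time = 0;
};

void Publish(Shared* s, int64_t now) {
  s->start_time = now;                                  // Plain writes first.
  s->cookie.store(kCookie, std::memory_order_release);  // Then publish.
}

bool IsPublished(const Shared* s) {
  // If the cookie is visible, the release/acquire pairing guarantees every
  // write that preceded the store is visible too.
  return s->cookie.load(std::memory_order_acquire) == kCookie;
}
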
@@ -841,28 +719,40 @@
 
   // The stack has shrunk meaning that some other thread trying to copy the
   // contents for reporting purposes could get bad data. That thread would
-  // have written a non-zero value into |data_unchanged|; clearing it here
+  // have written a non-zero value into |stack_unchanged|; clearing it here
   // will let that thread detect that something did change. This needs to
   // happen after the atomic |depth| operation above so a "release" store
   // is required.
-  header_->data_unchanged.store(0, std::memory_order_release);
+  header_->stack_unchanged.store(0, std::memory_order_release);
 }
 
 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
     ActivityId id,
     ActivityTrackerMemoryAllocator* allocator) {
-  // Don't allow user data for lock acquisition as recursion may occur.
-  if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
-    NOTREACHED();
-    return MakeUnique<ActivityUserData>();
+  // User-data is only stored for activities actually held in the stack.
+  if (id < stack_slots_) {
+    // Don't allow user data for lock acquisition as recursion may occur.
+    if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
+      NOTREACHED();
+      return MakeUnique<ActivityUserData>(nullptr, 0);
+    }
+
+    // Get (or reuse) a block of memory and create a real UserData object
+    // on it.
+    PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
+    void* memory =
+        allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
+    if (memory) {
+      std::unique_ptr<ActivityUserData> user_data =
+          MakeUnique<ActivityUserData>(memory, kUserDataSize);
+      stack_[id].user_data_ref = ref;
+      stack_[id].user_data_id = user_data->id();
+      return user_data;
+    }
   }
 
-  // User-data is only stored for activities actually held in the stack.
-  if (id >= stack_slots_)
-    return MakeUnique<ActivityUserData>();
-
-  // Create and return a real UserData object.
-  return CreateUserDataForActivity(&stack_[id], allocator);
+  // Return a dummy object that will still accept (but ignore) Set() calls.
+  return MakeUnique<ActivityUserData>(nullptr, 0);
 }
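
The ActivityUserData(nullptr, 0) fallback is a null-object: callers always
receive a usable instance, and writes to the unbacked one are silently
dropped. Roughly, the guard inside Set() amounts to this (class and member
names are illustrative):

#include <cstddef>
#include <cstring>

class SinkExample {
 public:
  SinkExample(void* memory, size_t size) : memory_(memory), available_(size) {}

  void Set(const void* data, size_t size) {
    // A "sink" constructed with (nullptr, 0) accepts but ignores all writes,
    // so call sites never need to null-check their user-data handle.
    if (!memory_ || size > available_)
      return;
    std::memcpy(memory_, data, size);
  }

 private:
  void* memory_;
  size_t available_;
};
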
 
 bool ThreadActivityTracker::HasUserData(ActivityId id) {
@@ -880,27 +770,12 @@
   }
 }
 
-void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
-                                                    const void* origin,
-                                                    Activity::Type type,
-                                                    const ActivityData& data) {
-  // A thread-checker creates a lock to check the thread-id which means
-  // re-entry into this code if lock acquisitions are being tracked.
-  DCHECK(thread_checker_.CalledOnValidThread());
-
-  // Fill the reusable exception activity.
-  Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
-                     data);
-
-  // The data has changed meaning that some other thread trying to copy the
-  // contents for reporting purposes could get bad data.
-  header_->data_unchanged.store(0, std::memory_order_relaxed);
-}
-
 bool ThreadActivityTracker::IsValid() const {
-  if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
-      header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
-      header_->start_time == 0 || header_->start_ticks == 0 ||
+  if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
+      header_->process_id.load(std::memory_order_relaxed) == 0 ||
+      header_->thread_ref.as_id == 0 ||
+      header_->start_time == 0 ||
+      header_->start_ticks == 0 ||
       header_->stack_slots != stack_slots_ ||
       header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
     return false;
@@ -931,21 +806,20 @@
   output_snapshot->activity_stack.reserve(stack_slots_);
 
   for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
-    // Remember the data IDs to ensure nothing is replaced during the snapshot
-    // operation. Use "acquire" so that all the non-atomic fields of the
-    // structure are valid (at least at the current moment in time).
-    const uint32_t starting_id =
-        header_->owner.data_id.load(std::memory_order_acquire);
-    const int64_t starting_create_stamp = header_->owner.create_stamp;
-    const int64_t starting_process_id = header_->owner.process_id;
+    // Remember the process and thread IDs to ensure they aren't replaced
+    // during the snapshot operation. Use "acquire" to ensure that all the
+    // non-atomic fields of the structure are valid (at least at the current
+    // moment in time).
+    const int64_t starting_process_id =
+        header_->process_id.load(std::memory_order_acquire);
     const int64_t starting_thread_id = header_->thread_ref.as_id;
 
-    // Write a non-zero value to |data_unchanged| so it's possible to detect
+    // Write a non-zero value to |stack_unchanged| so it's possible to detect
     // at the end that nothing has changed since copying the data began. A
     // "cst" operation is required to ensure it occurs before everything else.
     // Using "cst" memory ordering is relatively expensive but this is only
     // done during analysis so doesn't directly affect the worker threads.
-    header_->data_unchanged.store(1, std::memory_order_seq_cst);
+    header_->stack_unchanged.store(1, std::memory_order_seq_cst);
 
     // Fetching the current depth also "acquires" the contents of the stack.
     depth = header_->current_depth.load(std::memory_order_acquire);
@@ -957,26 +831,29 @@
              count * sizeof(Activity));
     }
 
-    // Capture the last exception.
-    memcpy(&output_snapshot->last_exception, &header_->last_exception,
-           sizeof(Activity));
-
-    // TODO(bcwhite): Snapshot other things here.
-
     // Retry if something changed during the copy. A "cst" operation ensures
     // it must happen after all the above operations.
-    if (!header_->data_unchanged.load(std::memory_order_seq_cst))
+    if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
       continue;
 
    // Stack copied. Record its full depth.
     output_snapshot->activity_stack_depth = depth;
 
-    // Get the general thread information.
+    // TODO(bcwhite): Snapshot other things here.
+
+    // Get the general thread information. Loading of "process_id" is
+    // guaranteed to be last so that it's possible to detect below if any
+    // content changed while reading it. It's technically possible for a
+    // thread to end, have its data cleared, and a new thread with the same
+    // IDs to start tracking, all in the time since the ID reads above, but
+    // the chance is so remote that it's not worth the effort and complexity
+    // of protecting against it (perhaps with an "unchanged" field like the
+    // one used for the stack).
     output_snapshot->thread_name =
         std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
-    output_snapshot->create_stamp = header_->owner.create_stamp;
     output_snapshot->thread_id = header_->thread_ref.as_id;
-    output_snapshot->process_id = header_->owner.process_id;
+    output_snapshot->process_id =
+        header_->process_id.load(std::memory_order_seq_cst);
 
     // All characters of the thread-name buffer were copied so as to not break
     // if the trailing NUL were missing. Now limit the length if the actual
@@ -984,11 +861,9 @@
     output_snapshot->thread_name.resize(
         strlen(output_snapshot->thread_name.c_str()));
 
-    // If the data ID has changed then the tracker has exited and the memory
-    // reused by a new one. Try again.
-    if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
-        output_snapshot->create_stamp != starting_create_stamp ||
-        output_snapshot->process_id != starting_process_id ||
+    // If the process or thread ID has changed then the tracker has exited and
+    // the memory reused by a new one. Try again.
+    if (output_snapshot->process_id != starting_process_id ||
         output_snapshot->thread_id != starting_thread_id) {
       continue;
     }
@@ -1004,14 +879,10 @@
     const int64_t start_ticks = header_->start_ticks;
     for (Activity& activity : output_snapshot->activity_stack) {
       activity.time_internal =
-          WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
+          (start_time +
+           TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
               .ToInternalValue();
     }
-    output_snapshot->last_exception.time_internal =
-        WallTimeFromTickTime(start_ticks,
-                             output_snapshot->last_exception.time_internal,
-                             start_time)
-            .ToInternalValue();
 
     // Success!
     return true;
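
The timestamp fix-up in the loop above anchors each monotonic tick reading to
the (start_time, start_ticks) pair captured when the tracker was created.
Stripped of the Time/TimeDelta wrappers, the arithmetic is just:

#include <cstdint>

// All values are Chromium "internal values" (microseconds). start_time is a
// wall-clock reading and start_ticks a monotonic reading taken at the same
// moment, so their difference shifts one axis onto the other.
int64_t TicksToWallTime(int64_t activity_ticks,
                        int64_t start_ticks,
                        int64_t start_time) {
  return start_time + (activity_ticks - start_ticks);
}
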
@@ -1021,48 +892,11 @@
   return false;
 }
 
-const void* ThreadActivityTracker::GetBaseAddress() {
-  return header_;
-}
-
-void ThreadActivityTracker::SetOwningProcessIdForTesting(int64_t pid,
-                                                         int64_t stamp) {
-  header_->owner.SetOwningProcessIdForTesting(pid, stamp);
-}
-
-// static
-bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
-                                               int64_t* out_id,
-                                               int64_t* out_stamp) {
-  const Header* header = reinterpret_cast<const Header*>(memory);
-  return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
-}
-
 // static
 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
   return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
 }
 
-std::unique_ptr<ActivityUserData>
-ThreadActivityTracker::CreateUserDataForActivity(
-    Activity* activity,
-    ActivityTrackerMemoryAllocator* allocator) {
-  DCHECK_EQ(0U, activity->user_data_ref);
-
-  PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
-  void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
-  if (memory) {
-    std::unique_ptr<ActivityUserData> user_data =
-        MakeUnique<ActivityUserData>(memory, kUserDataSize);
-    activity->user_data_ref = ref;
-    activity->user_data_id = user_data->id();
-    return user_data;
-  }
-
-  // Return a dummy object that will still accept (but ignore) Set() calls.
-  return MakeUnique<ActivityUserData>();
-}
-
 // The instantiation of the GlobalActivityTracker object.
 // The object held here will obviously not be destructed at process exit
 // but that's best since PersistentMemoryAllocator objects (that underlie
@@ -1145,9 +979,6 @@
   pickle_size = pickler.size();
   changes.store(0, std::memory_order_relaxed);
 
-  // Initialize the owner info.
-  owner.Release_Initialize();
-
   // Now set those fields that can change.
   return UpdateFrom(info);
 }
@@ -1216,23 +1047,21 @@
       user_data_ =
           tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
     } else {
-      user_data_ = MakeUnique<ActivityUserData>();
+      user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
     }
   }
   return *user_data_;
 }
 
-GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
-                                                              size_t size,
-                                                              int64_t pid)
-    : ActivityUserData(memory, size, pid) {}
+GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size)
+    : ActivityUserData(memory, size) {}
 
-GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
+GlobalActivityTracker::GlobalUserData::~GlobalUserData() {}
 
-void GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
-                                                    ValueType type,
-                                                    const void* memory,
-                                                    size_t size) {
+void GlobalActivityTracker::GlobalUserData::Set(StringPiece name,
+                                                ValueType type,
+                                                const void* memory,
+                                                size_t size) {
   AutoLock lock(data_lock_);
   ActivityUserData::Set(name, type, memory, size);
 }
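
That override is the entire thread-safety story for the global instance:
serialize writers behind a lock and defer to the single-threaded base
implementation. A generic sketch of the pattern (names are illustrative):

#include <cstddef>
#include <mutex>

class UserData {
 public:
  virtual ~UserData() = default;
  virtual void Set(const char* name, const void* memory, std::size_t size) {
    // The base class assumes single-threaded access to its memory block.
  }
};

class LockedUserData : public UserData {
 public:
  void Set(const char* name, const void* memory, std::size_t size) override {
    std::lock_guard<std::mutex> lock(lock_);  // Funnel all writers here.
    UserData::Set(name, memory, size);
  }

 private:
  std::mutex lock_;
};
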
@@ -1255,11 +1084,10 @@
 
 void GlobalActivityTracker::CreateWithAllocator(
     std::unique_ptr<PersistentMemoryAllocator> allocator,
-    int stack_depth,
-    int64_t process_id) {
+    int stack_depth) {
   // There's no need to do anything with the result. It is self-managing.
   GlobalActivityTracker* global_tracker =
-      new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);
+      new GlobalActivityTracker(std::move(allocator), stack_depth);
   // Create a tracker for this thread since it is known.
   global_tracker->CreateTrackerForCurrentThread();
 }
@@ -1285,7 +1113,7 @@
   DCHECK(success);
   CreateWithAllocator(MakeUnique<FilePersistentMemoryAllocator>(
                           std::move(mapped_file), size, id, name, false),
-                      stack_depth, 0);
+                      stack_depth);
 }
 #endif  // !defined(OS_NACL)
 
@@ -1293,37 +1121,11 @@
 void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
                                                   uint64_t id,
                                                   StringPiece name,
-                                                  int stack_depth,
-                                                  int64_t process_id) {
+                                                  int stack_depth) {
   CreateWithAllocator(
-      MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth,
-      process_id);
+      MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth);
 }
 
-// static
-void GlobalActivityTracker::SetForTesting(
-    std::unique_ptr<GlobalActivityTracker> tracker) {
-  CHECK(!subtle::NoBarrier_Load(&g_tracker_));
-  subtle::Release_Store(&g_tracker_,
-                        reinterpret_cast<uintptr_t>(tracker.release()));
-}
-
-// static
-std::unique_ptr<GlobalActivityTracker>
-GlobalActivityTracker::ReleaseForTesting() {
-  GlobalActivityTracker* tracker = Get();
-  if (!tracker)
-    return nullptr;
-
-  // Thread trackers assume that the global tracker is present for some
-  // operations so ensure that there aren't any.
-  tracker->ReleaseTrackerForCurrentThreadForTesting();
-  DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));
-
-  subtle::Release_Store(&g_tracker_, 0);
-  return WrapUnique(tracker);
-};
-
 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
   DCHECK(!this_thread_tracker_.Get());
 
@@ -1380,181 +1182,8 @@
 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
   ThreadActivityTracker* tracker =
       reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
-  if (tracker) {
-    this_thread_tracker_.Set(nullptr);
+  if (tracker)
     delete tracker;
-  }
-}
-
-void GlobalActivityTracker::SetBackgroundTaskRunner(
-    const scoped_refptr<TaskRunner>& runner) {
-  AutoLock lock(global_tracker_lock_);
-  background_task_runner_ = runner;
-}
-
-void GlobalActivityTracker::SetProcessExitCallback(
-    ProcessExitCallback callback) {
-  AutoLock lock(global_tracker_lock_);
-  process_exit_callback_ = callback;
-}
-
-void GlobalActivityTracker::RecordProcessLaunch(
-    ProcessId process_id,
-    const FilePath::StringType& cmd) {
-  const int64_t pid = process_id;
-  DCHECK_NE(GetProcessId(), pid);
-  DCHECK_NE(0, pid);
-
-  base::AutoLock lock(global_tracker_lock_);
-  if (base::ContainsKey(known_processes_, pid)) {
-    // TODO(bcwhite): Measure this in UMA.
-    NOTREACHED() << "Process #" << process_id
-                 << " was previously recorded as \"launched\""
-                 << " with no corresponding exit.";
-    known_processes_.erase(pid);
-  }
-
-#if defined(OS_WIN)
-  known_processes_.insert(std::make_pair(pid, UTF16ToUTF8(cmd)));
-#else
-  known_processes_.insert(std::make_pair(pid, cmd));
-#endif
-}
-
-void GlobalActivityTracker::RecordProcessLaunch(
-    ProcessId process_id,
-    const FilePath::StringType& exe,
-    const FilePath::StringType& args) {
-  const int64_t pid = process_id;
-  if (exe.find(FILE_PATH_LITERAL(" "))) {
-    RecordProcessLaunch(pid, FilePath::StringType(FILE_PATH_LITERAL("\"")) +
-                                 exe + FILE_PATH_LITERAL("\" ") + args);
-  } else {
-    RecordProcessLaunch(pid, exe + FILE_PATH_LITERAL(' ') + args);
-  }
-}
-
-void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
-                                              int exit_code) {
-  const int64_t pid = process_id;
-  DCHECK_NE(GetProcessId(), pid);
-  DCHECK_NE(0, pid);
-
-  scoped_refptr<TaskRunner> task_runner;
-  std::string command_line;
-  {
-    base::AutoLock lock(global_tracker_lock_);
-    task_runner = background_task_runner_;
-    auto found = known_processes_.find(pid);
-    if (found != known_processes_.end()) {
-      command_line = std::move(found->second);
-      known_processes_.erase(found);
-    } else {
-      DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
-    }
-  }
-
-  // Use the current time to differentiate the process that just exited
-  // from any that might be created in the future with the same ID.
-  int64_t now_stamp = Time::Now().ToInternalValue();
-
-  // The persistent allocator is thread-safe so run the iteration and
-  // adjustments on a worker thread if one was provided.
-  if (task_runner && !task_runner->RunsTasksOnCurrentThread()) {
-    task_runner->PostTask(
-        FROM_HERE,
-        Bind(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this), pid,
-             now_stamp, exit_code, Passed(&command_line)));
-    return;
-  }
-
-  CleanupAfterProcess(pid, now_stamp, exit_code, std::move(command_line));
-}
-
-void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
-  process_data().SetInt(kProcessPhaseDataKey, phase);
-}
-
-void GlobalActivityTracker::CleanupAfterProcess(int64_t process_id,
-                                                int64_t exit_stamp,
-                                                int exit_code,
-                                                std::string&& command_line) {
-  // The process may not have exited cleanly, so it's necessary to go through
-  // all the data structures it may have allocated in the persistent memory
-  // segment and mark them as "released". This will allow them to be reused
-  // later on.
-
-  PersistentMemoryAllocator::Iterator iter(allocator_.get());
-  PersistentMemoryAllocator::Reference ref;
-
-  ProcessExitCallback process_exit_callback;
-  {
-    AutoLock lock(global_tracker_lock_);
-    process_exit_callback = process_exit_callback_;
-  }
-  if (process_exit_callback) {
-    // Find the process's user-data record so the process phase can be passed
-    // to the callback.
-    ActivityUserData::Snapshot process_data_snapshot;
-    while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
-      const void* memory = allocator_->GetAsArray<char>(
-          ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
-      int64_t found_id;
-      int64_t create_stamp;
-      if (ActivityUserData::GetOwningProcessId(memory, &found_id,
-                                               &create_stamp)) {
-        if (found_id == process_id && create_stamp < exit_stamp) {
-          const ActivityUserData process_data(const_cast<void*>(memory),
-                                              allocator_->GetAllocSize(ref));
-          process_data.CreateSnapshot(&process_data_snapshot);
-          break;  // No need to look for any others.
-        }
-      }
-    }
-    iter.Reset();  // So it starts anew when used below.
-
-    // Record the process's phase at exit so callback doesn't need to go
-    // searching based on a private key value.
-    ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
-    auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
-    if (phase != process_data_snapshot.end())
-      exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());
-
-    // Perform the callback.
-    process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
-                              std::move(command_line),
-                              std::move(process_data_snapshot));
-  }
-
-  // Find all allocations associated with the exited process and free them.
-  uint32_t type;
-  while ((ref = iter.GetNext(&type)) != 0) {
-    switch (type) {
-      case kTypeIdActivityTracker:
-      case kTypeIdUserDataRecord:
-      case kTypeIdProcessDataRecord:
-      case ModuleInfoRecord::kPersistentTypeId: {
-        const void* memory = allocator_->GetAsArray<char>(
-            ref, type, PersistentMemoryAllocator::kSizeAny);
-        int64_t found_id;
-        int64_t create_stamp;
-
-        // By convention, the OwningProcess structure is always the first
-        // field of the structure so there's no need to handle all the
-        // cases separately.
-        if (OwningProcess::GetOwningProcessId(memory, &found_id,
-                                              &create_stamp)) {
-          // Only change the type to be "free" if the process ID matches and
-          // the creation time is before the exit time (so PID re-use doesn't
-          // cause the erasure of something that is in-use). Memory is cleared
-          // here, rather than when it's needed, so as to limit the impact at
-          // that critical time.
-          if (found_id == process_id && create_stamp < exit_stamp)
-            allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
-        }
-      } break;
-    }
-  }
 }
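
Worth noting about the cleanup code removed above: a record was only freed
when both the process ID matched and the record predated the exit stamp,
which keeps a recycled PID from erasing a new process's live data. The
predicate reduces to:

#include <cstdint>

// True when a record created by |found_id| at |create_stamp| belongs to the
// process that exited at |exit_stamp|; a later stamp means the OS reused the
// PID for a new process whose records must be left alone.
bool BelongsToExitedProcess(int64_t found_id, int64_t create_stamp,
                            int64_t exited_id, int64_t exit_stamp) {
  return found_id == exited_id && create_stamp < exit_stamp;
}
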
 
 void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
@@ -1604,11 +1233,9 @@
 
 GlobalActivityTracker::GlobalActivityTracker(
     std::unique_ptr<PersistentMemoryAllocator> allocator,
-    int stack_depth,
-    int64_t process_id)
+    int stack_depth)
     : allocator_(std::move(allocator)),
       stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
-      process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
       this_thread_tracker_(&OnTLSDestroy),
       thread_tracker_count_(0),
       thread_tracker_allocator_(allocator_.get(),
@@ -1622,38 +1249,25 @@
                            kTypeIdUserDataRecordFree,
                            kUserDataSize,
                            kCachedUserDataMemories,
-                           /*make_iterable=*/true),
-      process_data_(allocator_->GetAsArray<char>(
-                        AllocateFrom(allocator_.get(),
-                                     kTypeIdProcessDataRecordFree,
-                                     kProcessDataSize,
-                                     kTypeIdProcessDataRecord),
-                        kTypeIdProcessDataRecord,
-                        kProcessDataSize),
-                    kProcessDataSize,
-                    process_id_),
+                           /*make_iterable=*/false),
       global_data_(
           allocator_->GetAsArray<char>(
               allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
               kTypeIdGlobalDataRecord,
-              kGlobalDataSize),
-          kGlobalDataSize,
-          process_id_) {
-  DCHECK_NE(0, process_id_);
+              PersistentMemoryAllocator::kSizeAny),
+          kGlobalDataSize) {
+  // Ensure the passed memory is valid and empty (iterator finds nothing).
+  uint32_t type;
+  DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
 
   // Ensure that there is no other global object and then make this one such.
   DCHECK(!g_tracker_);
   subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
 
-  // The data records must be iterable in order to be found by an analyzer.
-  allocator_->MakeIterable(allocator_->GetAsReference(
-      process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));
+  // The global records must be iterable in order to be found by an analyzer.
   allocator_->MakeIterable(allocator_->GetAsReference(
       global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
 
-  // Note that this process has launched.
-  SetProcessPhase(PROCESS_LAUNCHED);
-
   // Fetch and record all activated field trials.
   FieldTrial::ActiveGroups active_groups;
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -1662,7 +1276,7 @@
 }
 
 GlobalActivityTracker::~GlobalActivityTracker() {
-  DCHECK(Get() == nullptr || Get() == this);
+  DCHECK_EQ(Get(), this);
   DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
   subtle::Release_Store(&g_tracker_, 0);
 }
@@ -1683,23 +1297,6 @@
   thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
 }
 
-void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
-                                                const void* origin,
-                                                uint32_t code) {
-  // Get an existing tracker for this thread. It's not possible to create
-  // one at this point because such would involve memory allocations and
-  // other potentially complex operations that can cause failures if done
-  // within an exception handler. In most cases various operations will
-  // have already created the tracker so this shouldn't generally be a
-  // problem.
-  ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
-  if (!tracker)
-    return;
-
-  tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
-                                   ActivityData::ForException(code));
-}
-
 // static
 void GlobalActivityTracker::OnTLSDestroy(void* value) {
   delete reinterpret_cast<ManagedActivityTracker*>(value);
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
index c8cf1e9..719a318 100644
--- a/base/debug/activity_tracker.h
+++ b/base/debug/activity_tracker.h
@@ -23,15 +23,12 @@
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
-#include "base/callback.h"
 #include "base/compiler_specific.h"
 #include "base/gtest_prod_util.h"
 #include "base/location.h"
 #include "base/metrics/persistent_memory_allocator.h"
-#include "base/process/process_handle.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/utf_string_conversions.h"
-#include "base/task_runner.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_checker.h"
 #include "base/threading/thread_local_storage.h"
@@ -44,6 +41,7 @@
 class Lock;
 class PlatformThreadHandle;
 class Process;
+class StaticAtomicSequenceNumber;
 class WaitableEvent;
 
 namespace debug {
@@ -58,48 +56,11 @@
   kActivityCallStackSize = 10,
 };
 
-// A class for keeping all information needed to verify that a structure is
-// associated with a given process.
-struct OwningProcess {
-  OwningProcess();
-  ~OwningProcess();
-
-  // Initializes structure with the current process id and the current time.
-  // These can uniquely identify a process. A unique non-zero data_id will be
-  // set making it possible to tell using atomic reads if the data has changed.
-  void Release_Initialize(int64_t pid = 0);
-
-  // Explicitly sets the process ID.
-  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
-
-  // Gets the associated process ID, in native form, and the creation timestamp
-  // from memory without loading the entire structure for analysis. This will
-  // return false if no valid process ID is available.
-  static bool GetOwningProcessId(const void* memory,
-                                 int64_t* out_id,
-                                 int64_t* out_stamp);
-
-  // SHA1(base::debug::OwningProcess): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0xB1179672 + 1;
-
-  // Expected size for 32/64-bit check by PersistentMemoryAllocator.
-  static constexpr size_t kExpectedInstanceSize = 24;
-
-  std::atomic<uint32_t> data_id;
-  uint32_t padding;
-  int64_t process_id;
-  int64_t create_stamp;
-};
-
 // The data associated with an activity is dependent upon the activity type.
 // This union defines all of the various fields. All fields must be explicitly
 // sized types to ensure no interoperability problems between 32-bit and
 // 64-bit systems.
 union ActivityData {
-  // Expected size for 32/64-bit check.
-  // TODO(bcwhite): VC2015 doesn't allow statics in unions. Fix when it does.
-  // static constexpr size_t kExpectedInstanceSize = 8;
-
   // Generic activities don't have any defined structure.
   struct {
     uint32_t id;   // An arbitrary identifier used for association.
@@ -120,9 +81,6 @@
   struct {
     int64_t process_id;  // A unique identifier for a process.
   } process;
-  struct {
-    uint32_t code;  // An "exception code" number.
-  } exception;
 
   // These methods create an ActivityData object from the appropriate
   // parameters. Objects of this type should always be created this way to
@@ -168,12 +126,6 @@
     data.process.process_id = id;
     return data;
   }
-
-  static ActivityData ForException(const uint32_t code) {
-    ActivityData data;
-    data.exception.code = code;
-    return data;
-  }
 };
 
 // A "null" activity-data that can be passed to indicate "do not change".
@@ -285,9 +237,6 @@
     ACT_PROCESS_START = ACT_PROCESS,
     ACT_PROCESS_WAIT,
 
-    // Exception activities indicate the occurrence of something unexpected.
-    ACT_EXCEPTION = 14 << 4,
-
     // Generic activities are user defined and can be anything.
     ACT_GENERIC = 15 << 4,
 
@@ -344,9 +293,7 @@
 // This class manages arbitrary user data that can be associated with activities
 // done by a thread by supporting key/value pairs of any type. This can provide
 // additional information during debugging. It is also used to store arbitrary
-// global data. All updates must be done from the same thread though other
-// threads can read it concurrently if they create new objects using the same
-// memory.
+// global data. All updates must be done from the same thread.
 class BASE_EXPORT ActivityUserData {
  public:
   // List of known value type. REFERENCE types must immediately follow the non-
@@ -393,7 +340,7 @@
    private:
     friend class ActivityUserData;
 
-    ValueType type_ = END_OF_VALUES;
+    ValueType type_;
     uint64_t short_value_;    // Used to hold copy of numbers, etc.
     std::string long_value_;  // Used to hold copy of raw/string data.
     StringPiece ref_value_;   // Used to hold reference to external data.
@@ -401,17 +348,14 @@
 
   using Snapshot = std::map<std::string, TypedValue>;
 
-  // Initialize the object either as a "sink" that just accepts and discards
-  // data or an active one that writes to a given (zeroed) memory block.
-  ActivityUserData();
-  ActivityUserData(void* memory, size_t size, int64_t pid = 0);
+  ActivityUserData(void* memory, size_t size);
   virtual ~ActivityUserData();
 
   // Gets the unique ID number for this user data. If this changes then the
   // contents have been overwritten by another thread. The return value is
   // always non-zero unless it's actually just a data "sink".
   uint32_t id() const {
-    return header_ ? header_->owner.data_id.load(std::memory_order_relaxed) : 0;
+    return memory_ ? id_->load(std::memory_order_relaxed) : 0;
   }
 
   // Writes a |value| (as part of a key/value pair) that will be included with
@@ -459,23 +403,13 @@
 
   // Creates a snapshot of the key/value pairs contained within. The returned
   // data will be fixed, independent of whatever changes afterward. There is
-  // some protection against concurrent modification. This will return false
-  // if the data is invalid or if a complete overwrite of the contents is
-  // detected.
+  // protection against concurrent modification of the values but no protection
+  // against a complete overwrite of the contents; the caller must ensure that
+  // the memory segment is not going to be re-initialized while this runs.
   bool CreateSnapshot(Snapshot* output_snapshot) const;
 
   // Gets the base memory address used for storing data.
-  const void* GetBaseAddress() const;
-
-  // Explicitly sets the process ID.
-  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
-
-  // Gets the associated process ID, in native form, and the creation timestamp
-  // from tracker memory without loading the entire structure for analysis. This
-  // will return false if no valid process ID is available.
-  static bool GetOwningProcessId(const void* memory,
-                                 int64_t* out_id,
-                                 int64_t* out_stamp);
+  const void* GetBaseAddress();
 
  protected:
   virtual void Set(StringPiece name,
@@ -488,31 +422,20 @@
 
   enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
 
-  // A structure that defines the structure header in memory.
-  struct MemoryHeader {
-    MemoryHeader();
-    ~MemoryHeader();
-
-    OwningProcess owner;  // Information about the creating process.
-  };
-
-  // Header to a key/value record held in persistent memory.
-  struct FieldHeader {
-    FieldHeader();
-    ~FieldHeader();
-
-    std::atomic<uint8_t> type;         // Encoded ValueType
-    uint8_t name_size;                 // Length of "name" key.
-    std::atomic<uint16_t> value_size;  // Actual size of the stored value.
-    uint16_t record_size;              // Total storage of name, value, header.
-  };
-
   // A structure used to reference data held outside of persistent memory.
   struct ReferenceRecord {
     uint64_t address;
     uint64_t size;
   };
 
+  // Header to a key/value record held in persistent memory.
+  struct Header {
+    std::atomic<uint8_t> type;         // Encoded ValueType
+    uint8_t name_size;                 // Length of "name" key.
+    std::atomic<uint16_t> value_size;  // Actual size of the stored value.
+    uint16_t record_size;              // Total storage of name, value, header.
+  };
+
  // This record is used to hold known values in a map so that they can be
  // found and overwritten later.
   struct ValueInfo {
@@ -533,10 +456,7 @@
                     size_t size);
 
   // Loads any data already in the memory segment. This allows for accessing
-  // records created previously. If this detects that the underlying data has
-  // gone away (cleared by another thread/process), it will invalidate all the
-  // data in this object and turn it into simple "sink" with no values to
-  // return.
+  // records created previously.
   void ImportExistingData() const;
 
   // A map of all the values within the memory block, keyed by name for quick
@@ -550,14 +470,12 @@
   mutable char* memory_;
   mutable size_t available_;
 
-  // A pointer to the memory header for this instance.
-  MemoryHeader* const header_;
+  // A pointer to the unique ID for this instance.
+  std::atomic<uint32_t>* const id_;
 
-  // These hold values used when initially creating the object. They are
-  // compared against current header values to check for outside changes.
-  const uint32_t orig_data_id;
-  const int64_t orig_process_id;
-  const int64_t orig_create_stamp;
+  // This ID is used to create unique identifiers for user data so that it's
+  // possible to tell if the information has been overwritten.
+  static StaticAtomicSequenceNumber next_id_;
 
   DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
 };
@@ -593,9 +511,6 @@
     // truncated due to internal length limitations.
     std::string thread_name;
 
-    // The timestamp at which this process was created.
-    int64_t create_stamp;
-
     // The process and thread IDs. These values have no meaning other than
     // they uniquely identify a running process and a running thread within
     // that process.  Thread-IDs can be re-used across different processes
@@ -610,9 +525,6 @@
     // The current total depth of the activity stack, including those later
     // entries not recorded in the |activity_stack| vector.
     uint32_t activity_stack_depth = 0;
-
-    // The last recorded "exception" activity.
-    Activity last_exception;
   };
 
   // This is the base class for having the compiler manage an activity on the
@@ -696,12 +608,6 @@
   void ReleaseUserData(ActivityId id,
                        ActivityTrackerMemoryAllocator* allocator);
 
-  // Save an exception. |origin| is the location of the exception.
-  void RecordExceptionActivity(const void* program_counter,
-                               const void* origin,
-                               Activity::Type type,
-                               const ActivityData& data);
-
   // Returns whether the current data is valid or not. It is not valid if
   // corruption has been detected in the header or other data structures.
   bool IsValid() const;
@@ -712,19 +618,6 @@
   // implementation does not support concurrent snapshot operations.
   bool CreateSnapshot(Snapshot* output_snapshot) const;
 
-  // Gets the base memory address used for storing data.
-  const void* GetBaseAddress();
-
-  // Explicitly sets the process ID.
-  void SetOwningProcessIdForTesting(int64_t pid, int64_t stamp);
-
-  // Gets the associated process ID, in native form, and the creation timestamp
-  // from tracker memory without loading the entire structure for analysis. This
-  // will return false if no valid process ID is available.
-  static bool GetOwningProcessId(const void* memory,
-                                 int64_t* out_id,
-                                 int64_t* out_stamp);
-
   // Calculates the memory size required for a given stack depth, including
   // the internal header structure for the stack.
   static size_t SizeForStackDepth(int stack_depth);
@@ -732,10 +625,6 @@
  private:
   friend class ActivityTrackerTest;
 
-  std::unique_ptr<ActivityUserData> CreateUserDataForActivity(
-      Activity* activity,
-      ActivityTrackerMemoryAllocator* allocator);
-
   Header* const header_;        // Pointer to the Header structure.
   Activity* const stack_;       // The stack of activities.
   const uint32_t stack_slots_;  // The total number of stack slots.
@@ -760,45 +649,15 @@
   // will be safely ignored. These are public so that an external process
   // can recognize records of this type within an allocator.
   enum : uint32_t {
-    kTypeIdActivityTracker = 0x5D7381AF + 4,   // SHA1(ActivityTracker) v4
-    kTypeIdUserDataRecord = 0x615EDDD7 + 3,    // SHA1(UserDataRecord) v3
+    kTypeIdActivityTracker = 0x5D7381AF + 3,   // SHA1(ActivityTracker) v3
+    kTypeIdUserDataRecord = 0x615EDDD7 + 2,    // SHA1(UserDataRecord) v2
     kTypeIdGlobalLogMessage = 0x4CF434F9 + 1,  // SHA1(GlobalLogMessage) v1
-    kTypeIdProcessDataRecord = kTypeIdUserDataRecord + 0x100,
-    kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 0x200,
+    kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 1000,
 
     kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
     kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
-    kTypeIdProcessDataRecordFree = ~kTypeIdProcessDataRecord,
   };
 
-  // An enumeration of common process life stages. All entries are given an
-  // explicit number so they are known and remain constant; this allows for
-  // cross-version analysis either locally or on a server.
-  enum ProcessPhase : int {
-    // The phases are generic and may have meaning to the tracker.
-    PROCESS_PHASE_UNKNOWN = 0,
-    PROCESS_LAUNCHED = 1,
-    PROCESS_LAUNCH_FAILED = 2,
-    PROCESS_EXITED_CLEANLY = 10,
-    PROCESS_EXITED_WITH_CODE = 11,
-
-    // Add here whatever is useful for analysis.
-    PROCESS_SHUTDOWN_STARTED = 100,
-    PROCESS_MAIN_LOOP_STARTED = 101,
-  };
-
-  // A callback made when a process exits to allow immediate analysis of its
-  // data. Note that the system may reuse the |process_id| so when fetching
-  // records it's important to ensure that what is returned was created before
-  // the |exit_stamp|. Movement of |process_data| information is allowed.
-  using ProcessExitCallback =
-      Callback<void(int64_t process_id,
-                    int64_t exit_stamp,
-                    int exit_code,
-                    ProcessPhase exit_phase,
-                    std::string&& command_line,
-                    ActivityUserData::Snapshot&& process_data)>;
-
   // This structure contains information about a loaded module, as shown to
   // users of the tracker.
   struct BASE_EXPORT ModuleInfo {
@@ -869,12 +728,9 @@
   // Creates a global tracker using a given persistent-memory |allocator| and
   // providing the given |stack_depth| to each thread tracker it manages. The
   // created object is activated so tracking will begin immediately upon return.
-  // The |process_id| can be zero to get it from the OS but is taken for testing
-  // purposes.
   static void CreateWithAllocator(
       std::unique_ptr<PersistentMemoryAllocator> allocator,
-      int stack_depth,
-      int64_t process_id);
+      int stack_depth);
 
 #if !defined(OS_NACL)
   // Like above but internally creates an allocator around a disk file with
@@ -889,13 +745,11 @@
 #endif  // !defined(OS_NACL)
 
   // Like above but internally creates an allocator using local heap memory of
-  // the specified size. This is used primarily for unit tests. The |process_id|
-  // can be zero to get it from the OS but is taken for testing purposes.
+  // the specified size. This is used primarily for unit tests.
   static void CreateWithLocalMemory(size_t size,
                                     uint64_t id,
                                     StringPiece name,
-                                    int stack_depth,
-                                    int64_t process_id);
+                                    int stack_depth);
 
   // Gets the global activity-tracker or null if none exists.
   static GlobalActivityTracker* Get() {
@@ -903,15 +757,6 @@
         subtle::Acquire_Load(&g_tracker_));
   }
 
-  // Sets the global activity-tracker for testing purposes.
-  static void SetForTesting(std::unique_ptr<GlobalActivityTracker> tracker);
-
-  // This access to the persistent allocator is only for testing; it extracts
-  // the global tracker completely. All tracked threads must exit before
-  // calling this. Tracking for the current thread will be automatically
-  // stopped.
-  static std::unique_ptr<GlobalActivityTracker> ReleaseForTesting();
-
   // Convenience method for determining if a global tracker is active.
   static bool IsEnabled() { return Get() != nullptr; }
 
@@ -944,50 +789,6 @@
   // Releases the activity-tracker for the current thread (for testing only).
   void ReleaseTrackerForCurrentThreadForTesting();
 
-  // Sets a task-runner that can be used for background work.
-  void SetBackgroundTaskRunner(const scoped_refptr<TaskRunner>& runner);
-
-  // Sets an optional callback to be called when a process exits.
-  void SetProcessExitCallback(ProcessExitCallback callback);
-
-  // Manages process lifetimes. These are called by the process that launched
-  // and reaped the subprocess, not the subprocess itself. If it is expensive
-  // to generate the parameters, Get() the global tracker and call these
-  // conditionally rather than using the static versions.
-  void RecordProcessLaunch(ProcessId process_id,
-                           const FilePath::StringType& cmd);
-  void RecordProcessLaunch(ProcessId process_id,
-                           const FilePath::StringType& exe,
-                           const FilePath::StringType& args);
-  void RecordProcessExit(ProcessId process_id, int exit_code);
-  static void RecordProcessLaunchIfEnabled(ProcessId process_id,
-                                           const FilePath::StringType& cmd) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordProcessLaunch(process_id, cmd);
-  }
-  static void RecordProcessLaunchIfEnabled(ProcessId process_id,
-                                           const FilePath::StringType& exe,
-                                           const FilePath::StringType& args) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordProcessLaunch(process_id, exe, args);
-  }
-  static void RecordProcessExitIfEnabled(ProcessId process_id, int exit_code) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordProcessExit(process_id, exit_code);
-  }
-
-  // Sets the "phase" of the current process, useful for knowing what it was
-  // doing when it last reported.
-  void SetProcessPhase(ProcessPhase phase);
-  static void SetProcessPhaseIfEnabled(ProcessPhase phase) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->SetProcessPhase(phase);
-  }
-
  // Records a log message. The current implementation does NOT recycle these,
  // so only store critical messages such as FATAL ones.
   void RecordLogMessage(StringPiece message);
@@ -1017,23 +818,7 @@
       tracker->RecordFieldTrial(trial_name, group_name);
   }
 
-  // Record exception information for the current thread.
-  ALWAYS_INLINE
-  void RecordException(const void* origin, uint32_t code) {
-    return RecordExceptionImpl(::tracked_objects::GetProgramCounter(), origin,
-                               code);
-  }
-
-  // Gets the process ID used for tracking. This is typically the same as what
-  // the OS thinks is the current process but can be overridden for testing.
-  int64_t process_id() { return process_id_; };
-
-  // Accesses the process data record for storing arbitrary key/value pairs.
-  // Updates to this are thread-safe.
-  ActivityUserData& process_data() { return process_data_; }
-
   // Accesses the global data record for storing arbitrary key/value pairs.
-  // Updates to this are thread-safe.
   ActivityUserData& global_data() { return global_data_; }
 
  private:
@@ -1052,10 +837,10 @@
   // A wrapper around ActivityUserData that is thread-safe and thus can be used
   // in the global scope without the requirement of being called from only one
   // thread.
-  class ThreadSafeUserData : public ActivityUserData {
+  class GlobalUserData : public ActivityUserData {
    public:
-    ThreadSafeUserData(void* memory, size_t size, int64_t pid = 0);
-    ~ThreadSafeUserData() override;
+    GlobalUserData(void* memory, size_t size);
+    ~GlobalUserData() override;
 
    private:
     void Set(StringPiece name,
@@ -1065,7 +850,7 @@
 
     Lock data_lock_;
 
-    DISALLOW_COPY_AND_ASSIGN(ThreadSafeUserData);
+    DISALLOW_COPY_AND_ASSIGN(GlobalUserData);
   };
 
   // State of a module as stored in persistent memory. This supports a single
@@ -1077,8 +862,7 @@
     static constexpr uint32_t kPersistentTypeId = 0x05DB5F41 + 1;
 
     // Expected size for 32/64-bit check by PersistentMemoryAllocator.
-    static constexpr size_t kExpectedInstanceSize =
-        OwningProcess::kExpectedInstanceSize + 56;
+    static constexpr size_t kExpectedInstanceSize = 56;
 
     // The atomic unfortunately makes this a "complex" class on some compilers
     // and thus requires an out-of-line constructor & destructor even though
@@ -1086,7 +870,6 @@
     ModuleInfoRecord();
     ~ModuleInfoRecord();
 
-    OwningProcess owner;            // The process that created this record.
     uint64_t address;               // The base address of the module.
     uint64_t load_time;             // Time of last load/unload.
     uint64_t size;                  // The size of the module in bytes.
@@ -1138,30 +921,18 @@
   // Creates a global tracker using a given persistent-memory |allocator| and
   // providing the given |stack_depth| to each thread tracker it manages. The
   // created object is activated so tracking has already started upon return.
-  // The |process_id| can be zero to get it from the OS but is taken for testing
-  // purposes.
   GlobalActivityTracker(std::unique_ptr<PersistentMemoryAllocator> allocator,
-                        int stack_depth,
-                        int64_t process_id);
+                        int stack_depth);
 
   // Returns the memory used by an activity-tracker managed by this class.
   // It is called during the destruction of a ManagedActivityTracker object.
   void ReturnTrackerMemory(ManagedActivityTracker* tracker);
 
-  // Records exception information.
-  void RecordExceptionImpl(const void* pc, const void* origin, uint32_t code);
-
-  // Releases the activity-tracker associated with a thread. It is called
   // automatically when a thread is joined and thus there is nothing more to
   // be tracked. |value| is a pointer to a ManagedActivityTracker.
   static void OnTLSDestroy(void* value);
 
-  // Does process-exit work. This can be run on any thread.
-  void CleanupAfterProcess(int64_t process_id,
-                           int64_t exit_stamp,
-                           int exit_code,
-                           std::string&& command_line);
-
   // The persistent-memory allocator from which the memory for all trackers
   // is taken.
   std::unique_ptr<PersistentMemoryAllocator> allocator_;
@@ -1170,10 +941,6 @@
   // provide the stack-depth requested during construction.
   const size_t stack_memory_size_;
 
-  // The process-id of the current process. This is kept as a member variable,
-  // defined during initialization, for testing purposes.
-  const int64_t process_id_;
-
   // The activity tracker for the currently executing thread.
   base::ThreadLocalStorage::Slot this_thread_tracker_;
 
@@ -1188,9 +955,9 @@
   ActivityTrackerMemoryAllocator user_data_allocator_;
   base::Lock user_data_allocator_lock_;
 
-  // An object for holding arbitrary key value pairs with thread-safe access.
-  ThreadSafeUserData process_data_;
-  ThreadSafeUserData global_data_;
+  // An object for holding arbitrary global key/value pairs. Values must
+  // always be written from the main UI thread.
+  GlobalUserData global_data_;
 
   // A map of global module information, keyed by module path.
   std::map<const std::string, ModuleInfoRecord*> modules_;
@@ -1199,21 +966,6 @@
   // The active global activity tracker.
   static subtle::AtomicWord g_tracker_;
 
-  // A lock that is used to protect access to the following fields.
-  base::Lock global_tracker_lock_;
-
-  // The collection of processes being tracked and their command-lines.
-  std::map<int64_t, std::string> known_processes_;
-
-  // A task-runner that can be used for doing background processing.
-  scoped_refptr<TaskRunner> background_task_runner_;
-
-  // A callback performed when a subprocess exits, including its exit-code
-  // and the phase it was in when that occurred. This will be called via
-  // the |background_task_runner_| if one is set or whatever thread reaped
-  // the process otherwise.
-  ProcessExitCallback process_exit_callback_;
-
   DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
 };
 
diff --git a/base/debug/activity_tracker_unittest.cc b/base/debug/activity_tracker_unittest.cc
index c7efa58..aced4fb 100644
--- a/base/debug/activity_tracker_unittest.cc
+++ b/base/debug/activity_tracker_unittest.cc
@@ -84,73 +84,45 @@
     return GlobalActivityTracker::Get()->user_data_allocator_.cache_used();
   }
 
-  void HandleProcessExit(int64_t id,
-                         int64_t stamp,
-                         int code,
-                         GlobalActivityTracker::ProcessPhase phase,
-                         std::string&& command,
-                         ActivityUserData::Snapshot&& data) {
-    exit_id = id;
-    exit_stamp = stamp;
-    exit_code = code;
-    exit_phase = phase;
-    exit_command = std::move(command);
-    exit_data = std::move(data);
-  }
-
   static void DoNothing() {}
-
-  int64_t exit_id = 0;
-  int64_t exit_stamp;
-  int exit_code;
-  GlobalActivityTracker::ProcessPhase exit_phase;
-  std::string exit_command;
-  ActivityUserData::Snapshot exit_data;
 };
 
 TEST_F(ActivityTrackerTest, UserDataTest) {
   char buffer[256];
   memset(buffer, 0, sizeof(buffer));
   ActivityUserData data(buffer, sizeof(buffer));
-  size_t space = sizeof(buffer) - sizeof(ActivityUserData::MemoryHeader);
+  const size_t space = sizeof(buffer) - 8;
   ASSERT_EQ(space, data.available_);
 
   data.SetInt("foo", 1);
-  space -= 24;
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24, data.available_);
 
   data.SetUint("b", 1U);  // Small names fit beside header in a word.
-  space -= 16;
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16, data.available_);
 
   data.Set("c", buffer, 10);
-  space -= 24;
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16 - 24, data.available_);
 
   data.SetString("dear john", "it's been fun");
-  space -= 32;
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
 
   data.Set("c", buffer, 20);
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
 
   data.SetString("dear john", "but we're done together");
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
 
   data.SetString("dear john", "bye");
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
 
   data.SetChar("d", 'x');
-  space -= 8;
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8, data.available_);
 
   data.SetBool("ee", true);
-  space -= 16;
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16, data.available_);
 
   data.SetString("f", "");
-  space -= 8;
-  ASSERT_EQ(space, data.available_);
+  ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16 - 8, data.available_);
 }
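
The deltas asserted above are consistent with a simple size model: a 6-byte
record header, the key packed directly after it, and the whole span (header +
key + value) rounded up to the 8-byte memory alignment. A sketch of that
model checked against the test's constants (the 6-byte figure is inferred
from the Header struct, not stated in the test):

#include <cstddef>

constexpr std::size_t RoundUp8(std::size_t n) {
  return (n + 7) & ~std::size_t{7};
}

// header (6) + key + value, rounded to the 8-byte memory alignment.
constexpr std::size_t RecordSize(std::size_t key_len, std::size_t value_len) {
  return RoundUp8(6 + key_len + value_len);
}

static_assert(RecordSize(3, 8) == 24, "SetInt(\"foo\", 1)");
static_assert(RecordSize(1, 8) == 16, "SetUint(\"b\", 1U)");
static_assert(RecordSize(1, 10) == 24, "Set(\"c\", buffer, 10)");
static_assert(RecordSize(9, 13) == 32, "SetString(\"dear john\", ...)");
static_assert(RecordSize(1, 1) == 8, "SetChar(\"d\", 'x')");
static_assert(RecordSize(2, 1) == 16, "SetBool(\"ee\", true)");
static_assert(RecordSize(1, 0) == 8, "SetString(\"f\", \"\")");
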
 
 TEST_F(ActivityTrackerTest, PushPopTest) {
@@ -204,7 +176,7 @@
 }
 
 TEST_F(ActivityTrackerTest, ScopedTaskTest) {
-  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
 
   ThreadActivityTracker* tracker =
       GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
@@ -250,28 +222,6 @@
   ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
 }
 
-TEST_F(ActivityTrackerTest, ExceptionTest) {
-  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
-  GlobalActivityTracker* global = GlobalActivityTracker::Get();
-
-  ThreadActivityTracker* tracker =
-      GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
-  ThreadActivityTracker::Snapshot snapshot;
-  ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
-
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  ASSERT_EQ(0U, snapshot.last_exception.activity_type);
-
-  char origin;
-  global->RecordException(&origin, 42);
-
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  EXPECT_EQ(Activity::ACT_EXCEPTION, snapshot.last_exception.activity_type);
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin),
-            snapshot.last_exception.origin_address);
-  EXPECT_EQ(42U, snapshot.last_exception.data.exception.code);
-}
-
 TEST_F(ActivityTrackerTest, CreateWithFileTest) {
   const char temp_name[] = "CreateWithFileTest";
   ScopedTempDir temp_dir;
@@ -300,16 +250,6 @@
 
 // GlobalActivityTracker tests below.
 
-TEST_F(ActivityTrackerTest, BasicTest) {
-  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
-  GlobalActivityTracker* global = GlobalActivityTracker::Get();
-
-  // Ensure the data repositories have backing store, indicated by non-zero ID.
-  EXPECT_NE(0U, global->process_data().id());
-  EXPECT_NE(0U, global->global_data().id());
-  EXPECT_NE(global->process_data().id(), global->global_data().id());
-}
-
 class SimpleActivityThread : public SimpleThread {
  public:
   SimpleActivityThread(const std::string& name,
@@ -364,7 +304,7 @@
 };
 
 TEST_F(ActivityTrackerTest, ThreadDeathTest) {
-  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
+  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
   GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
   const size_t starting_active = GetGlobalActiveTrackerCount();
   const size_t starting_inactive = GetGlobalInactiveTrackerCount();
@@ -396,107 +336,5 @@
   EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
 }
 
-TEST_F(ActivityTrackerTest, ProcessDeathTest) {
-  // This doesn't actually create and destroy a process. Instead, it uses for-
-  // testing interfaces to simulate data created by other processes.
-  const ProcessId other_process_id = GetCurrentProcId() + 1;
-
-  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3, 0);
-  GlobalActivityTracker* global = GlobalActivityTracker::Get();
-  ThreadActivityTracker* thread = global->GetOrCreateTrackerForCurrentThread();
-
-  // Get callbacks for process exit.
-  global->SetProcessExitCallback(
-      Bind(&ActivityTrackerTest::HandleProcessExit, Unretained(this)));
-
-  // Pretend that another process has started.
-  global->RecordProcessLaunch(other_process_id, FILE_PATH_LITERAL("foo --bar"));
-
-  // Do some activities.
-  PendingTask task(FROM_HERE, base::Bind(&DoNothing));
-  ScopedTaskRunActivity activity(task);
-  ActivityUserData& user_data = activity.user_data();
-  ASSERT_NE(0U, user_data.id());
-
-  // Get the memory-allocator references to that data.
-  PersistentMemoryAllocator::Reference proc_data_ref =
-      global->allocator()->GetAsReference(
-          global->process_data().GetBaseAddress(),
-          GlobalActivityTracker::kTypeIdProcessDataRecord);
-  ASSERT_TRUE(proc_data_ref);
-  PersistentMemoryAllocator::Reference tracker_ref =
-      global->allocator()->GetAsReference(
-          thread->GetBaseAddress(),
-          GlobalActivityTracker::kTypeIdActivityTracker);
-  ASSERT_TRUE(tracker_ref);
-  PersistentMemoryAllocator::Reference user_data_ref =
-      global->allocator()->GetAsReference(
-          user_data.GetBaseAddress(),
-          GlobalActivityTracker::kTypeIdUserDataRecord);
-  ASSERT_TRUE(user_data_ref);
-
-  // Make a copy of the thread-tracker state so it can be restored later.
-  const size_t tracker_size = global->allocator()->GetAllocSize(tracker_ref);
-  std::unique_ptr<char[]> tracker_copy(new char[tracker_size]);
-  memcpy(tracker_copy.get(), thread->GetBaseAddress(), tracker_size);
-
-  // Change the objects to appear to be owned by another process.
-  int64_t owning_id;
-  int64_t stamp;
-  ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
-      global->process_data().GetBaseAddress(), &owning_id, &stamp));
-  EXPECT_NE(other_process_id, owning_id);
-  ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
-      thread->GetBaseAddress(), &owning_id, &stamp));
-  EXPECT_NE(other_process_id, owning_id);
-  ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
-                                                   &owning_id, &stamp));
-  EXPECT_NE(other_process_id, owning_id);
-  global->process_data().SetOwningProcessIdForTesting(other_process_id, stamp);
-  thread->SetOwningProcessIdForTesting(other_process_id, stamp);
-  user_data.SetOwningProcessIdForTesting(other_process_id, stamp);
-  ASSERT_TRUE(ActivityUserData::GetOwningProcessId(
-      global->process_data().GetBaseAddress(), &owning_id, &stamp));
-  EXPECT_EQ(other_process_id, owning_id);
-  ASSERT_TRUE(ThreadActivityTracker::GetOwningProcessId(
-      thread->GetBaseAddress(), &owning_id, &stamp));
-  EXPECT_EQ(other_process_id, owning_id);
-  ASSERT_TRUE(ActivityUserData::GetOwningProcessId(user_data.GetBaseAddress(),
-                                                   &owning_id, &stamp));
-  EXPECT_EQ(other_process_id, owning_id);
-
-  // Check that process exit will perform callback and free the allocations.
-  ASSERT_EQ(0, exit_id);
-  ASSERT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecord,
-            global->allocator()->GetType(proc_data_ref));
-  ASSERT_EQ(GlobalActivityTracker::kTypeIdActivityTracker,
-            global->allocator()->GetType(tracker_ref));
-  ASSERT_EQ(GlobalActivityTracker::kTypeIdUserDataRecord,
-            global->allocator()->GetType(user_data_ref));
-  global->RecordProcessExit(other_process_id, 0);
-  EXPECT_EQ(other_process_id, exit_id);
-  EXPECT_EQ("foo --bar", exit_command);
-  EXPECT_EQ(GlobalActivityTracker::kTypeIdProcessDataRecordFree,
-            global->allocator()->GetType(proc_data_ref));
-  EXPECT_EQ(GlobalActivityTracker::kTypeIdActivityTrackerFree,
-            global->allocator()->GetType(tracker_ref));
-  EXPECT_EQ(GlobalActivityTracker::kTypeIdUserDataRecordFree,
-            global->allocator()->GetType(user_data_ref));
-
-  // Restore memory contents and types so things don't crash when doing real
-  // process clean-up.
-  memcpy(const_cast<void*>(thread->GetBaseAddress()), tracker_copy.get(),
-         tracker_size);
-  global->allocator()->ChangeType(
-      proc_data_ref, GlobalActivityTracker::kTypeIdProcessDataRecord,
-      GlobalActivityTracker::kTypeIdUserDataRecordFree, false);
-  global->allocator()->ChangeType(
-      tracker_ref, GlobalActivityTracker::kTypeIdActivityTracker,
-      GlobalActivityTracker::kTypeIdActivityTrackerFree, false);
-  global->allocator()->ChangeType(
-      user_data_ref, GlobalActivityTracker::kTypeIdUserDataRecord,
-      GlobalActivityTracker::kTypeIdUserDataRecordFree, false);
-}
-
 }  // namespace debug
 }  // namespace base
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index 08dcacf..1996dfc 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -35,7 +35,7 @@
 
 namespace {
 
-#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_WIN)
+#if HAVE_TRACE_STACK_FRAME_POINTERS
 
 #if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
 // GCC and LLVM generate slightly different frames on ARM, see
@@ -144,7 +144,7 @@
   return prev_parent_fp;
 }
 
-#endif  // HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_WIN)
+#endif  // HAVE_TRACE_STACK_FRAME_POINTERS
 
 }  // namespace
 
@@ -227,18 +227,6 @@
 size_t TraceStackFramePointers(const void** out_trace,
                                size_t max_depth,
                                size_t skip_initial) {
-// TODO(699863): Merge the frame-pointer based stack unwinder into the
-// base::debug::StackTrace platform-specific implementation files.
-#if defined(OS_WIN)
-  StackTrace stack(max_depth);
-  size_t count = 0;
-  const void* const* frames = stack.Addresses(&count);
-  if (count < skip_initial)
-    return 0u;
-  count -= skip_initial;
-  memcpy(out_trace, frames + skip_initial, count * sizeof(void*));
-  return count;
-#elif defined(OS_POSIX)
   // Usage of __builtin_frame_address() enables frame pointers in this
   // function even if they are not enabled globally. So 'fp' will always
   // be valid.
@@ -272,10 +260,8 @@
   }
 
   return depth;
-#endif
 }
 
-#if !defined(OS_WIN)
 ScopedStackFrameLinker::ScopedStackFrameLinker(void* fp, void* parent_fp)
     : fp_(fp),
       parent_fp_(parent_fp),
@@ -286,7 +272,6 @@
   CHECK_EQ(parent_fp_, previous_parent_fp)
       << "Stack frame's parent pointer has changed!";
 }
-#endif  // !defined(OS_WIN)
 
 #endif  // HAVE_TRACE_STACK_FRAME_POINTERS
 
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index ab1d2eb..4c9b73e 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -23,23 +23,13 @@
 struct _CONTEXT;
 #endif
 
-// TODO(699863): Clean up HAVE_TRACE_STACK_FRAME_POINTERS.
-#if defined(OS_POSIX)
-
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(OS_POSIX) && ( \
+    defined(__i386__) || defined(__x86_64__) || \
+    (defined(__arm__) && !defined(__thumb__)))
 #define HAVE_TRACE_STACK_FRAME_POINTERS 1
-#elif defined(__arm__) && !defined(__thumb__)
-#define HAVE_TRACE_STACK_FRAME_POINTERS 1
-#else  // defined(__arm__) && !defined(__thumb__)
+#else
 #define HAVE_TRACE_STACK_FRAME_POINTERS 0
-#endif  // defined(__arm__) && !defined(__thumb__)
-
-#elif defined(OS_WIN)
-#define HAVE_TRACE_STACK_FRAME_POINTERS 1
-
-#else  // defined(OS_WIN)
-#define HAVE_TRACE_STACK_FRAME_POINTERS 0
-#endif  // defined(OS_WIN)
+#endif
 
 namespace base {
 namespace debug {
@@ -132,7 +122,6 @@
                                            size_t max_depth,
                                            size_t skip_initial);
 
-#if !defined(OS_WIN)
 // Links stack frame |fp| to |parent_fp|, so that during stack unwinding
 // TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
 // Both frame pointers must come from __builtin_frame_address().
@@ -182,7 +171,6 @@
 
   DISALLOW_COPY_AND_ASSIGN(ScopedStackFrameLinker);
 };
-#endif  // !defined(OS_WIN)
 
 #endif  // HAVE_TRACE_STACK_FRAME_POINTERS
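
After this change the frame-pointer unwinder is compiled only on POSIX x86 and non-Thumb ARM; the Windows branch removed above is gone entirely. A minimal sketch of the surviving API:

```cpp
// Capturing caller return addresses via frame pointers; compiled only
// when HAVE_TRACE_STACK_FRAME_POINTERS is 1.
#include "base/debug/stack_trace.h"

void CaptureCallers() {
#if HAVE_TRACE_STACK_FRAME_POINTERS
  const void* frames[32];
  // Skip this function's own frame; the return value is the captured depth.
  size_t depth = base::debug::TraceStackFramePointers(frames, 32, 1);
  // frames[0..depth) now hold the callers' return addresses.
  (void)depth;
#endif
}
```
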
 
diff --git a/base/environment.cc b/base/environment.cc
index 8b1d8fc..534a7a8 100644
--- a/base/environment.cc
+++ b/base/environment.cc
@@ -42,7 +42,7 @@
       alternate_case_var = ToLowerASCII(variable_name);
     else
       return false;
-    return GetVarImpl(alternate_case_var, result);
+    return GetVarImpl(alternate_case_var.c_str(), result);
   }
 
   bool SetVar(StringPiece variable_name,
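
The revert only changes how the alternate-case name is passed to GetVarImpl; the case-fallback behavior of GetVar() is unchanged. A small illustrative sketch of that fallback:

```cpp
// Illustration only: GetVar() retries with the alternate-case name
// ("path" -> "PATH" and vice versa) before giving up.
#include <memory>
#include <string>
#include "base/environment.h"

void Demo() {
  std::unique_ptr<base::Environment> env(base::Environment::Create());
  std::string value;
  // Succeeds on most systems even though the canonical name is "PATH".
  if (env->GetVar("path", &value)) {
    // |value| now holds the contents of $PATH.
  }
}
```
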
diff --git a/base/feature_list.cc b/base/feature_list.cc
index 61043ce..353136c 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -228,9 +228,9 @@
 }
 
 // static
-std::vector<base::StringPiece> FeatureList::SplitFeatureListString(
-    base::StringPiece input) {
-  return SplitStringPiece(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+std::vector<std::string> FeatureList::SplitFeatureListString(
+    const std::string& input) {
+  return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
 }
 
 // static
@@ -340,7 +340,7 @@
     const std::string& feature_list,
     OverrideState overridden_state) {
   for (const auto& value : SplitFeatureListString(feature_list)) {
-    StringPiece feature_name = value;
+    StringPiece feature_name(value);
     base::FieldTrial* trial = nullptr;
 
     // The entry may be of the form FeatureName<FieldTrialName - in which case,
@@ -348,7 +348,7 @@
     std::string::size_type pos = feature_name.find('<');
     if (pos != std::string::npos) {
       feature_name.set(value.data(), pos);
-      trial = base::FieldTrialList::Find(value.substr(pos + 1).as_string());
+      trial = base::FieldTrialList::Find(value.substr(pos + 1));
     }
 
     RegisterOverride(feature_name, overridden_state, trial);
diff --git a/base/feature_list.h b/base/feature_list.h
index c9f4a7b..09e8408 100644
--- a/base/feature_list.h
+++ b/base/feature_list.h
@@ -156,10 +156,9 @@
   // called after the singleton instance has been registered via SetInstance().
   static FieldTrial* GetFieldTrial(const Feature& feature);
 
-  // Splits a comma-separated string containing feature names into a vector. The
-  // resulting pieces point to parts of |input|.
-  static std::vector<base::StringPiece> SplitFeatureListString(
-      base::StringPiece input);
+  // Splits a comma-separated string containing feature names into a vector.
+  static std::vector<std::string> SplitFeatureListString(
+      const std::string& input);
 
   // Initializes and sets an instance of FeatureList with feature overrides via
   // command-line flags |enable_features| and |disable_features| if one has not
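
With this revert, SplitFeatureListString() again returns owning std::strings rather than StringPieces into |input|. A sketch of what callers get back (the FeatureName<FieldTrialName form is parsed afterwards by the caller, as the loop in feature_list.cc above does):

```cpp
// Splitting a feature list: trims whitespace and drops empty entries.
#include <string>
#include <vector>
#include "base/feature_list.h"

void Demo() {
  std::vector<std::string> features =
      base::FeatureList::SplitFeatureListString("A, B<Trial,,C");
  // features == {"A", "B<Trial", "C"}
  (void)features;
}
```
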
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
index 5fbd294..fb3b320 100644
--- a/base/feature_list_unittest.cc
+++ b/base/feature_list_unittest.cc
@@ -14,7 +14,6 @@
 #include "base/memory/ptr_util.h"
 #include "base/metrics/field_trial.h"
 #include "base/metrics/persistent_memory_allocator.h"
-#include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -34,7 +33,7 @@
 };
 
 std::string SortFeatureListString(const std::string& feature_list) {
-  std::vector<base::StringPiece> features =
+  std::vector<std::string> features =
       FeatureList::SplitFeatureListString(feature_list);
   std::sort(features.begin(), features.end());
   return JoinString(features, ",");
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 21df995..9f67f9b 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -176,7 +176,7 @@
 
 FilePath::FilePath(const FilePath& that) : path_(that.path_) {
 }
-FilePath::FilePath(FilePath&& that) noexcept = default;
+FilePath::FilePath(FilePath&& that) = default;
 
 FilePath::FilePath(StringPieceType path) {
   path.CopyToString(&path_);
diff --git a/base/files/file_path.h b/base/files/file_path.h
index 0be0ad0..02846f6 100644
--- a/base/files/file_path.h
+++ b/base/files/file_path.h
@@ -184,7 +184,7 @@
 
   // Constructs FilePath with the contents of |that|, which is left in valid but
   // unspecified state.
-  FilePath(FilePath&& that) noexcept;
+  FilePath(FilePath&& that);
   // Replaces the contents with those of |that|, which is left in valid but
   // unspecified state.
   FilePath& operator=(FilePath&& that);
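
Dropping noexcept from the move constructor is not free: standard containers use std::move_if_noexcept when they reallocate, so a move constructor that is not declared noexcept makes std::vector<FilePath> fall back to copying its elements when it grows. A general C++ illustration, not FilePath-specific:

```cpp
// Containers only move elements on reallocation when the move constructor
// cannot throw; otherwise they copy to preserve the strong guarantee.
#include <type_traits>
#include <utility>
#include <vector>

struct Throwing {
  Throwing() {}
  Throwing(const Throwing&) {}
  Throwing(Throwing&&) {}  // Not noexcept: vector copies on growth.
};
struct NonThrowing {
  NonThrowing() {}
  NonThrowing(NonThrowing&&) noexcept {}  // vector moves on growth.
};

static_assert(!std::is_nothrow_move_constructible<Throwing>::value, "");
static_assert(std::is_nothrow_move_constructible<NonThrowing>::value, "");
```
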
diff --git a/base/files/file_util_mac.mm b/base/files/file_util_mac.mm
index d3e14a3..5a99aa0 100644
--- a/base/files/file_util_mac.mm
+++ b/base/files/file_util_mac.mm
@@ -7,10 +7,8 @@
 #import <Foundation/Foundation.h>
 #include <copyfile.h>
 #include <stdlib.h>
-#include <string.h>
 
 #include "base/files/file_path.h"
-#include "base/logging.h"
 #include "base/mac/foundation_util.h"
 #include "base/strings/string_util.h"
 #include "base/threading/thread_restrictions.h"
@@ -26,14 +24,10 @@
 }
 
 bool GetTempDir(base::FilePath* path) {
-  // In order to facilitate hermetic runs on macOS, first check
-  // $MAC_CHROMIUM_TMPDIR. We check this instead of $TMPDIR because external
-  // programs currently set $TMPDIR with no effect, but when we respect it
-  // directly it can cause crashes (like crbug.com/698759).
-  const char* env_tmpdir = getenv("MAC_CHROMIUM_TMPDIR");
+  // In order to facilitate hermetic runs on macOS, first check $TMPDIR.
+  // NOTE: $TMPDIR is ALMOST ALWAYS set on macOS (unless the user has unset it).
+  const char* env_tmpdir = getenv("TMPDIR");
   if (env_tmpdir) {
-    DCHECK_LT(strlen(env_tmpdir), 50u)
-        << "too-long TMPDIR causes socket name length issues.";
     *path = base::FilePath(env_tmpdir);
     return true;
   }
diff --git a/base/mac/mach_port_broker_unittest.cc b/base/mac/mach_port_broker_unittest.cc
index cb4b82c..bff8eb6 100644
--- a/base/mac/mach_port_broker_unittest.cc
+++ b/base/mac/mach_port_broker_unittest.cc
@@ -95,21 +95,21 @@
   CommandLine command_line(
       base::GetMultiProcessTestChildBaseCommandLine());
   broker_.GetLock().Acquire();
-  base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
       "MachPortBrokerTestChild", command_line, LaunchOptions());
-  broker_.AddPlaceholderForPid(spawn_result.process.Handle());
+  broker_.AddPlaceholderForPid(test_child_process.Handle());
   broker_.GetLock().Release();
 
   WaitForTaskPort();
-  EXPECT_EQ(spawn_result.process.Handle(), received_process_);
+  EXPECT_EQ(test_child_process.Handle(), received_process_);
 
   int rv = -1;
-  ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
       TestTimeouts::action_timeout(), &rv));
   EXPECT_EQ(0, rv);
 
   EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
-            broker_.TaskForPid(spawn_result.process.Handle()));
+            broker_.TaskForPid(test_child_process.Handle()));
 }
 
 TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
@@ -117,18 +117,17 @@
   CommandLine command_line(
       base::GetMultiProcessTestChildBaseCommandLine());
   broker_.GetLock().Acquire();
-  base::SpawnChildResult spawn_result = base::SpawnMultiProcessTestChild(
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
       "MachPortBrokerTestChild", command_line, LaunchOptions());
-
   broker_.GetLock().Release();
 
   int rv = -1;
-  ASSERT_TRUE(spawn_result.process.WaitForExitWithTimeout(
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
       TestTimeouts::action_timeout(), &rv));
   EXPECT_EQ(0, rv);
 
   EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
-            broker_.TaskForPid(spawn_result.process.Handle()));
+            broker_.TaskForPid(test_child_process.Handle()));
 }
 
 }  // namespace base
diff --git a/base/memory/ref_counted.cc b/base/memory/ref_counted.cc
index 039f255..46bbd7a 100644
--- a/base/memory/ref_counted.cc
+++ b/base/memory/ref_counted.cc
@@ -3,17 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/memory/ref_counted.h"
-
 #include "base/threading/thread_collision_warner.h"
 
 namespace base {
-namespace {
-
-#if DCHECK_IS_ON()
-AtomicRefCount g_cross_thread_ref_count_access_allow_count = 0;
-#endif
-
-}  // namespace
 
 namespace subtle {
 
@@ -21,6 +13,8 @@
   return AtomicRefCountIsOne(&ref_count_);
 }
 
+RefCountedThreadSafeBase::RefCountedThreadSafeBase() = default;
+
 RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
 #if DCHECK_IS_ON()
   DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
@@ -31,10 +25,6 @@
 void RefCountedThreadSafeBase::AddRef() const {
 #if DCHECK_IS_ON()
   DCHECK(!in_dtor_);
-  DCHECK(!needs_adopt_ref_)
-      << "This RefCounted object is created with non-zero reference count."
-      << " The first reference to such a object has to be made by AdoptRef or"
-      << " MakeShared.";
 #endif
   AtomicRefCountInc(&ref_count_);
 }
@@ -53,23 +43,6 @@
   return false;
 }
 
-#if DCHECK_IS_ON()
-bool RefCountedBase::CalledOnValidSequence() const {
-  return sequence_checker_.CalledOnValidSequence() ||
-         !AtomicRefCountIsZero(&g_cross_thread_ref_count_access_allow_count);
-}
-#endif
-
 }  // namespace subtle
 
-#if DCHECK_IS_ON()
-ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
-  AtomicRefCountInc(&g_cross_thread_ref_count_access_allow_count);
-}
-
-ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
-  AtomicRefCountDec(&g_cross_thread_ref_count_access_allow_count);
-}
-#endif
-
 }  // namespace base
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index 46088f2..9dd09ad 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -16,40 +16,24 @@
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/sequence_checker.h"
 #include "base/threading/thread_collision_warner.h"
 #include "build/build_config.h"
 
-template <class T>
-class scoped_refptr;
-
 namespace base {
 
-template <typename T>
-scoped_refptr<T> AdoptRef(T* t);
-
 namespace subtle {
 
-enum AdoptRefTag { kAdoptRefTag };
-enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
-enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
-
 class BASE_EXPORT RefCountedBase {
  public:
   bool HasOneRef() const { return ref_count_ == 1; }
 
  protected:
-  explicit RefCountedBase(StartRefCountFromZeroTag) {
+  RefCountedBase()
+      : ref_count_(0)
 #if DCHECK_IS_ON()
-    sequence_checker_.DetachFromSequence();
+        , in_dtor_(false)
 #endif
-  }
-
-  explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
-#if DCHECK_IS_ON()
-    needs_adopt_ref_ = true;
-    sequence_checker_.DetachFromSequence();
-#endif
+  {
   }
 
   ~RefCountedBase() {
@@ -58,6 +42,7 @@
 #endif
   }
 
+
   void AddRef() const {
     // TODO(maruel): Add back once it doesn't assert 500 times/sec.
     // Current thread books the critical section "AddRelease"
@@ -65,62 +50,32 @@
     // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
 #if DCHECK_IS_ON()
     DCHECK(!in_dtor_);
-    DCHECK(!needs_adopt_ref_)
-        << "This RefCounted object is created with non-zero reference count."
-        << " The first reference to such a object has to be made by AdoptRef or"
-        << " MakeShared.";
-    if (ref_count_ >= 1) {
-      DCHECK(CalledOnValidSequence());
-    }
 #endif
-
     ++ref_count_;
   }
 
   // Returns true if the object should self-delete.
   bool Release() const {
-    --ref_count_;
-
     // TODO(maruel): Add back once it doesn't assert 500 times/sec.
     // Current thread books the critical section "AddRelease"
     // without release it.
     // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
-
 #if DCHECK_IS_ON()
     DCHECK(!in_dtor_);
-    if (ref_count_ == 0)
-      in_dtor_ = true;
-
-    if (ref_count_ >= 1)
-      DCHECK(CalledOnValidSequence());
-    if (ref_count_ == 1)
-      sequence_checker_.DetachFromSequence();
 #endif
-
-    return ref_count_ == 0;
+    if (--ref_count_ == 0) {
+#if DCHECK_IS_ON()
+      in_dtor_ = true;
+#endif
+      return true;
+    }
+    return false;
   }
 
  private:
-  template <typename U>
-  friend scoped_refptr<U> base::AdoptRef(U*);
-
-  void Adopted() const {
+  mutable size_t ref_count_;
 #if DCHECK_IS_ON()
-    DCHECK(needs_adopt_ref_);
-    needs_adopt_ref_ = false;
-#endif
-  }
-
-#if DCHECK_IS_ON()
-  bool CalledOnValidSequence() const;
-#endif
-
-  mutable size_t ref_count_ = 0;
-
-#if DCHECK_IS_ON()
-  mutable bool needs_adopt_ref_ = false;
-  mutable bool in_dtor_ = false;
-  mutable SequenceChecker sequence_checker_;
+  mutable bool in_dtor_;
 #endif
 
   DFAKE_MUTEX(add_release_);
@@ -133,13 +88,7 @@
   bool HasOneRef() const;
 
  protected:
-  explicit RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
-  explicit RefCountedThreadSafeBase(StartRefCountFromOneTag) : ref_count_(1) {
-#if DCHECK_IS_ON()
-    needs_adopt_ref_ = true;
-#endif
-  }
-
+  RefCountedThreadSafeBase();
   ~RefCountedThreadSafeBase();
 
   void AddRef() const;
@@ -148,19 +97,8 @@
   bool Release() const;
 
  private:
-  template <typename U>
-  friend scoped_refptr<U> base::AdoptRef(U*);
-
-  void Adopted() const {
-#if DCHECK_IS_ON()
-    DCHECK(needs_adopt_ref_);
-    needs_adopt_ref_ = false;
-#endif
-  }
-
   mutable AtomicRefCount ref_count_ = 0;
 #if DCHECK_IS_ON()
-  mutable bool needs_adopt_ref_ = false;
   mutable bool in_dtor_ = false;
 #endif
 
@@ -169,27 +107,6 @@
 
 }  // namespace subtle
 
-// ScopedAllowCrossThreadRefCountAccess disables the check documented on
-// RefCounted below for rare pre-existing use cases where thread-safety was
-// guaranteed through other means (e.g. explicit sequencing of calls across
-// execution sequences when bouncing between threads in order). New callers
-// should refrain from using this (callsites handling thread-safety through
-// locks should use RefCountedThreadSafe per the overhead of its atomics being
-// negligible compared to locks anyways and callsites doing explicit sequencing
-// should properly std::move() the ref to avoid hitting this check).
-// TODO(tzik): Cleanup existing use cases and remove
-// ScopedAllowCrossThreadRefCountAccess.
-class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
- public:
-#if DCHECK_IS_ON()
-  ScopedAllowCrossThreadRefCountAccess();
-  ~ScopedAllowCrossThreadRefCountAccess();
-#else
-  ScopedAllowCrossThreadRefCountAccess() {}
-  ~ScopedAllowCrossThreadRefCountAccess() {}
-#endif
-};
-
 //
 // A base class for reference counted classes.  Otherwise, known as a cheap
 // knock-off of WebKit's RefCounted<T> class.  To use this, just extend your
@@ -204,45 +121,10 @@
 //
 // You should always make your destructor non-public, to avoid any code deleting
 // the object accidentally while there are references to it.
-//
-//
-// The ref count manipulation to RefCounted is NOT thread safe and has DCHECKs
-// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
-// passed to another execution sequence only when its ref count is 1. If the ref
-// count is more than 1, the RefCounted class verifies the ref updates are made
-// on the same execution sequence as the previous ones.
-//
-//
-// The reference count starts from zero by default, and we intend to migrate
-// to a start-from-one ref count. Add REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE()
-// to the ref counted class to opt in.
-//
-// If an object has a start-from-one ref count, the first scoped_refptr needs
-// to be created by base::AdoptRef() or base::MakeShared(). base::MakeShared()
-// can be used to create both types of ref counted objects.
-//
-// The motivations to use start-from-one ref count are:
-//  - Start-from-one ref count doesn't need the ref count increment for the
-//    first reference.
-//  - It can detect invalid acquisition of a being-deleted object whose ref
-//    count has reached zero. That tends to happen with a custom deleter that
-//    delays the deletion.
-//    TODO(tzik): Implement invalid acquisition detection.
-//  - Behavior parity to Blink's WTF::RefCounted, whose count starts from one.
-//    And start-from-one ref count is a step to merge WTF::RefCounted into
-//    base::RefCounted.
-//
-#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE()             \
-  static constexpr ::base::subtle::StartRefCountFromOneTag \
-      kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
-
 template <class T>
 class RefCounted : public subtle::RefCountedBase {
  public:
-  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
-      subtle::kStartRefCountFromZeroTag;
-
-  RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
+  RefCounted() = default;
 
   void AddRef() const {
     subtle::RefCountedBase::AddRef();
@@ -258,7 +140,7 @@
   ~RefCounted() = default;
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(RefCounted);
+  DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
 };
 
 // Forward declaration.
@@ -289,17 +171,10 @@
 //    private:
 //     friend class base::RefCountedThreadSafe<MyFoo>;
 //     ~MyFoo();
-//
-// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
-// too. See the comment above the RefCounted definition for details.
 template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
 class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
  public:
-  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
-      subtle::kStartRefCountFromZeroTag;
-
-  explicit RefCountedThreadSafe()
-      : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
+  RefCountedThreadSafe() = default;
 
   void AddRef() const {
     subtle::RefCountedThreadSafeBase::AddRef();
@@ -339,43 +214,6 @@
   ~RefCountedData() = default;
 };
 
-// Creates a scoped_refptr from a raw pointer without incrementing the reference
-// count. Use this only for a newly created object whose reference count starts
-// from 1 instead of 0.
-template <typename T>
-scoped_refptr<T> AdoptRef(T* obj) {
-  using Tag = typename std::decay<decltype(T::kRefCountPreference)>::type;
-  static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
-                "Use AdoptRef only for the reference count starts from one.");
-
-  DCHECK(obj);
-  DCHECK(obj->HasOneRef());
-  obj->Adopted();
-  return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
-}
-
-namespace subtle {
-
-template <typename T>
-scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
-  return scoped_refptr<T>(obj);
-}
-
-template <typename T>
-scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
-  return AdoptRef(obj);
-}
-
-}  // namespace subtle
-
-// Constructs an instance of T, which is a ref counted type, and wraps the
-// object into a scoped_refptr.
-template <typename T, typename... Args>
-scoped_refptr<T> MakeShared(Args&&... args) {
-  T* obj = new T(std::forward<Args>(args)...);
-  return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
-}
-
 }  // namespace base
 
 //
@@ -547,11 +385,6 @@
   T* ptr_ = nullptr;
 
  private:
-  template <typename U>
-  friend scoped_refptr<U> base::AdoptRef(U*);
-
-  scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
-
   // Friend required for move constructors that set r.ptr_ to null.
   template <typename U>
   friend class scoped_refptr;
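
With AdoptRef()/MakeShared() and the ref-count-preference tags removed, every RefCounted type is back to a start-from-zero count. A contrast sketch (Foo is a stand-in class, not from the tree):

```cpp
// Start-from-zero (after this revert) vs. the removed start-from-one API.
#include "base/memory/ref_counted.h"

class Foo : public base::RefCounted<Foo> {
 private:
  friend class base::RefCounted<Foo>;
  ~Foo() = default;
};

void Demo() {
  // Post-revert: the count starts at zero and the first scoped_refptr
  // performs the first AddRef().
  scoped_refptr<Foo> foo(new Foo);
  // Pre-revert (removed above): a REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE()
  // class would start at one and be wrapped via
  //   scoped_refptr<Foo> foo = base::AdoptRef(new Foo);
}
```
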
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index 515f422..65c15d2 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -6,7 +6,6 @@
 
 #include <utility>
 
-#include "base/test/gtest_util.h"
 #include "base/test/opaque_ref_counted.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -123,16 +122,6 @@
   return self_assign;
 }
 
-class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
- public:
-  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
-
-  InitialRefCountIsOne() {}
-
- private:
-  friend class base::RefCounted<InitialRefCountIsOne>;
-  ~InitialRefCountIsOne() {}
-};
 
 }  // end namespace
 
@@ -539,30 +528,3 @@
   scoped_refptr<Other> other2(other);
   EXPECT_EQ(other2, Overloaded(std::move(other)));
 }
-
-TEST(RefCountedUnitTest, TestInitialRefCountIsOne) {
-  scoped_refptr<InitialRefCountIsOne> obj =
-      base::MakeShared<InitialRefCountIsOne>();
-  EXPECT_TRUE(obj->HasOneRef());
-  obj = nullptr;
-
-  scoped_refptr<InitialRefCountIsOne> obj2 =
-      base::AdoptRef(new InitialRefCountIsOne);
-  EXPECT_TRUE(obj2->HasOneRef());
-  obj2 = nullptr;
-
-  scoped_refptr<Other> obj3 = base::MakeShared<Other>();
-  EXPECT_TRUE(obj3->HasOneRef());
-  obj3 = nullptr;
-}
-
-TEST(RefCountedDeathTest, TestAdoptRef) {
-  EXPECT_DCHECK_DEATH(make_scoped_refptr(new InitialRefCountIsOne));
-
-  InitialRefCountIsOne* ptr = nullptr;
-  EXPECT_DCHECK_DEATH(base::AdoptRef(ptr));
-
-  scoped_refptr<InitialRefCountIsOne> obj =
-      base::MakeShared<InitialRefCountIsOne>();
-  EXPECT_DCHECK_DEATH(base::AdoptRef(obj.get()));
-}
diff --git a/base/memory/ref_counted_unittest.nc b/base/memory/ref_counted_unittest.nc
deleted file mode 100644
index 5022779..0000000
--- a/base/memory/ref_counted_unittest.nc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/ref_counted.h"
-
-namespace base {
-
-class InitialRefCountIsZero : public base::RefCounted<InitialRefCountIsZero> {
- public:
-  InitialRefCountIsZero() {}
- private:
-  friend class base::RefCounted<InitialRefCountIsZero>;
-  ~InitialRefCountIsZero() {}
-};
-
-#if defined(NCTEST_ADOPT_REF_TO_ZERO_START)  // [r"fatal error: static_assert failed \"Use AdoptRef only when the reference count starts from one\.\""]
-
-void WontCompile() {
-  AdoptRef(new InitialRefCountIsZero());
-}
-
-#endif
-
-}  // namespace base
diff --git a/base/memory/shared_memory_mac_unittest.cc b/base/memory/shared_memory_mac_unittest.cc
index 4ccee89..c7d20ec 100644
--- a/base/memory/shared_memory_mac_unittest.cc
+++ b/base/memory/shared_memory_mac_unittest.cc
@@ -204,7 +204,7 @@
     // similar tests.
     service_name_ = CreateRandomServiceName();
     server_port_.reset(BecomeMachServer(service_name_.c_str()));
-    spawn_child_ = SpawnChild(name);
+    child_process_ = SpawnChild(name);
     client_port_.reset(ReceiveMachPort(server_port_.get()));
   }
 
@@ -221,7 +221,7 @@
   // process.
   mac::ScopedMachSendRight client_port_;
 
-  base::SpawnChildResult spawn_child_;
+  base::Process child_process_;
   DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
 };
 
@@ -237,7 +237,7 @@
   SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
                MACH_MSG_TYPE_COPY_SEND);
   int rv = -1;
-  ASSERT_TRUE(spawn_child_.process.WaitForExitWithTimeout(
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
       TestTimeouts::action_timeout(), &rv));
   EXPECT_EQ(0, rv);
 }
@@ -277,7 +277,7 @@
   SendMachPort(
       client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
   int rv = -1;
-  ASSERT_TRUE(spawn_child_.process.WaitForExitWithTimeout(
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
       TestTimeouts::action_timeout(), &rv));
   EXPECT_EQ(0, rv);
 }
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
index d87fad0..19dedcc 100644
--- a/base/memory/shared_memory_unittest.cc
+++ b/base/memory/shared_memory_unittest.cc
@@ -682,16 +682,16 @@
 
   // Start |kNumTasks| processes, each of which atomically increments the first
   // word by 1.
-  SpawnChildResult children[kNumTasks];
+  Process processes[kNumTasks];
   for (int index = 0; index < kNumTasks; ++index) {
-    children[index] = SpawnChild("SharedMemoryTestMain");
-    ASSERT_TRUE(children[index].process.IsValid());
+    processes[index] = SpawnChild("SharedMemoryTestMain");
+    ASSERT_TRUE(processes[index].IsValid());
   }
 
   // Check that each process exited correctly.
   int exit_code = 0;
   for (int index = 0; index < kNumTasks; ++index) {
-    EXPECT_TRUE(children[index].process.WaitForExit(&exit_code));
+    EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
     EXPECT_EQ(0, exit_code);
   }
 
diff --git a/base/memory/singleton_objc.h b/base/memory/singleton_objc.h
new file mode 100644
index 0000000..6df3f77
--- /dev/null
+++ b/base/memory/singleton_objc.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Support for using the Singleton<T> pattern with Objective-C objects.  A
+// SingletonObjC is the same as a Singleton, except the default traits are
+// appropriate for Objective-C objects.  A typical Objective-C object of type
+// NSExampleType can be maintained as a singleton and accessed with:
+//
+//   NSExampleType* exampleSingleton = SingletonObjC<NSExampleType>::get();
+//
+// The first time this is used, it will create exampleSingleton as the result
+// of [[NSExampleType alloc] init].  Subsequent calls will return the same
+// NSExampleType* object.  The object will be released by calling
+// -[NSExampleType release] when Singleton's atexit routines run
+// (see singleton.h).
+//
+// For Objective-C objects initialized through means other than the
+// no-parameter -init selector, DefaultSingletonObjCTraits may be extended
+// as needed:
+//
+//   struct FooSingletonTraits : public DefaultSingletonObjCTraits<Foo> {
+//     static Foo* New() {
+//       return [[Foo alloc] initWithName:@"selecty"];
+//     }
+//   };
+//   ...
+//   Foo* widgetSingleton = SingletonObjC<Foo, FooSingletonTraits>::get();
+
+#ifndef BASE_MEMORY_SINGLETON_OBJC_H_
+#define BASE_MEMORY_SINGLETON_OBJC_H_
+
+#import <Foundation/Foundation.h>
+#include "base/memory/singleton.h"
+
+// Singleton traits usable to manage traditional Objective-C objects, which
+// are instantiated by sending |alloc| and |init| messages, and are deallocated
+// in a memory-managed environment when their retain counts drop to 0 by
+// sending |release| messages.
+template<typename Type>
+struct DefaultSingletonObjCTraits : public DefaultSingletonTraits<Type> {
+  static Type* New() {
+    return [[Type alloc] init];
+  }
+
+  static void Delete(Type* object) {
+    [object release];
+  }
+};
+
+// Exactly like Singleton, but with DefaultSingletonObjCTraits as the default
+// trait class.  This makes it straightforward for Objective-C++ code
+// to hold Objective-C objects as singletons.
+template<typename Type,
+         typename Traits = DefaultSingletonObjCTraits<Type>,
+         typename DifferentiatingType = Type>
+class SingletonObjC : public Singleton<Type, Traits, DifferentiatingType> {
+};
+
+#endif  // BASE_MEMORY_SINGLETON_OBJC_H_
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index 1eba532..fed1494 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -5,7 +5,6 @@
 #include "base/message_loop/incoming_task_queue.h"
 
 #include <limits>
-#include <utility>
 
 #include "base/location.h"
 #include "base/message_loop/message_loop.h"
@@ -61,17 +60,16 @@
 
 bool IncomingTaskQueue::AddToIncomingQueue(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay,
     bool nestable) {
-  DCHECK(task);
   DLOG_IF(WARNING,
           delay.InSeconds() > kTaskDelayWarningThresholdInSeconds)
       << "Requesting super-long task delay period of " << delay.InSeconds()
       << " seconds from here: " << from_here.ToString();
 
-  PendingTask pending_task(from_here, std::move(task),
-                           CalculateDelayedRuntime(delay), nestable);
+  PendingTask pending_task(from_here, task, CalculateDelayedRuntime(delay),
+                           nestable);
 #if defined(OS_WIN)
   // We consider the task needs a high resolution timer if the delay is
   // more than 0 and less than 32ms. This caps the relative error to
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index 17bea07..157e47f 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -6,7 +6,6 @@
 #define BASE_MESSAGE_LOOP_INCOMING_TASK_QUEUE_H_
 
 #include "base/base_export.h"
-#include "base/callback.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/pending_task.h"
@@ -36,7 +35,7 @@
   // returns false. In all cases, the ownership of |task| is transferred to the
   // called method.
   bool AddToIncomingQueue(const tracked_objects::Location& from_here,
-                          OnceClosure task,
+                          const Closure& task,
                           TimeDelta delay,
                           bool nestable);
 
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index 6b4765b..bfef261 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -320,8 +320,6 @@
   // Runs the specified PendingTask.
   void RunTask(PendingTask* pending_task);
 
-  bool nesting_allowed() const { return allow_nesting_; }
-
   // Disallow nesting. After this is called, running a nested RunLoop or calling
   // Add/RemoveNestingObserver() on this MessageLoop will crash.
   void DisallowNesting() { allow_nesting_ = false; }
diff --git a/base/message_loop/message_loop_task_runner.cc b/base/message_loop/message_loop_task_runner.cc
index aece087..c9b5ffe 100644
--- a/base/message_loop/message_loop_task_runner.cc
+++ b/base/message_loop/message_loop_task_runner.cc
@@ -4,8 +4,6 @@
 
 #include "base/message_loop/message_loop_task_runner.h"
 
-#include <utility>
-
 #include "base/location.h"
 #include "base/logging.h"
 #include "base/message_loop/incoming_task_queue.h"
@@ -26,20 +24,18 @@
 
 bool MessageLoopTaskRunner::PostDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const base::Closure& task,
     base::TimeDelta delay) {
   DCHECK(!task.is_null()) << from_here.ToString();
-  return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
-                                             true);
+  return incoming_queue_->AddToIncomingQueue(from_here, task, delay, true);
 }
 
 bool MessageLoopTaskRunner::PostNonNestableDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const base::Closure& task,
     base::TimeDelta delay) {
   DCHECK(!task.is_null()) << from_here.ToString();
-  return incoming_queue_->AddToIncomingQueue(from_here, std::move(task), delay,
-                                             false);
+  return incoming_queue_->AddToIncomingQueue(from_here, task, delay, false);
 }
 
 bool MessageLoopTaskRunner::RunsTasksOnCurrentThread() const {
diff --git a/base/message_loop/message_loop_task_runner.h b/base/message_loop/message_loop_task_runner.h
index 99a96a7..5e70b12 100644
--- a/base/message_loop/message_loop_task_runner.h
+++ b/base/message_loop/message_loop_task_runner.h
@@ -6,7 +6,6 @@
 #define BASE_MESSAGE_LOOP_MESSAGE_LOOP_TASK_RUNNER_H_
 
 #include "base/base_export.h"
-#include "base/callback.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/pending_task.h"
@@ -32,10 +31,10 @@
 
   // SingleThreadTaskRunner implementation
   bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
+                       const base::Closure& task,
                        base::TimeDelta delay) override;
   bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  OnceClosure task,
+                                  const base::Closure& task,
                                   base::TimeDelta delay) override;
   bool RunsTasksOnCurrentThread() const override;
 
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
index d403c70..54551da 100644
--- a/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -127,7 +127,7 @@
   RunLoop().Run();
 
   EXPECT_EQ(task_thread_.message_loop(), task_run_on);
-  EXPECT_EQ(task_thread_.message_loop(), task_deleted_on);
+  EXPECT_EQ(current_loop_.get(), task_deleted_on);
   EXPECT_EQ(current_loop_.get(), reply_run_on);
   EXPECT_EQ(current_loop_.get(), reply_deleted_on);
   EXPECT_LT(task_delete_order, reply_delete_order);
@@ -200,8 +200,7 @@
   EXPECT_LT(task_delete_order, reply_delete_order);
 }
 
-TEST_F(MessageLoopTaskRunnerTest,
-       PostTaskAndReply_DeadReplyTaskRunnerBehavior) {
+TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
   // Annotate the scope as having memory leaks to suppress heapchecker reports.
   ANNOTATE_SCOPED_MEMORY_LEAK;
   MessageLoop* task_run_on = NULL;
@@ -238,13 +237,11 @@
   MessageLoop* task_loop = task_thread_.message_loop();
   task_thread_.Stop();
 
-  // Even if the reply task runner is already gone, the original task should
-  // already be deleted. However, the reply which hasn't executed yet should
-  // leak to avoid thread-safety issues.
   EXPECT_EQ(task_loop, task_run_on);
-  EXPECT_EQ(task_loop, task_deleted_on);
+  ASSERT_FALSE(task_deleted_on);
   EXPECT_FALSE(reply_run_on);
   ASSERT_FALSE(reply_deleted_on);
+  EXPECT_EQ(task_delete_order, reply_delete_order);
 
   // The PostTaskAndReplyRelay is leaked here.  Even if we had a reference to
   // it, we cannot just delete it because PostTaskAndReplyRelay's destructor
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index 9d771d5..14fe1ee 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -12,7 +12,6 @@
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted.h"
 #include "base/message_loop/message_loop.h"
 #include "base/message_loop/message_loop_test.h"
@@ -94,19 +93,16 @@
   static_cast<base::MessageLoopForUI*>(base::MessageLoop::current())->Abort();
 }
 
-void RunTest_AbortDontRunMoreTasks(bool delayed, bool init_java_first) {
+void RunTest_AbortDontRunMoreTasks(bool delayed) {
+  MessageLoop loop(MessageLoop::TYPE_JAVA);
+
   WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
                                 WaitableEvent::InitialState::NOT_SIGNALED);
 
-  std::unique_ptr<android::JavaHandlerThread> java_thread;
-  if (init_java_first) {
-    java_thread =
-        android::JavaHandlerThreadForTesting::CreateJavaFirst(&test_done_event);
-  } else {
-    java_thread = android::JavaHandlerThreadForTesting::Create(
-        "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
-        &test_done_event);
-  }
+  std::unique_ptr<android::JavaHandlerThreadForTesting> java_thread;
+  java_thread.reset(new android::JavaHandlerThreadForTesting(
+      "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
+      &test_done_event));
   java_thread->Start();
 
   if (delayed) {
@@ -125,19 +121,10 @@
 }
 
 TEST(MessageLoopTest, JavaExceptionAbort) {
-  constexpr bool delayed = false;
-  constexpr bool init_java_first = false;
-  RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
+  RunTest_AbortDontRunMoreTasks(false);
 }
 TEST(MessageLoopTest, DelayedJavaExceptionAbort) {
-  constexpr bool delayed = true;
-  constexpr bool init_java_first = false;
-  RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
-}
-TEST(MessageLoopTest, JavaExceptionAbortInitJavaFirst) {
-  constexpr bool delayed = false;
-  constexpr bool init_java_first = true;
-  RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
+  RunTest_AbortDontRunMoreTasks(true);
 }
 #endif  // defined(OS_ANDROID)
 
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index d39972a..7847376 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -41,9 +41,10 @@
 // delete and reused. The value in |sample| must be strictly less than
 // |enum_max|.
 
-#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max) \
-  INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(               \
-      name, sample, enum_max, base::HistogramBase::kUmaTargetedHistogramFlag)
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max)                      \
+    INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                  \
+        name, sample, enum_max,                                                \
+        base::HistogramBase::kUmaTargetedHistogramFlag)
 
 // Histogram for boolean values.
 
@@ -67,15 +68,14 @@
 // Sample usage:
 //   UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
 #define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
-  INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                \
-      name, sample, value_max, base::HistogramBase::kUmaTargetedHistogramFlag)
+  UMA_HISTOGRAM_ENUMERATION(name, sample, value_max)
 
 // Used for capturing basic percentages. This will be 100 buckets of size 1.
 
 // Sample usage:
 //   UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
-#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
-  UMA_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
+#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int)                         \
+    UMA_HISTOGRAM_ENUMERATION(name, percent_as_int, 101)
 
 //------------------------------------------------------------------------------
 // Count histograms. These are used for collecting numeric data. Note that we
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
index c107a47..53e4f11 100644
--- a/base/metrics/histogram_macros_internal.h
+++ b/base/metrics/histogram_macros_internal.h
@@ -5,11 +5,6 @@
 #ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
 #define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
 
-#include <stdint.h>
-
-#include <limits>
-#include <type_traits>
-
 #include "base/atomicops.h"
 #include "base/logging.h"
 #include "base/metrics/histogram.h"
@@ -101,42 +96,17 @@
         base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
 
 // This is a helper macro used by other macros and shouldn't be used directly.
-// The bucketing scheme is linear with a bucket size of 1. For N items,
-// recording values in the range [0, N - 1] creates a linear histogram with N +
-// 1 buckets:
-//   [0, 1), [1, 2), ..., [N - 1, N)
-// and an overflow bucket [N, infinity).
-//
+// For an enumeration with N items, recording values in the range [0, N - 1],
+// this macro creates a linear histogram with N + 1 buckets:
+//   [0, 1), [1, 2), ..., [N - 1, N), and an overflow bucket [N, infinity).
 // Code should never emit to the overflow bucket; only to the other N buckets.
-// This allows future versions of Chrome to safely increase the boundary size.
-// Otherwise, the histogram would have [N - 1, infinity) as its overflow bucket,
-// and so the maximal value (N - 1) would be emitted to this overflow bucket.
-// But, if an additional value were later added, the bucket label for
-// the value (N - 1) would change to [N - 1, N), which would result in different
-// versions of Chrome using different bucket labels for identical data.
-#define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary,  \
-                                                  flag)                    \
-  do {                                                                     \
-    static_assert(!std::is_enum<decltype(sample)>::value,                  \
-                  "|sample| should not be an enum type!");                 \
-    static_assert(!std::is_enum<decltype(boundary)>::value,                \
-                  "|boundary| should not be an enum type!");               \
-    STATIC_HISTOGRAM_POINTER_BLOCK(                                        \
-        name, Add(sample),                                                 \
-        base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
-                                          flag));                          \
-  } while (0)
-
-// Similar to the previous macro but intended for enumerations. This delegates
-// the work to the previous macro, but supports scoped enumerations as well by
-// forcing an explicit cast to the HistogramBase::Sample integral type.
-//
-// Note the range checks verify two separate issues:
-// - that the declared enum max isn't out of range of HistogramBase::Sample
-// - that the declared enum max is > 0
-//
-// TODO(dcheng): This should assert that the passed in types are actually enum
-// types.
+// This allows future versions of Chrome to safely append new entries to the
+// enumeration. Otherwise, the histogram would have [N - 1, infinity) as its
+// overflow bucket, and so the maximal value (N - 1) would be emitted to this
+// overflow bucket. But, if an additional enumerated value were later added, the
+// bucket label for the value (N - 1) would change to [N - 1, N), which would
+// result in different versions of Chrome using different bucket labels for
+// identical data.
 #define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
   do {                                                                         \
     static_assert(                                                             \
@@ -145,14 +115,9 @@
             std::is_same<std::remove_const<decltype(sample)>::type,            \
                          std::remove_const<decltype(boundary)>::type>::value,  \
         "|sample| and |boundary| shouldn't be of different enums");            \
-    static_assert(                                                             \
-        static_cast<uintmax_t>(boundary) <                                     \
-            static_cast<uintmax_t>(                                            \
-                std::numeric_limits<base::HistogramBase::Sample>::max()),      \
-        "|boundary| is out of range of HistogramBase::Sample");                \
-    INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                                 \
-        name, static_cast<base::HistogramBase::Sample>(sample),                \
-        static_cast<base::HistogramBase::Sample>(boundary), flag);             \
+    STATIC_HISTOGRAM_POINTER_BLOCK(                                            \
+        name, Add(sample), base::LinearHistogram::FactoryGet(                  \
+                               name, 1, boundary, boundary + 1, flag));        \
   } while (0)
 
 // This is a helper macro used by other macros and shouldn't be used directly.
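
The restored comment above describes the bucket scheme in prose; concretely, for a three-value enum the macro creates buckets [0, 1), [1, 2), [2, 3) plus an overflow bucket [3, infinity), and the recorded value must stay strictly below the boundary. A sketch with illustrative names:

```cpp
#include "base/metrics/histogram_macros.h"

enum FrobResult {
  FROB_OK,       // 0
  FROB_RETRIED,  // 1
  FROB_FAILED,   // 2
  FROB_MAX,      // 3: the boundary; never emitted itself.
};

void RecordFrob(FrobResult result) {
  // |result| must be strictly less than FROB_MAX (see histogram_macros.h).
  UMA_HISTOGRAM_ENUMERATION("Test.Frob", result, FROB_MAX);
}
```
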
diff --git a/base/metrics/histogram_macros_unittest.cc b/base/metrics/histogram_macros_unittest.cc
index 33a9c6e..c599161 100644
--- a/base/metrics/histogram_macros_unittest.cc
+++ b/base/metrics/histogram_macros_unittest.cc
@@ -15,35 +15,4 @@
   SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer1");
 }
 
-// Compile tests for UMA_HISTOGRAM_ENUMERATION with the three different types it
-// accepts:
-// - integral types
-// - unscoped enums
-// - scoped enums
-TEST(HistogramMacro, IntegralPsuedoEnumeration) {
-  UMA_HISTOGRAM_ENUMERATION("Test.FauxEnumeration", 1, 10000);
-}
-
-TEST(HistogramMacro, UnscopedEnumeration) {
-  enum TestEnum : char {
-    FIRST_VALUE,
-    SECOND_VALUE,
-    THIRD_VALUE,
-    MAX_ENTRIES,
-  };
-  UMA_HISTOGRAM_ENUMERATION("Test.UnscopedEnumeration", SECOND_VALUE,
-                            MAX_ENTRIES);
-}
-
-TEST(HistogramMacro, ScopedEnumeration) {
-  enum class TestEnum {
-    FIRST_VALUE,
-    SECOND_VALUE,
-    THIRD_VALUE,
-    MAX_ENTRIES,
-  };
-  UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration", TestEnum::SECOND_VALUE,
-                            TestEnum::MAX_ENTRIES);
-}
-
 }  // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 5f44b67..2991003 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -785,6 +785,24 @@
 #endif  // !defined(OS_NACL)
 
 // static
+void GlobalHistogramAllocator::CreateWithSharedMemory(
+    std::unique_ptr<SharedMemory> memory,
+    size_t size,
+    uint64_t /*id*/,
+    StringPiece /*name*/) {
+  if ((!memory->memory() && !memory->Map(size)) ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*memory)) {
+    NOTREACHED();
+    return;
+  }
+
+  DCHECK_LE(memory->mapped_size(), size);
+  Set(WrapUnique(
+      new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
+          std::move(memory), 0, StringPiece(), /*readonly=*/false))));
+}
+
+// static
 void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
     const SharedMemoryHandle& handle,
     size_t size) {
@@ -887,8 +905,6 @@
 }
 
 void GlobalHistogramAllocator::DeletePersistentLocation() {
-  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
-
 #if defined(OS_NACL)
   NOTREACHED();
 #else
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index 851d7ef..2eb28df 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -431,6 +431,15 @@
                                  FilePath* out_active_path);
 #endif
 
+  // Create a global allocator using a block of shared |memory| of the
+  // specified |size|. The allocator takes ownership of the shared memory
+  // and releases it upon destruction, though the memory will continue to
+  // live if other processes have access to it.
+  static void CreateWithSharedMemory(std::unique_ptr<SharedMemory> memory,
+                                     size_t size,
+                                     uint64_t id,
+                                     StringPiece name);
+
   // Create a global allocator using a block of shared memory accessed
   // through the given |handle| and |size|. The allocator takes ownership
   // of the handle and closes it upon destruction, though the memory will
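
A sketch of wiring up the restored CreateWithSharedMemory() factory, assuming a freshly created anonymous segment; per the implementation above, |id| and |name| are accepted but not recorded:

```cpp
// Hand an owned, mapped SharedMemory segment to the global allocator.
#include <utility>
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory.h"
#include "base/metrics/persistent_histogram_allocator.h"

void SetUpMetrics() {
  const size_t kSize = 1 << 20;  // 1 MiB segment.
  auto memory = base::MakeUnique<base::SharedMemory>();
  if (!memory->CreateAndMapAnonymous(kSize))
    return;
  base::GlobalHistogramAllocator::CreateWithSharedMemory(
      std::move(memory), kSize, /*id=*/0, /*name=*/"");
}
```
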
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index abcc532..f70b396 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -18,7 +18,6 @@
 #include "base/memory/shared_memory.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/sparse_histogram.h"
-#include "base/threading/thread_restrictions.h"
 
 namespace {
 
@@ -33,7 +32,7 @@
 // The current version of the metadata. If updates are made that change
 // the metadata, the version number can be queried to operate in a backward-
 // compatible manner until the memory segment is completely re-initialized.
-const uint32_t kGlobalVersion = 2;
+const uint32_t kGlobalVersion = 1;
 
 // Constant values placed in the block headers to indicate its state.
 const uint32_t kBlockCookieFree = 0;
@@ -44,7 +43,7 @@
 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
 // types rather than combined bitfield.
 
-// Flags stored in the flags_ field of the SharedMetadata structure below.
+// Flags stored in the flags_ field of the SharedMetaData structure below.
 enum : int {
   kFlagCorrupt = 1 << 0,
   kFlagFull    = 1 << 1
@@ -101,9 +100,7 @@
 };
 
 // The shared metadata exists once at the top of the memory segment to
-// describe the state of the allocator to all processes. The size of this
-// structure must be a multiple of 64-bits to ensure compatibility between
-// architectures.
+// describe the state of the allocator to all processes.
 struct PersistentMemoryAllocator::SharedMetadata {
   uint32_t cookie;     // Some value that indicates complete initialization.
   uint32_t size;       // Total size of memory segment.
@@ -111,15 +108,10 @@
   uint32_t version;    // Version code so upgrades don't break.
   uint64_t id;         // Arbitrary ID number given by creator.
   uint32_t name;       // Reference to stored name string.
-  uint32_t padding1;   // Pad-out read-only data to 64-bit alignment.
 
   // Above is read-only after first construction. Below may be changed and
   // so must be marked "volatile" to provide correct inter-process behavior.
 
-  // State of the memory, plus some padding to keep alignment.
-  volatile std::atomic<uint8_t> memory_state;  // MemoryState enum values.
-  uint8_t padding2[3];
-
   // Bitfield of information flags. Access to this should be done through
   // the CheckFlag() and SetFlag() methods defined above.
   volatile std::atomic<uint32_t> flags;
@@ -129,7 +121,6 @@
 
   // The "iterable" queue is an M&S Queue as described here, append-only:
   // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
-  // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
   volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
   volatile BlockHeader queue;   // Empty block for linked-list head/tail.
 };
@@ -321,7 +312,7 @@
   // definitions and so cannot be moved to the global scope.
   static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
                 "struct is not portable across different natural word widths");
-  static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
+  static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
                 "struct is not portable across different natural word widths");
 
   static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
@@ -393,13 +384,12 @@
       if (name_cstr)
         memcpy(name_cstr, name.data(), name.length());
     }
-
-    shared_meta()->memory_state.store(MEMORY_INITIALIZED,
-                                      std::memory_order_release);
   } else {
-    if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
+    if (shared_meta()->size == 0 ||
+        shared_meta()->version == 0 ||
         shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
-        shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
+        shared_meta()->tailptr == 0 ||
+        shared_meta()->queue.cookie == 0 ||
         shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
       SetCorrupt();
     }
@@ -480,19 +470,6 @@
       HistogramBase::kUmaTargetedHistogramFlag);
 }
 
-void PersistentMemoryAllocator::Flush(bool sync) {
-  FlushPartial(used(), sync);
-}
-
-void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
-  shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
-  FlushPartial(sizeof(SharedMetadata), false);
-}
-
-uint8_t PersistentMemoryAllocator::GetMemoryState() const {
-  return shared_meta()->memory_state.load(std::memory_order_relaxed);
-}
-
 size_t PersistentMemoryAllocator::used() const {
   return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
                   mem_size_);
@@ -839,12 +816,8 @@
 PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
                                     uint32_t size, bool queue_ok,
                                     bool free_ok) const {
-  // Handle special cases.
-  if (ref == kReferenceQueue && queue_ok)
-    return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
-
   // Validation of parameters.
-  if (ref < sizeof(SharedMetadata))
+  if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
     return nullptr;
   if (ref % kAllocAlignment != 0)
     return nullptr;
@@ -854,13 +827,17 @@
 
   // Validation of referenced block-header.
   if (!free_ok) {
+    uint32_t freeptr = std::min(
+        shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
+    if (ref + size > freeptr)
+      return nullptr;
     const volatile BlockHeader* const block =
         reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
-    if (block->cookie != kBlockCookieAllocated)
-      return nullptr;
     if (block->size < size)
       return nullptr;
-    if (ref + block->size > mem_size_)
+    if (ref + block->size > freeptr)
+      return nullptr;
+    if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
       return nullptr;
     if (type_id != 0 &&
         block->type_id.load(std::memory_order_relaxed) != type_id) {
@@ -872,13 +849,6 @@
   return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
 }
 
-void PersistentMemoryAllocator::FlushPartial(size_t /*length*/, bool /*sync*/) {
-  // Generally there is nothing to do as every write is done through volatile
-  // memory with atomic instructions to guarantee consistency. This (virtual)
-  // method exists so that derivced classes can do special things, such as
-  // tell the OS to write changes to disk now rather than when convenient.
-}
-
 void PersistentMemoryAllocator::RecordError(int error) const {
   if (errors_histogram_)
     errors_histogram_->Add(error);
@@ -1019,12 +989,7 @@
           id,
           name,
           read_only),
-      mapped_file_(std::move(file)) {
-  // Ensure the disk-copy of the data reflects the fully-initialized memory as
-  // there is no guarantee as to what order the pages might be auto-flushed by
-  // the OS in the future.
-  Flush(true);
-}
+      mapped_file_(std::move(file)) {}
 
 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
 
@@ -1034,33 +999,6 @@
     bool read_only) {
   return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
 }
-
-void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
-  if (sync)
-    ThreadRestrictions::AssertIOAllowed();
-  if (IsReadonly())
-    return;
-
-#if defined(OS_WIN)
-  // Windows doesn't support a synchronous flush.
-  BOOL success = ::FlushViewOfFile(data(), length);
-  DPCHECK(success);
-#elif defined(OS_MACOSX)
-  // On OSX, "invalidate" removes all cached pages, forcing a re-read from
-  // disk. That's not applicable to "flush" so omit it.
-  int result =
-      ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
-  DCHECK_NE(EINVAL, result);
-#elif defined(OS_POSIX)
-  // On POSIX, "invalidate" forces _other_ processes to recognize what has
-  // been written to disk and so is applicable to "flush".
-  int result = ::msync(const_cast<void*>(data()), length,
-                       MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
-  DCHECK_NE(EINVAL, result);
-#else
-#error Unsupported OS.
-#endif
-}
 #endif  // !defined(OS_NACL)
 
 }  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index 94a7744..b38f284 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -96,29 +96,6 @@
  public:
   typedef uint32_t Reference;
 
-  // These states are used to indicate the overall condition of the memory
-  // segment irrespective of what is stored within it. Because the data is
-  // often persistent and thus needs to be readable by different versions of
-  // a program, these values are fixed and can never change.
-  enum MemoryState : uint8_t {
-    // Persistent memory starts all zeros and so shows "uninitialized".
-    MEMORY_UNINITIALIZED = 0,
-
-    // The header has been written and the memory is ready for use.
-    MEMORY_INITIALIZED = 1,
-
-    // The data should be considered deleted. This would be set when the
-    // allocator is being cleaned up. If file-backed, the file is likely
-    // to be deleted but since deletion can fail for a variety of reasons,
-    // having this extra status means a future reader can realize what
-    // should have happened.
-    MEMORY_DELETED = 2,
-
-    // Outside code can create states starting with this number; these too
-    // must also never change between code versions.
-    MEMORY_USER_DEFINED = 100,
-  };
-
   // Iterator for going through all iterable memory records in an allocator.
   // Like the allocator itself, iterators are lock-free and thread-secure.
   // That means that multiple threads can share an iterator and the same
@@ -303,11 +280,7 @@
   const char* Name() const;
 
   // Is this segment open only for read?
-  bool IsReadonly() const { return readonly_; }
-
-  // Manage the saved state of the memory.
-  void SetMemoryState(uint8_t memory_state);
-  uint8_t GetMemoryState() const;
+  bool IsReadonly() { return readonly_; }
 
   // Create internal histograms for tracking memory use and allocation sizes
   // for allocator of |name| (which can simply be the result of Name()). This
@@ -320,17 +293,6 @@
   //    UMA.PersistentAllocator.name.UsedPct
   void CreateTrackingHistograms(base::StringPiece name);
 
-  // Flushes the persistent memory to any backing store. This typically does
-  // nothing but is used by the FilePersistentMemoryAllocator to inform the
-  // OS that all the data should be sent to the disk immediately. This is
-  // useful in the rare case where something has just been stored that needs
-  // to survive a hard shutdown of the machine like from a power failure.
-  // The |sync| parameter indicates if this call should block until the flush
-  // is complete but is only advisory and may or may not have an effect
-  // depending on the capabilities of the OS. Synchronous flushes are allowed
-  // only from threads that are allowed to do I/O.
-  void Flush(bool sync);
-
   // Direct access to underlying memory segment. If the segment is shared
   // across threads or processes, reading data through these values does
   // not guarantee consistency. Use with care. Do not write.
@@ -618,9 +580,6 @@
                             uint64_t id, base::StringPiece name,
                             bool readonly);
 
-  // Implementation of Flush that accepts how much to flush.
-  virtual void FlushPartial(size_t length, bool sync);
-
   volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
   const MemoryType mem_type_;      // Type of memory allocation.
   const uint32_t mem_size_;        // Size of entire memory segment.
@@ -756,10 +715,6 @@
   // the rest.
   static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
 
- protected:
-  // PersistentMemoryAllocator:
-  void FlushPartial(size_t length, bool sync) override;
-
  private:
   std::unique_ptr<MemoryMappedFile> mapped_file_;
 
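
The surrounding allocator API is untouched by the revert; a rough usage sketch, where the type id, sizes, and the "demo" name are made-up values:

    base::LocalPersistentMemoryAllocator allocator(64 << 10, /*id=*/0, "demo");
    const uint32_t kType = 42;  // arbitrary type id for the sketch
    base::PersistentMemoryAllocator::Reference ref =
        allocator.Allocate(/*size=*/128, kType);
    allocator.MakeIterable(ref);
    base::PersistentMemoryAllocator::Iterator iter(&allocator);
    uint32_t type_seen = 0;
    CHECK_EQ(ref, iter.GetNext(&type_seen));  // the record is found again
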
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index c3027ec..d12e00f 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -100,8 +100,6 @@
   EXPECT_TRUE(allocator_->used_histogram_);
   EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
             allocator_->used_histogram_->histogram_name());
-  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
-            allocator_->GetMemoryState());
 
   // Get base memory info for later comparison.
   PersistentMemoryAllocator::MemoryInfo meminfo0;
@@ -256,11 +254,6 @@
   allocator_->Delete(obj2);
   PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
   EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
-
-  // Ensure that the memory state can be set.
-  allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
-  EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
-            allocator_->GetMemoryState());
 }
 
 TEST_F(PersistentMemoryAllocatorTest, PageTest) {
@@ -698,8 +691,8 @@
   const size_t mmlength = mmfile->length();
   EXPECT_GE(meminfo1.total, mmlength);
 
-  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", false);
-  EXPECT_FALSE(file.IsReadonly());
+  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
+  EXPECT_TRUE(file.IsReadonly());
   EXPECT_EQ(TEST_ID, file.Id());
   EXPECT_FALSE(file.IsFull());
   EXPECT_FALSE(file.IsCorrupt());
@@ -720,11 +713,6 @@
   EXPECT_GE(meminfo1.free, meminfo2.free);
   EXPECT_EQ(mmlength, meminfo2.total);
   EXPECT_EQ(0U, meminfo2.free);
-
-  // There's no way of knowing if Flush actually does anything but at least
-  // verify that it runs without CHECK violations.
-  file.Flush(false);
-  file.Flush(true);
 }
 
 TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
diff --git a/base/native_library.h b/base/native_library.h
index e2b9ca7..02eae1d 100644
--- a/base/native_library.h
+++ b/base/native_library.h
@@ -91,6 +91,16 @@
     const NativeLibraryOptions& options,
     NativeLibraryLoadError* error);
 
+#if defined(OS_WIN)
+// Loads a native library from disk.  Release it with UnloadNativeLibrary when
+// you're done.
+// This function retrieves the LoadLibrary function exported from kernel32.dll
+// and calls it instead of directly calling the LoadLibrary function via the
+// import table.
+BASE_EXPORT NativeLibrary LoadNativeLibraryDynamically(
+    const FilePath& library_path);
+#endif  // OS_WIN
+
 // Unloads a native library.
 BASE_EXPORT void UnloadNativeLibrary(NativeLibrary library);
 
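
A hedged usage sketch for the restored Windows-only loader; the DLL name is a placeholder:

    #if defined(OS_WIN)
    base::NativeLibrary lib = base::LoadNativeLibraryDynamically(
        base::FilePath(FILE_PATH_LITERAL("example.dll")));  // placeholder
    if (lib)
      base::UnloadNativeLibrary(lib);
    #endif
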
diff --git a/base/post_task_and_reply_with_result_internal.h b/base/post_task_and_reply_with_result_internal.h
index 6f50de8..1456129 100644
--- a/base/post_task_and_reply_with_result_internal.h
+++ b/base/post_task_and_reply_with_result_internal.h
@@ -16,15 +16,16 @@
 // Adapts a function that produces a result via a return value to
 // one that returns via an output parameter.
 template <typename ReturnType>
-void ReturnAsParamAdapter(OnceCallback<ReturnType()> func, ReturnType* result) {
-  *result = std::move(func).Run();
+void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
+                          ReturnType* result) {
+  *result = func.Run();
 }
 
 // Adapts a T* result to a callback that expects a T.
 template <typename TaskReturnType, typename ReplyArgType>
-void ReplyAdapter(OnceCallback<void(ReplyArgType)> callback,
+void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
                   TaskReturnType* result) {
-  std::move(callback).Run(std::move(*result));
+  callback.Run(std::move(*result));
 }
 
 }  // namespace internal
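
These adapters back PostTaskAndReplyWithResult(); with the Callback-based signatures restored, the composition is roughly as follows (|task_runner|, |from_here|, |task|, and |reply| are assumed in scope):

    TaskReturnType* result = new TaskReturnType();
    task_runner->PostTaskAndReply(
        from_here,
        base::Bind(&internal::ReturnAsParamAdapter<TaskReturnType>, task,
                   result),
        base::Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
                   reply, base::Owned(result)));  // Owned() frees |result|
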
diff --git a/base/process/launch.h b/base/process/launch.h
index 99a7280..be8f6e7 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -262,11 +262,6 @@
 BASE_EXPORT bool GetAppOutput(const std::vector<std::string>& argv,
                               std::string* output);
 
-// Like the above POSIX-specific version of GetAppOutput, but also includes
-// stderr.
-BASE_EXPORT bool GetAppOutputAndError(const std::vector<std::string>& argv,
-                                      std::string* output);
-
 // A version of |GetAppOutput()| which also returns the exit code of the
 // executed command. Returns true if the application runs and exits cleanly. If
 // this is the case the exit code of the application is available in
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index 1c4df40..19effa2 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -668,14 +668,6 @@
   return result && exit_code == EXIT_SUCCESS;
 }
 
-bool GetAppOutputAndError(const std::vector<std::string>& argv,
-                          std::string* output) {
-  int exit_code;
-  bool result =
-      GetAppOutputInternal(argv, nullptr, true, output, true, &exit_code);
-  return result && exit_code == EXIT_SUCCESS;
-}
-
 bool GetAppOutputWithExitCode(const CommandLine& cl,
                               std::string* output,
                               int* exit_code) {
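
With the argv overload of GetAppOutputAndError() removed again, plain stdout capture on POSIX goes through GetAppOutput(); a minimal, illustrative call:

    std::vector<std::string> argv = {"/bin/echo", "hello"};  // placeholder
    std::string output;
    bool ok = base::GetAppOutput(argv, &output);  // stdout only; no stderr
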
diff --git a/base/process/process_info_unittest.cc b/base/process/process_info_unittest.cc
deleted file mode 100644
index a757774..0000000
--- a/base/process/process_info_unittest.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/process/process_info.h"
-
-#include "base/time/time.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-#if !defined(OS_IOS)
-TEST(ProcessInfoTest, CreationTime) {
-  Time creation_time = CurrentProcessInfo::CreationTime();
-  ASSERT_FALSE(creation_time.is_null());
-}
-#endif  // !defined(OS_IOS)
-
-}  // namespace base
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
index ad555ae..a38930a 100644
--- a/base/process/process_metrics.cc
+++ b/base/process/process_metrics.cc
@@ -12,11 +12,6 @@
 
 namespace base {
 
-SystemMemoryInfoKB::SystemMemoryInfoKB() = default;
-
-SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
-    default;
-
 SystemMetrics::SystemMetrics() {
   committed_memory_ = 0;
 }
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 1562e7b..71d6042 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -27,10 +27,6 @@
 #include "base/process/port_provider_mac.h"
 #endif
 
-#if defined(OS_WIN)
-#include "base/win/scoped_handle.h"
-#endif
-
 namespace base {
 
 #if defined(OS_WIN)
@@ -67,12 +63,8 @@
 // shareable:      0
 // swapped         Pages swapped out to zram.
 //
-// On macOS:
-// priv:           Resident size (RSS) including shared memory. Warning: This
-//                 does not include compressed size and does not always
-//                 accurately account for shared memory due to things like
-//                 copy-on-write. TODO(erikchen): Revamp this with something
-//                 more accurate.
+// On OS X: TODO(thakis): Revise.
+// priv:           Memory.
 // shared:         0
 // shareable:      0
 //
@@ -162,13 +154,10 @@
   // system call.
   bool GetCommittedAndWorkingSetKBytes(CommittedKBytes* usage,
                                        WorkingSetKBytes* ws_usage) const;
-  // Returns private, shared, and total resident bytes. |locked_bytes| refers to
-  // bytes that must stay resident. |locked_bytes| only counts bytes locked by
-  // this task, not bytes locked by the kernel.
+  // Returns private, shared, and total resident bytes.
   bool GetMemoryBytes(size_t* private_bytes,
                       size_t* shared_bytes,
-                      size_t* resident_bytes,
-                      size_t* locked_bytes) const;
+                      size_t* resident_bytes) const;
 #endif
 
   // Returns the CPU usage in percent since the last time this method or
@@ -199,10 +188,6 @@
   // Returns the number of file descriptors currently open by the process, or
   // -1 on error.
   int GetOpenFdCount() const;
-
-  // Returns the soft limit of file descriptors that can be opened by the
-  // process, or -1 on error.
-  int GetOpenFdSoftLimit() const;
 #endif  // defined(OS_LINUX)
 
  private:
@@ -224,11 +209,7 @@
   int CalculateIdleWakeupsPerSecond(uint64_t absolute_idle_wakeups);
 #endif
 
-#if defined(OS_WIN)
-  win::ScopedHandle process_;
-#else
   ProcessHandle process_;
-#endif
 
   int processor_count_;
 
@@ -283,13 +264,11 @@
 // Data about system-wide memory consumption. Values are in KB. Available on
 // Windows, Mac, Linux, Android and Chrome OS.
 //
-// Total memory is available on all platforms that implement
+// Total/free memory are available on all platforms that implement
 // GetSystemMemoryInfo(). Total/free swap memory are available on all platforms
 // except on Mac. Buffers/cached/active_anon/inactive_anon/active_file/
-// inactive_file/dirty/reclaimable/pswpin/pswpout/pgmajfault are available on
+// inactive_file/dirty/pswpin/pswpout/pgmajfault are available on
 // Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
-// Speculative/file_backed/purgeable are Mac and iOS only.
-// Free is absent on Windows (see "avail_phys" below).
 struct BASE_EXPORT SystemMemoryInfoKB {
   SystemMemoryInfoKB();
   SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
@@ -297,64 +276,44 @@
   // Serializes the platform specific fields to value.
   std::unique_ptr<Value> ToValue() const;
 
-  int total = 0;
+  int total;
+  int free;
 
-#if !defined(OS_WIN)
-  int free = 0;
-#endif
-
-#if defined(OS_WIN)
-  // "This is the amount of physical memory that can be immediately reused
-  // without having to write its contents to disk first. It is the sum of the
-  // size of the standby, free, and zero lists." (MSDN).
-  // Standby: not modified pages of physical ram (file-backed memory) that are
-  // not actively being used.
-  int avail_phys = 0;
-#endif
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_LINUX)
   // This provides an estimate of available memory as described here:
   // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
   // NOTE: this is ONLY valid in kernels 3.14 and up.  Its value will always
   // be 0 in earlier kernel versions.
-  // Note: it includes _all_ file-backed memory (active + inactive).
-  int available = 0;
+  int available;
 #endif
 
 #if !defined(OS_MACOSX)
-  int swap_total = 0;
-  int swap_free = 0;
+  int swap_total;
+  int swap_free;
 #endif
 
 #if defined(OS_ANDROID) || defined(OS_LINUX)
-  int buffers = 0;
-  int cached = 0;
-  int active_anon = 0;
-  int inactive_anon = 0;
-  int active_file = 0;
-  int inactive_file = 0;
-  int dirty = 0;
-  int reclaimable = 0;
+  int buffers;
+  int cached;
+  int active_anon;
+  int inactive_anon;
+  int active_file;
+  int inactive_file;
+  int dirty;
 
   // vmstats data.
-  unsigned long pswpin = 0;
-  unsigned long pswpout = 0;
-  unsigned long pgmajfault = 0;
+  unsigned long pswpin;
+  unsigned long pswpout;
+  unsigned long pgmajfault;
 #endif  // defined(OS_ANDROID) || defined(OS_LINUX)
 
 #if defined(OS_CHROMEOS)
-  int shmem = 0;
-  int slab = 0;
+  int shmem;
+  int slab;
   // Gem data will be -1 if not supported.
-  int gem_objects = -1;
-  long long gem_size = -1;
+  int gem_objects;
+  long long gem_size;
 #endif  // defined(OS_CHROMEOS)
-
-#if defined(OS_MACOSX)
-  int speculative = 0;
-  int file_backed = 0;
-  int purgeable = 0;
-#endif  // defined(OS_MACOSX)
 };
 
 // On Linux/Android/Chrome OS, system-wide memory consumption data is parsed
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index ba0dfa7..5d542cc 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -311,32 +311,6 @@
 
   return total_count;
 }
-
-int ProcessMetrics::GetOpenFdSoftLimit() const {
-  // Use /proc/<pid>/limits to read the open fd limit.
-  FilePath fd_path = internal::GetProcPidDir(process_).Append("limits");
-
-  std::string limits_contents;
-  if (!ReadFileToString(fd_path, &limits_contents))
-    return -1;
-
-  for (const auto& line :
-       base::SplitStringPiece(limits_contents, "\n", base::KEEP_WHITESPACE,
-                              base::SPLIT_WANT_NONEMPTY)) {
-    if (line.starts_with("Max open files")) {
-      auto tokens = base::SplitStringPiece(line, " ", base::TRIM_WHITESPACE,
-                                           base::SPLIT_WANT_NONEMPTY);
-      if (tokens.size() > 3) {
-        int limit = -1;
-        if (StringToInt(tokens[3], &limit))
-          return limit;
-        return -1;
-      }
-    }
-  }
-  return -1;
-}
-
 #endif  // defined(OS_LINUX)
 
 ProcessMetrics::ProcessMetrics(ProcessHandle process)
@@ -558,12 +532,45 @@
 
 }  // namespace
 
+SystemMemoryInfoKB::SystemMemoryInfoKB() {
+  total = 0;
+  free = 0;
+#if defined(OS_LINUX)
+  available = 0;
+#endif
+  buffers = 0;
+  cached = 0;
+  active_anon = 0;
+  inactive_anon = 0;
+  active_file = 0;
+  inactive_file = 0;
+  swap_total = 0;
+  swap_free = 0;
+  dirty = 0;
+
+  pswpin = 0;
+  pswpout = 0;
+  pgmajfault = 0;
+
+#ifdef OS_CHROMEOS
+  shmem = 0;
+  slab = 0;
+  gem_objects = -1;
+  gem_size = -1;
+#endif
+}
+
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+    default;
+
 std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
   std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   res->SetInteger("total", total);
   res->SetInteger("free", free);
+#if defined(OS_LINUX)
   res->SetInteger("available", available);
+#endif
   res->SetInteger("buffers", buffers);
   res->SetInteger("cached", cached);
   res->SetInteger("active_anon", active_anon);
@@ -574,7 +581,6 @@
   res->SetInteger("swap_free", swap_free);
   res->SetInteger("swap_used", swap_total - swap_free);
   res->SetInteger("dirty", dirty);
-  res->SetInteger("reclaimable", reclaimable);
   res->SetInteger("pswpin", pswpin);
   res->SetInteger("pswpout", pswpout);
   res->SetInteger("pgmajfault", pgmajfault);
@@ -622,8 +628,10 @@
       target = &meminfo->total;
     else if (tokens[0] == "MemFree:")
       target = &meminfo->free;
+#if defined(OS_LINUX)
     else if (tokens[0] == "MemAvailable:")
       target = &meminfo->available;
+#endif
     else if (tokens[0] == "Buffers:")
       target = &meminfo->buffers;
     else if (tokens[0] == "Cached:")
@@ -642,8 +650,6 @@
       target = &meminfo->swap_free;
     else if (tokens[0] == "Dirty:")
       target = &meminfo->dirty;
-    else if (tokens[0] == "SReclaimable:")
-      target = &meminfo->reclaimable;
 #if defined(OS_CHROMEOS)
     // Chrome OS has a tweaked kernel that allows us to query Shmem, which is
     // usually video memory otherwise invisible to the OS.
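
The fields parsed above surface through GetSystemMemoryInfo(); a hedged caller sketch for Linux, where after the revert |available| mirrors MemAvailable and stays 0 on kernels before 3.14:

    base::SystemMemoryInfoKB info;
    if (base::GetSystemMemoryInfo(&info)) {
      // Fall back to |free| when the kernel provides no MemAvailable.
      int available_kb = info.available != 0 ? info.available : info.free;
    }
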
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
index d6c0f3c..51f5fd4 100644
--- a/base/process/process_metrics_mac.cc
+++ b/base/process/process_metrics_mac.cc
@@ -16,9 +16,25 @@
 #include "base/mac/mach_logging.h"
 #include "base/mac/scoped_mach_port.h"
 #include "base/memory/ptr_util.h"
-#include "base/numerics/safe_conversions.h"
 #include "base/sys_info.h"
 
+#if !defined(TASK_POWER_INFO)
+// Doesn't exist in the 10.6 or 10.7 SDKs.
+#define TASK_POWER_INFO        21
+struct task_power_info {
+        uint64_t                total_user;
+        uint64_t                total_system;
+        uint64_t                task_interrupt_wakeups;
+        uint64_t                task_platform_idle_wakeups;
+        uint64_t                task_timer_wakeups_bin_1;
+        uint64_t                task_timer_wakeups_bin_2;
+};
+typedef struct task_power_info        task_power_info_data_t;
+typedef struct task_power_info        *task_power_info_t;
+#define TASK_POWER_INFO_COUNT        ((mach_msg_type_number_t) \
+                (sizeof (task_power_info_data_t) / sizeof (natural_t)))
+#endif
+
 namespace base {
 
 namespace {
@@ -62,58 +78,13 @@
   }
 }
 
-enum MachVMRegionResult { Finished, Error, Success };
-
-// Both |size| and |address| are in-out parameters.
-// |info| is an output parameter, only valid on Success.
-MachVMRegionResult GetTopInfo(mach_port_t task,
-                              mach_vm_size_t* size,
-                              mach_vm_address_t* address,
-                              vm_region_top_info_data_t* info) {
-  mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
-  mach_port_t object_name;
-  kern_return_t kr = mach_vm_region(task, address, size, VM_REGION_TOP_INFO,
-                                    reinterpret_cast<vm_region_info_t>(info),
-                                    &info_count, &object_name);
-  // We're at the end of the address space.
-  if (kr == KERN_INVALID_ADDRESS)
-    return Finished;
-
-  if (kr != KERN_SUCCESS)
-    return Error;
-
-  // The kernel always returns a null object for VM_REGION_TOP_INFO, but
-  // balance it with a deallocate in case this ever changes. See 10.9.2
-  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
-  mach_port_deallocate(task, object_name);
-  return Success;
-}
-
-MachVMRegionResult GetBasicInfo(mach_port_t task,
-                                mach_vm_size_t* size,
-                                mach_vm_address_t* address,
-                                vm_region_basic_info_64* info) {
-  mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
-  mach_port_t object_name;
-  kern_return_t kr = mach_vm_region(
-      task, address, size, VM_REGION_BASIC_INFO_64,
-      reinterpret_cast<vm_region_info_t>(info), &info_count, &object_name);
-  if (kr == KERN_INVALID_ADDRESS) {
-    // We're at the end of the address space.
-    return Finished;
-  } else if (kr != KERN_SUCCESS) {
-    return Error;
-  }
-
-  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
-  // balance it with a deallocate in case this ever changes. See 10.9.2
-  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
-  mach_port_deallocate(task, object_name);
-  return Success;
-}
-
 }  // namespace
 
+SystemMemoryInfoKB::SystemMemoryInfoKB() : total(0), free(0) {}
+
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+    default;
+
 // Getting a mach task from a pid for another process requires permissions in
 // general, so there doesn't really seem to be a way to do these (and spinning
 // up ps to fetch each stat seems dangerous to put in a base api for anyone to
@@ -139,8 +110,10 @@
 }
 
 size_t ProcessMetrics::GetWorkingSetSize() const {
+  size_t private_bytes = 0;
+  size_t shared_bytes = 0;
   size_t resident_bytes = 0;
-  if (!GetMemoryBytes(nullptr, nullptr, &resident_bytes, nullptr))
+  if (!GetMemoryBytes(&private_bytes, &shared_bytes, &resident_bytes))
     return 0;
   return resident_bytes;
 }
@@ -149,21 +122,16 @@
   return 0;
 }
 
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
-                                    size_t* shared_bytes) const {
-  return GetMemoryBytes(private_bytes, shared_bytes, nullptr, nullptr);
-}
-
 // This is a rough approximation of the algorithm that libtop uses.
 // private_bytes is the size of private resident memory.
 // shared_bytes is the size of shared resident memory.
 bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
-                                    size_t* shared_bytes,
-                                    size_t* resident_bytes,
-                                    size_t* locked_bytes) const {
+                                    size_t* shared_bytes) const {
   size_t private_pages_count = 0;
   size_t shared_pages_count = 0;
-  size_t wired_pages_count = 0;
+
+  if (!private_bytes && !shared_bytes)
+    return true;
 
   mach_port_t task = TaskForPid(process_);
   if (task == MACH_PORT_NULL) {
@@ -192,26 +160,28 @@
   // http://www.opensource.apple.com/source/top/top-67/libtop.c
   mach_vm_size_t size = 0;
   for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) {
-    mach_vm_size_t size_copy = size;
-    mach_vm_address_t address_copy = address;
-
     vm_region_top_info_data_t info;
-    MachVMRegionResult result = GetTopInfo(task, &size, &address, &info);
-    if (result == Error)
-      return false;
-    if (result == Finished)
+    mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
+    mach_port_t object_name;
+    kern_return_t kr = mach_vm_region(task,
+                                      &address,
+                                      &size,
+                                      VM_REGION_TOP_INFO,
+                                      reinterpret_cast<vm_region_info_t>(&info),
+                                      &info_count,
+                                      &object_name);
+    if (kr == KERN_INVALID_ADDRESS) {
+      // We're at the end of the address space.
       break;
-
-    vm_region_basic_info_64 basic_info;
-    result = GetBasicInfo(task, &size_copy, &address_copy, &basic_info);
-    switch (result) {
-      case Finished:
-      case Error:
-        return false;
-      case Success:
-        break;
+    } else if (kr != KERN_SUCCESS) {
+      MACH_DLOG(ERROR, kr) << "mach_vm_region";
+      return false;
     }
-    bool is_wired = basic_info.user_wired_count > 0;
+
+    // The kernel always returns a null object for VM_REGION_TOP_INFO, but
+    // balance it with a deallocate in case this ever changes. See 10.9.2
+    // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
+    mach_port_deallocate(mach_task_self(), object_name);
 
     if (IsAddressInSharedRegion(address, cpu_type) &&
         info.share_mode != SM_PRIVATE)
@@ -242,20 +212,12 @@
       default:
         break;
     }
-    if (is_wired) {
-      wired_pages_count +=
-          info.private_pages_resident + info.shared_pages_resident;
-    }
   }
 
   if (private_bytes)
     *private_bytes = private_pages_count * PAGE_SIZE;
   if (shared_bytes)
     *shared_bytes = shared_pages_count * PAGE_SIZE;
-  if (resident_bytes)
-    *resident_bytes = (private_pages_count + shared_pages_count) * PAGE_SIZE;
-  if (locked_bytes)
-    *locked_bytes = wired_pages_count * PAGE_SIZE;
 
   return true;
 }
@@ -290,6 +252,15 @@
   return true;
 }
 
+bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
+                                    size_t* shared_bytes,
+                                    size_t* resident_bytes) const {
+  if (!GetMemoryBytes(private_bytes, shared_bytes))
+    return false;
+  *resident_bytes = *private_bytes + *shared_bytes;
+  return true;
+}
+
 #define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
   (r)->tv_sec = (a)->seconds;             \
   (r)->tv_usec = (a)->microseconds;       \
@@ -421,6 +392,7 @@
   return (data.active_count * PAGE_SIZE) / 1024;
 }
 
+// On Mac, we only get total memory and free memory from the system.
 bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
   struct host_basic_info hostinfo;
   mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
@@ -433,25 +405,17 @@
   DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
   meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
 
-  vm_statistics64_data_t vm_info;
-  count = HOST_VM_INFO64_COUNT;
+  vm_statistics_data_t vm_info;
+  count = HOST_VM_INFO_COUNT;
 
-  if (host_statistics64(host.get(), HOST_VM_INFO64,
-                        reinterpret_cast<host_info64_t>(&vm_info),
-                        &count) != KERN_SUCCESS) {
+  if (host_statistics(host.get(), HOST_VM_INFO,
+                      reinterpret_cast<host_info_t>(&vm_info),
+                      &count) != KERN_SUCCESS) {
     return false;
   }
-  DCHECK_EQ(HOST_VM_INFO64_COUNT, count);
 
-  static_assert(PAGE_SIZE % 1024 == 0, "Invalid page size");
-  meminfo->free = saturated_cast<int>(
-      PAGE_SIZE / 1024 * (vm_info.free_count - vm_info.speculative_count));
-  meminfo->speculative =
-      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.speculative_count);
-  meminfo->file_backed =
-      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.external_page_count);
-  meminfo->purgeable =
-      saturated_cast<int>(PAGE_SIZE / 1024 * vm_info.purgeable_count);
+  meminfo->free = static_cast<int>(
+      (vm_info.free_count - vm_info.speculative_count) * PAGE_SIZE / 1024);
 
   return true;
 }
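
A sketch of the restored three-argument overload in use; the current-process handle and the null PortProvider are assumptions sufficient only for self-inspection:

    std::unique_ptr<base::ProcessMetrics> metrics(
        base::ProcessMetrics::CreateProcessMetrics(
            base::GetCurrentProcessHandle(), nullptr));
    size_t priv = 0, shared = 0, resident = 0;
    if (metrics->GetMemoryBytes(&priv, &shared, &resident)) {
      // Per the code above, resident == priv + shared on Mac.
    }
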
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index 288cde9..b0bd7ea 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -17,17 +17,12 @@
 #include "base/files/scoped_temp_dir.h"
 #include "base/macros.h"
 #include "base/strings/string_number_conversions.h"
-#include "base/sys_info.h"
 #include "base/test/multiprocess_test.h"
 #include "base/threading/thread.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/multiprocess_func_list.h"
 
-#if defined(OS_MACOSX)
-#include <sys/mman.h>
-#endif
-
 namespace base {
 namespace debug {
 
@@ -57,42 +52,6 @@
 
 /////////////////////////////////////////////////////////////////////////////
 
-#if defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
-TEST_F(SystemMetricsTest, LockedBytes) {
-  ProcessHandle handle = GetCurrentProcessHandle();
-  std::unique_ptr<ProcessMetrics> metrics(
-      ProcessMetrics::CreateProcessMetrics(handle, nullptr));
-
-  size_t initial_locked_bytes;
-  bool result =
-      metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &initial_locked_bytes);
-  ASSERT_TRUE(result);
-
-  size_t size = 8 * 1024 * 1024;
-  std::unique_ptr<char[]> memory(new char[size]);
-  int r = mlock(memory.get(), size);
-  ASSERT_EQ(0, r);
-
-  size_t new_locked_bytes;
-  result =
-      metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
-  ASSERT_TRUE(result);
-
-  // There should be around |size| more locked bytes, but multi-threading might
-  // cause noise.
-  EXPECT_LT(initial_locked_bytes + size / 2, new_locked_bytes);
-  EXPECT_GT(initial_locked_bytes + size * 1.5, new_locked_bytes);
-
-  r = munlock(memory.get(), size);
-  ASSERT_EQ(0, r);
-
-  result =
-      metrics->GetMemoryBytes(nullptr, nullptr, nullptr, &new_locked_bytes);
-  ASSERT_TRUE(result);
-  EXPECT_EQ(initial_locked_bytes, new_locked_bytes);
-}
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS) && !defined(ADDRESS_SANITIZER)
-
 #if defined(OS_LINUX) || defined(OS_ANDROID)
 TEST_F(SystemMetricsTest, IsValidDiskName) {
   std::string invalid_input1 = "";
@@ -147,7 +106,6 @@
   std::string valid_input1 =
     "MemTotal:        3981504 kB\n"
     "MemFree:          140764 kB\n"
-    "MemAvailable:     535413 kB\n"
     "Buffers:          116480 kB\n"
     "Cached:           406160 kB\n"
     "SwapCached:        21304 kB\n"
@@ -213,7 +171,6 @@
   EXPECT_TRUE(ParseProcMeminfo(valid_input1, &meminfo));
   EXPECT_EQ(meminfo.total, 3981504);
   EXPECT_EQ(meminfo.free, 140764);
-  EXPECT_EQ(meminfo.available, 535413);
   EXPECT_EQ(meminfo.buffers, 116480);
   EXPECT_EQ(meminfo.cached, 406160);
   EXPECT_EQ(meminfo.active_anon, 2972352);
@@ -223,29 +180,18 @@
   EXPECT_EQ(meminfo.swap_total, 5832280);
   EXPECT_EQ(meminfo.swap_free, 3672368);
   EXPECT_EQ(meminfo.dirty, 184);
-  EXPECT_EQ(meminfo.reclaimable, 30936);
 #if defined(OS_CHROMEOS)
   EXPECT_EQ(meminfo.shmem, 140204);
   EXPECT_EQ(meminfo.slab, 54212);
 #endif
-  EXPECT_EQ(355725,
-            base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
-  // Simulate as if there is no MemAvailable.
-  meminfo.available = 0;
-  EXPECT_EQ(374448,
-            base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
-  meminfo = {};
   EXPECT_TRUE(ParseProcMeminfo(valid_input2, &meminfo));
   EXPECT_EQ(meminfo.total, 255908);
   EXPECT_EQ(meminfo.free, 69936);
-  EXPECT_EQ(meminfo.available, 0);
   EXPECT_EQ(meminfo.buffers, 15812);
   EXPECT_EQ(meminfo.cached, 115124);
   EXPECT_EQ(meminfo.swap_total, 524280);
   EXPECT_EQ(meminfo.swap_free, 524200);
   EXPECT_EQ(meminfo.dirty, 4);
-  EXPECT_EQ(69936,
-            base::SysInfo::AmountOfAvailablePhysicalMemory(meminfo) / 1024);
 }
 
 TEST_F(SystemMetricsTest, ParseVmstat) {
@@ -395,19 +341,15 @@
 
 #endif  // defined(OS_LINUX) || defined(OS_CHROMEOS)
 
-#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
-    defined(OS_ANDROID)
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) || \
+    defined(OS_LINUX) || defined(OS_ANDROID)
 TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
   SystemMemoryInfoKB info;
   EXPECT_TRUE(GetSystemMemoryInfo(&info));
 
   // Ensure each field received a value.
   EXPECT_GT(info.total, 0);
-#if defined(OS_WIN)
-  EXPECT_GT(info.avail_phys, 0);
-#else
   EXPECT_GT(info.free, 0);
-#endif
 #if defined(OS_LINUX) || defined(OS_ANDROID)
   EXPECT_GT(info.buffers, 0);
   EXPECT_GT(info.cached, 0);
@@ -418,9 +360,7 @@
 #endif  // defined(OS_LINUX) || defined(OS_ANDROID)
 
   // All the values should be less than the total amount of memory.
-#if !defined(OS_WIN)
   EXPECT_LT(info.free, info.total);
-#endif
 #if defined(OS_LINUX) || defined(OS_ANDROID)
   EXPECT_LT(info.buffers, info.total);
   EXPECT_LT(info.cached, info.total);
@@ -430,10 +370,6 @@
   EXPECT_LT(info.inactive_file, info.total);
 #endif  // defined(OS_LINUX) || defined(OS_ANDROID)
 
-#if defined(OS_MACOSX) || defined(OS_IOS)
-  EXPECT_GT(info.file_backed, 0);
-#endif
-
 #if defined(OS_CHROMEOS)
   // Chrome OS exposes shmem.
   EXPECT_GT(info.shmem, 0);
@@ -442,8 +378,8 @@
   // and gem_size cannot be tested here.
 #endif
 }
-#endif  // defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) ||
-        // defined(OS_ANDROID)
+#endif  // defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) ||
+        // defined(OS_LINUX) || defined(OS_ANDROID)
 
 #if defined(OS_LINUX) || defined(OS_ANDROID)
 TEST(ProcessMetricsTest, ParseProcStatCPU) {
@@ -558,13 +494,13 @@
   const FilePath temp_path = temp_dir.GetPath();
   CommandLine child_command_line(GetMultiProcessTestChildBaseCommandLine());
   child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
-  SpawnChildResult spawn_child = SpawnMultiProcessTestChild(
+  Process child = SpawnMultiProcessTestChild(
       ChildMainString, child_command_line, LaunchOptions());
-  ASSERT_TRUE(spawn_child.process.IsValid());
+  ASSERT_TRUE(child.IsValid());
   WaitForEvent(temp_path, kSignalClosed);
 
   std::unique_ptr<ProcessMetrics> metrics(
-      ProcessMetrics::CreateProcessMetrics(spawn_child.process.Handle()));
+      ProcessMetrics::CreateProcessMetrics(child.Handle()));
   // Try a couple times to observe the child with 0 fds open.
   // Sometimes we've seen that the child can have 1 remaining
   // fd shortly after receiving the signal.  Potentially this
@@ -578,7 +514,7 @@
     PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
   }
   EXPECT_EQ(0, open_fds);
-  ASSERT_TRUE(spawn_child.process.Terminate(0, true));
+  ASSERT_TRUE(child.Terminate(0, true));
 }
 #endif  // !defined(__ANDROID__)
 
diff --git a/base/sequence_checker_impl.cc b/base/sequence_checker_impl.cc
index 6a9b5b2..df2a8cb 100644
--- a/base/sequence_checker_impl.cc
+++ b/base/sequence_checker_impl.cc
@@ -26,7 +26,7 @@
 
   ~Core() = default;
 
-  bool CalledOnValidSequence() const {
+  bool CalledOnValidThread() const {
     if (sequence_token_.IsValid())
       return sequence_token_ == SequenceToken::GetForCurrentThread();
 
@@ -58,7 +58,7 @@
   AutoLock auto_lock(lock_);
   if (!core_)
     core_ = MakeUnique<Core>();
-  return core_->CalledOnValidSequence();
+  return core_->CalledOnValidThread();
 }
 
 void SequenceCheckerImpl::DetachFromSequence() {
diff --git a/base/sequenced_task_runner.cc b/base/sequenced_task_runner.cc
index 4c367cb..dc11ebc 100644
--- a/base/sequenced_task_runner.cc
+++ b/base/sequenced_task_runner.cc
@@ -4,17 +4,14 @@
 
 #include "base/sequenced_task_runner.h"
 
-#include <utility>
-
 #include "base/bind.h"
 
 namespace base {
 
 bool SequencedTaskRunner::PostNonNestableTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task) {
-  return PostNonNestableDelayedTask(from_here, std::move(task),
-                                    base::TimeDelta());
+    const Closure& task) {
+  return PostNonNestableDelayedTask(from_here, task, base::TimeDelta());
 }
 
 bool SequencedTaskRunner::DeleteOrReleaseSoonInternal(
diff --git a/base/sequenced_task_runner.h b/base/sequenced_task_runner.h
index b291539..6b2726e 100644
--- a/base/sequenced_task_runner.h
+++ b/base/sequenced_task_runner.h
@@ -6,7 +6,6 @@
 #define BASE_SEQUENCED_TASK_RUNNER_H_
 
 #include "base/base_export.h"
-#include "base/callback.h"
 #include "base/sequenced_task_runner_helpers.h"
 #include "base/task_runner.h"
 
@@ -110,11 +109,11 @@
   // below.
 
   bool PostNonNestableTask(const tracked_objects::Location& from_here,
-                           OnceClosure task);
+                           const Closure& task);
 
   virtual bool PostNonNestableDelayedTask(
       const tracked_objects::Location& from_here,
-      OnceClosure task,
+      const Closure& task,
       base::TimeDelta delay) = 0;
 
   // Submits a non-nestable task to delete the given object.  Returns
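
Call sites revert to copyable Closures; a one-line sketch where |runner| and DoCleanup() are hypothetical:

    runner->PostNonNestableTask(FROM_HERE, base::Bind(&DoCleanup));
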
diff --git a/base/strings/string_piece.h b/base/strings/string_piece.h
index 5333640..eaec14d 100644
--- a/base/strings/string_piece.h
+++ b/base/strings/string_piece.h
@@ -245,9 +245,6 @@
     return r;
   }
 
-  // This is the style of conversion preferred by std::string_view in C++17.
-  explicit operator STRING_TYPE() const { return as_string(); }
-
   STRING_TYPE as_string() const {
     // std::string doesn't like to take a NULL pointer even with a 0 size.
     return empty() ? STRING_TYPE() : STRING_TYPE(data(), size());
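
With the explicit conversion operator removed again, as_string() is the only way to materialize a std::string:

    base::StringPiece piece("hello");
    std::string copy = piece.as_string();  // still compiles
    // std::string copy2(piece);  // explicit conversion: gone after the revert
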
diff --git a/base/strings/string_piece_unittest.cc b/base/strings/string_piece_unittest.cc
index 7dfd711..f05aa15 100644
--- a/base/strings/string_piece_unittest.cc
+++ b/base/strings/string_piece_unittest.cc
@@ -295,8 +295,6 @@
   ASSERT_EQ(b.rfind(c, 0U), Piece::npos);
   ASSERT_EQ(a.rfind(d), static_cast<size_t>(a.as_string().rfind(TypeParam())));
   ASSERT_EQ(a.rfind(e), a.as_string().rfind(TypeParam()));
-  ASSERT_EQ(a.rfind(d), static_cast<size_t>(TypeParam(a).rfind(TypeParam())));
-  ASSERT_EQ(a.rfind(e), TypeParam(a).rfind(TypeParam()));
   ASSERT_EQ(a.rfind(d, 12), 12U);
   ASSERT_EQ(a.rfind(e, 17), 17U);
   ASSERT_EQ(a.rfind(g), Piece::npos);
@@ -520,12 +518,6 @@
   ASSERT_TRUE(c == s3);
   TypeParam s4(e.as_string());
   ASSERT_TRUE(s4.empty());
-
-  // operator STRING_TYPE()
-  TypeParam s5(TypeParam(a).c_str(), 7);  // Note, has an embedded NULL
-  ASSERT_TRUE(c == s5);
-  TypeParam s6(e);
-  ASSERT_TRUE(s6.empty());
 }
 
 TEST(StringPieceTest, CheckCustom) {
@@ -599,11 +591,7 @@
   ASSERT_EQ(s.data(), (const typename TypeParam::value_type*)NULL);
   ASSERT_EQ(s.size(), 0U);
 
-  TypeParam str(s);
-  ASSERT_EQ(str.length(), 0U);
-  ASSERT_EQ(str, TypeParam());
-
-  str = s.as_string();
+  TypeParam str = s.as_string();
   ASSERT_EQ(str.length(), 0U);
   ASSERT_EQ(str, TypeParam());
 }
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index e8caffe..761965f 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -112,9 +112,6 @@
   // You MUST NOT delete any of the WaitableEvent objects while this wait is
   // happening, however WaitMany's return "happens after" the |Signal| call
   // that caused it has completed, like |Wait|.
-  //
-  // If more than one WaitableEvent is signaled to unblock WaitMany, the lowest
-  // index among them is returned.
   static size_t WaitMany(WaitableEvent** waitables, size_t count);
 
   // For asynchronous waiting, see WaitableEventWatcher
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
index 846fa06..5dfff46 100644
--- a/base/synchronization/waitable_event_posix.cc
+++ b/base/synchronization/waitable_event_posix.cc
@@ -5,7 +5,6 @@
 #include <stddef.h>
 
 #include <algorithm>
-#include <limits>
 #include <vector>
 
 #include "base/debug/activity_tracker.h"
@@ -267,10 +266,12 @@
   SyncWaiter sw;
 
   const size_t r = EnqueueMany(&waitables[0], count, &sw);
-  if (r < count) {
+  if (r) {
     // One of the events is already signaled. The SyncWaiter has not been
-    // enqueued anywhere.
-    return waitables[r].second;
+    // enqueued anywhere. EnqueueMany returns the count of remaining waitables
+    // when the signaled one was seen, so the index of the signaled event is
+    // @count - @r.
+    return waitables[count - r].second;
   }
 
   // At this point, we hold the locks on all the WaitableEvents and we have
@@ -318,50 +319,38 @@
 }
 
 // -----------------------------------------------------------------------------
-// If return value == count:
+// If return value == 0:
 //   The locks of the WaitableEvents have been taken in order and the Waiter has
 //   been enqueued in the wait-list of each. None of the WaitableEvents are
 //   currently signaled
 // else:
 //   None of the WaitableEvent locks are held. The Waiter has not been enqueued
-//   in any of them and the return value is the index of the WaitableEvent which
-//   was signaled with the lowest input index from the original WaitMany call.
+//   in any of them and the return value is the index of the first WaitableEvent
+//   which was signaled, from the end of the array.
 // -----------------------------------------------------------------------------
 // static
-size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
-                                  size_t count,
-                                  Waiter* waiter) {
-  size_t winner = count;
-  size_t winner_index = count;
-  for (size_t i = 0; i < count; ++i) {
-    auto& kernel = waitables[i].first->kernel_;
-    kernel->lock_.Acquire();
-    if (kernel->signaled_ && waitables[i].second < winner) {
-      winner = waitables[i].second;
-      winner_index = i;
+size_t WaitableEvent::EnqueueMany
+    (std::pair<WaitableEvent*, size_t>* waitables,
+     size_t count, Waiter* waiter) {
+  if (!count)
+    return 0;
+
+  waitables[0].first->kernel_->lock_.Acquire();
+    if (waitables[0].first->kernel_->signaled_) {
+      if (!waitables[0].first->kernel_->manual_reset_)
+        waitables[0].first->kernel_->signaled_ = false;
+      waitables[0].first->kernel_->lock_.Release();
+      return count;
     }
-  }
 
-  // No events signaled. All locks acquired. Enqueue the Waiter on all of them
-  // and return.
-  if (winner == count) {
-    for (size_t i = 0; i < count; ++i)
-      waitables[i].first->Enqueue(waiter);
-    return count;
-  }
-
-  // Unlock in reverse order and possibly clear the chosen winner's signal
-  // before returning its index.
-  for (auto* w = waitables + count - 1; w >= waitables; --w) {
-    auto& kernel = w->first->kernel_;
-    if (w->second == winner) {
-      if (!kernel->manual_reset_)
-        kernel->signaled_ = false;
+    const size_t r = EnqueueMany(waitables + 1, count - 1, waiter);
+    if (r) {
+      waitables[0].first->kernel_->lock_.Release();
+    } else {
+      waitables[0].first->Enqueue(waiter);
     }
-    kernel->lock_.Release();
-  }
 
-  return winner_index;
+    return r;
 }
 
 // -----------------------------------------------------------------------------
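
A worked trace of the restored return-value convention, assuming count == 5 and only the event at sorted position 2 already signaled:

    // depth 0: lock waitables[0], not signaled -> recurse with count == 4
    // depth 1: lock waitables[1], not signaled -> recurse with count == 3
    // depth 2: waitables[2] is signaled -> reset (if auto-reset), release
    //          its lock, and return the remaining count, r == 3
    // depths 1 and 0: release their locks, propagate r == 3 unchanged
    // WaitMany: r != 0, so it returns waitables[5 - 3].second -- the
    //           original input index of the signaled event.
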
diff --git a/base/synchronization/waitable_event_unittest.cc b/base/synchronization/waitable_event_unittest.cc
index 3aa1af1..c0e280a 100644
--- a/base/synchronization/waitable_event_unittest.cc
+++ b/base/synchronization/waitable_event_unittest.cc
@@ -6,8 +6,6 @@
 
 #include <stddef.h>
 
-#include <algorithm>
-
 #include "base/compiler_specific.h"
 #include "base/threading/platform_thread.h"
 #include "base/time/time.h"
@@ -80,42 +78,6 @@
     delete ev[i];
 }
 
-TEST(WaitableEventTest, WaitManyLeftToRight) {
-  WaitableEvent* ev[5];
-  for (size_t i = 0; i < 5; ++i) {
-    ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
-                              WaitableEvent::InitialState::NOT_SIGNALED);
-  }
-
-  // Test for consistent left-to-right return behavior across all permutations
-  // of the input array. This is to verify that only the indices -- and not
-  // the WaitableEvents' addresses -- are relevant in determining who wins when
-  // multiple events are signaled.
-
-  std::sort(ev, ev + 5);
-  do {
-    ev[0]->Signal();
-    ev[1]->Signal();
-    EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
-
-    ev[2]->Signal();
-    EXPECT_EQ(1u, WaitableEvent::WaitMany(ev, 5));
-    EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
-
-    ev[3]->Signal();
-    ev[4]->Signal();
-    ev[0]->Signal();
-    EXPECT_EQ(0u, WaitableEvent::WaitMany(ev, 5));
-    EXPECT_EQ(3u, WaitableEvent::WaitMany(ev, 5));
-    ev[2]->Signal();
-    EXPECT_EQ(2u, WaitableEvent::WaitMany(ev, 5));
-    EXPECT_EQ(4u, WaitableEvent::WaitMany(ev, 5));
-  } while (std::next_permutation(ev, ev + 5));
-
-  for (size_t i = 0; i < 5; ++i)
-    delete ev[i];
-}
-
 class WaitableEventSignaler : public PlatformThread::Delegate {
  public:
   WaitableEventSignaler(TimeDelta delay, WaitableEvent* event)
diff --git a/base/sys_info.h b/base/sys_info.h
index 18bdaf0..e35feff 100644
--- a/base/sys_info.h
+++ b/base/sys_info.h
@@ -13,18 +13,11 @@
 
 #include "base/base_export.h"
 #include "base/files/file_path.h"
-#include "base/gtest_prod_util.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
 
 namespace base {
 
-namespace debug {
-FORWARD_DECLARE_TEST(SystemMetricsTest, ParseMeminfo);
-}
-
-struct SystemMemoryInfoKB;
-
 class BASE_EXPORT SysInfo {
  public:
   // Return the number of logical processors/cores on the current machine.
@@ -35,9 +28,6 @@
 
   // Return the number of bytes of current available physical memory on the
   // machine.
-  // (The amount of memory that can be allocated without any significant
-  // impact on the system. It can lead to freeing inactive file-backed
-  // and/or speculative file-backed memory).
   static int64_t AmountOfAvailablePhysicalMemory();
 
   // Return the number of bytes of virtual memory of this process. A return
@@ -80,6 +70,8 @@
   static std::string OperatingSystemVersion();
 
   // Retrieves detailed numeric values for the OS version.
+  // TODO(port): Implement a Linux version of this method and enable the
+  // corresponding unit test.
   // DON'T USE THIS ON THE MAC OR WINDOWS to determine the current OS release
   // for OS version-specific feature checks and workarounds. If you must use
   // an OS version check instead of a feature check, use the base::mac::IsOS*
@@ -155,15 +147,6 @@
   // Low-end device refers to devices having less than 512M memory in the
   // current implementation.
   static bool IsLowEndDevice();
-
- private:
-  FRIEND_TEST_ALL_PREFIXES(SysInfoTest, AmountOfAvailablePhysicalMemory);
-  FRIEND_TEST_ALL_PREFIXES(debug::SystemMetricsTest, ParseMeminfo);
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-  static int64_t AmountOfAvailablePhysicalMemory(
-      const SystemMemoryInfoKB& meminfo);
-#endif
 };
 
 }  // namespace base
diff --git a/base/sys_info_linux.cc b/base/sys_info_linux.cc
index 0cd05b3..298d245 100644
--- a/base/sys_info_linux.cc
+++ b/base/sys_info_linux.cc
@@ -13,7 +13,6 @@
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/numerics/safe_conversions.h"
-#include "base/process/process_metrics.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/sys_info_internal.h"
 #include "build/build_config.h"
@@ -43,32 +42,16 @@
 namespace base {
 
 // static
+int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
+  return AmountOfMemory(_SC_AVPHYS_PAGES);
+}
+
+// static
 int64_t SysInfo::AmountOfPhysicalMemory() {
   return g_lazy_physical_memory.Get().value();
 }
 
 // static
-int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
-  SystemMemoryInfoKB info;
-  if (!GetSystemMemoryInfo(&info))
-    return 0;
-  return AmountOfAvailablePhysicalMemory(info);
-}
-
-// static
-int64_t SysInfo::AmountOfAvailablePhysicalMemory(
-    const SystemMemoryInfoKB& info) {
-  // See details here:
-  // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
-  // The fallback logic (when there is no MemAvailable) would be more precise
-  // if we had info about zones watermarks (/proc/zoneinfo).
-  int64_t res_kb = info.available != 0
-                       ? info.available - info.active_file
-                       : info.free + info.reclaimable + info.inactive_file;
-  return res_kb * 1024;
-}
-
-// static
 std::string SysInfo::CPUModelName() {
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
   const char kCpuModelPrefix[] = "Hardware";
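
The restored implementation funnels through the file-local AmountOfMemory() helper; the underlying sysconf() arithmetic is approximately:

    long pages = sysconf(_SC_AVPHYS_PAGES);  // pages currently available
    long page_size = sysconf(_SC_PAGESIZE);
    int64_t bytes = static_cast<int64_t>(pages) * page_size;  // errors elided
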
diff --git a/base/sys_info_mac.mm b/base/sys_info_mac.mm
index 1141bd5..aab1103 100644
--- a/base/sys_info_mac.mm
+++ b/base/sys_info_mac.mm
@@ -19,7 +19,6 @@
 #include "base/mac/scoped_mach_port.h"
 #import "base/mac/sdk_forward_declarations.h"
 #include "base/macros.h"
-#include "base/process/process_metrics.h"
 #include "base/strings/stringprintf.h"
 
 namespace base {
@@ -84,12 +83,20 @@
 
 // static
 int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
-  SystemMemoryInfoKB info;
-  if (!GetSystemMemoryInfo(&info))
+  base::mac::ScopedMachSendRight host(mach_host_self());
+  vm_statistics_data_t vm_info;
+  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
+
+  if (host_statistics(host.get(),
+                      HOST_VM_INFO,
+                      reinterpret_cast<host_info_t>(&vm_info),
+                      &count) != KERN_SUCCESS) {
+    NOTREACHED();
     return 0;
-  // We should add inactive file-backed memory also but there is no such
-  // information from Mac OS unfortunately.
-  return static_cast<int64_t>(info.free + info.speculative) * 1024;
+  }
+
+  return static_cast<int64_t>(vm_info.free_count - vm_info.speculative_count) *
+         PAGE_SIZE;
 }
 
 // static
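The restored Mac path queries the kernel's VM statistics directly instead of going through the shared SystemMemoryInfoKB plumbing. A condensed sketch of the same mach query, assuming only <mach/mach.h> (the real code additionally wraps the host port in ScopedMachSendRight, as shown above):

    #include <stdint.h>
    #include <mach/mach.h>

    int64_t FreePhysicalMemoryBytes() {
      vm_statistics_data_t vm_info;
      mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
      // Note: mach_host_self() returns a send right that should be
      // deallocated; the diff above uses ScopedMachSendRight for that.
      if (host_statistics(mach_host_self(), HOST_VM_INFO,
                          reinterpret_cast<host_info_t>(&vm_info),
                          &count) != KERN_SUCCESS) {
        return 0;
      }
      // Speculative pages are file-backed readahead counted as free, so
      // they are subtracted, matching the restored implementation.
      return static_cast<int64_t>(vm_info.free_count -
                                  vm_info.speculative_count) * PAGE_SIZE;
    }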
diff --git a/base/sys_info_posix.cc b/base/sys_info_posix.cc
index 7d37146..cbdfa3f 100644
--- a/base/sys_info_posix.cc
+++ b/base/sys_info_posix.cc
@@ -183,30 +183,6 @@
 }
 #endif
 
-#if !defined(OS_MACOSX) && !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
-// static
-void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
-                                            int32_t* minor_version,
-                                            int32_t* bugfix_version) {
-  struct utsname info;
-  if (uname(&info) < 0) {
-    NOTREACHED();
-    *major_version = 0;
-    *minor_version = 0;
-    *bugfix_version = 0;
-    return;
-  }
-  int num_read = sscanf(info.release, "%d.%d.%d", major_version, minor_version,
-                        bugfix_version);
-  if (num_read < 1)
-    *major_version = 0;
-  if (num_read < 2)
-    *minor_version = 0;
-  if (num_read < 3)
-    *bugfix_version = 0;
-}
-#endif
-
 // static
 std::string SysInfo::OperatingSystemArchitecture() {
   struct utsname info;
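The block removed here was the generic POSIX fallback that derived OS version numbers from uname(). Its parsing step, as a standalone sketch with a hypothetical helper name:

    #include <cstdio>
    #include <cstdint>

    // Parses a kernel release like "4.4.0-97-generic" into 4, 4, 0.
    // Components sscanf cannot read are zeroed, as in the removed code.
    void ParseRelease(const char* release, int32_t* major, int32_t* minor,
                      int32_t* bugfix) {
      int num_read = sscanf(release, "%d.%d.%d", major, minor, bugfix);
      if (num_read < 1) *major = 0;
      if (num_read < 2) *minor = 0;
      if (num_read < 3) *bugfix = 0;
    }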
diff --git a/base/sys_info_unittest.cc b/base/sys_info_unittest.cc
index 94b5a84..c3b8507 100644
--- a/base/sys_info_unittest.cc
+++ b/base/sys_info_unittest.cc
@@ -6,7 +6,6 @@
 
 #include "base/environment.h"
 #include "base/files/file_util.h"
-#include "base/process/process_metrics.h"
 #include "base/sys_info.h"
 #include "base/threading/platform_thread.h"
 #include "base/time/time.h"
@@ -14,71 +13,46 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
 
-namespace base {
-
-using SysInfoTest = PlatformTest;
+typedef PlatformTest SysInfoTest;
+using base::FilePath;
 
 TEST_F(SysInfoTest, NumProcs) {
   // We aren't actually testing that it's correct, just that it's sane.
-  EXPECT_GE(SysInfo::NumberOfProcessors(), 1);
+  EXPECT_GE(base::SysInfo::NumberOfProcessors(), 1);
 }
 
 TEST_F(SysInfoTest, AmountOfMem) {
   // We aren't actually testing that it's correct, just that it's sane.
-  EXPECT_GT(SysInfo::AmountOfPhysicalMemory(), 0);
-  EXPECT_GT(SysInfo::AmountOfPhysicalMemoryMB(), 0);
+  EXPECT_GT(base::SysInfo::AmountOfPhysicalMemory(), 0);
+  EXPECT_GT(base::SysInfo::AmountOfPhysicalMemoryMB(), 0);
   // The maximal amount of virtual memory can be zero, which means unlimited.
-  EXPECT_GE(SysInfo::AmountOfVirtualMemory(), 0);
+  EXPECT_GE(base::SysInfo::AmountOfVirtualMemory(), 0);
 }
 
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-TEST_F(SysInfoTest, AmountOfAvailablePhysicalMemory) {
-  // Note: info is in _K_bytes.
-  SystemMemoryInfoKB info;
-  ASSERT_TRUE(GetSystemMemoryInfo(&info));
-  EXPECT_GT(info.free, 0);
-
-  if (info.available != 0) {
-    // If there is MemAvailable from kernel.
-    EXPECT_LT(info.available, info.total);
-    const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
-    // We aren't actually testing that it's correct, just that it's sane.
-    EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
-    EXPECT_LT(amount / 1024, info.available);
-    // Simulate as if there is no MemAvailable.
-    info.available = 0;
-  }
-
-  // There is no MemAvailable. Check the fallback logic.
-  const int64_t amount = SysInfo::AmountOfAvailablePhysicalMemory(info);
-  // We aren't actually testing that it's correct, just that it's sane.
-  EXPECT_GT(amount, static_cast<int64_t>(info.free) * 1024);
-  EXPECT_LT(amount / 1024, info.total);
-}
-#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
-
 TEST_F(SysInfoTest, AmountOfFreeDiskSpace) {
   // We aren't actually testing that it's correct, just that it's sane.
   FilePath tmp_path;
-  ASSERT_TRUE(GetTempDir(&tmp_path));
-  EXPECT_GE(SysInfo::AmountOfFreeDiskSpace(tmp_path), 0) << tmp_path.value();
+  ASSERT_TRUE(base::GetTempDir(&tmp_path));
+  EXPECT_GE(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
+            << tmp_path.value();
 }
 
 TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
   // We aren't actually testing that it's correct, just that it's sane.
   FilePath tmp_path;
-  ASSERT_TRUE(GetTempDir(&tmp_path));
-  EXPECT_GT(SysInfo::AmountOfTotalDiskSpace(tmp_path), 0) << tmp_path.value();
+  ASSERT_TRUE(base::GetTempDir(&tmp_path));
+  EXPECT_GT(base::SysInfo::AmountOfTotalDiskSpace(tmp_path), 0)
+            << tmp_path.value();
 }
 
-#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
+#if defined(OS_WIN) || defined(OS_MACOSX)
 TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
   int32_t os_major_version = -1;
   int32_t os_minor_version = -1;
   int32_t os_bugfix_version = -1;
-  SysInfo::OperatingSystemVersionNumbers(&os_major_version,
-                                         &os_minor_version,
-                                         &os_bugfix_version);
+  base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+                                               &os_minor_version,
+                                               &os_bugfix_version);
   EXPECT_GT(os_major_version, -1);
   EXPECT_GT(os_minor_version, -1);
   EXPECT_GT(os_bugfix_version, -1);
@@ -86,18 +60,18 @@
 #endif
 
 TEST_F(SysInfoTest, Uptime) {
-  TimeDelta up_time_1 = SysInfo::Uptime();
+  base::TimeDelta up_time_1 = base::SysInfo::Uptime();
   // Uptime() is implemented internally using TimeTicks::Now(), which documents
   // system resolution as being 1-15ms. Sleep a little longer than that.
-  PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
-  TimeDelta up_time_2 = SysInfo::Uptime();
+  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
+  base::TimeDelta up_time_2 = base::SysInfo::Uptime();
   EXPECT_GT(up_time_1.InMicroseconds(), 0);
   EXPECT_GT(up_time_2.InMicroseconds(), up_time_1.InMicroseconds());
 }
 
 #if defined(OS_MACOSX) && !defined(OS_IOS)
 TEST_F(SysInfoTest, HardwareModelName) {
-  std::string hardware_model = SysInfo::HardwareModelName();
+  std::string hardware_model = base::SysInfo::HardwareModelName();
   EXPECT_FALSE(hardware_model.empty());
 }
 #endif
@@ -111,10 +85,10 @@
   const char kLsbRelease[] =
       "FOO=1234123.34.5\n"
       "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
-  SysInfo::OperatingSystemVersionNumbers(&os_major_version,
-                                         &os_minor_version,
-                                         &os_bugfix_version);
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
+  base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+                                               &os_minor_version,
+                                               &os_bugfix_version);
   EXPECT_EQ(1, os_major_version);
   EXPECT_EQ(2, os_minor_version);
   EXPECT_EQ(3, os_bugfix_version);
@@ -127,10 +101,10 @@
   const char kLsbRelease[] =
       "CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
       "FOO=1234123.34.5\n";
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
-  SysInfo::OperatingSystemVersionNumbers(&os_major_version,
-                                         &os_minor_version,
-                                         &os_bugfix_version);
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
+  base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+                                               &os_minor_version,
+                                               &os_bugfix_version);
   EXPECT_EQ(1, os_major_version);
   EXPECT_EQ(2, os_minor_version);
   EXPECT_EQ(3, os_bugfix_version);
@@ -141,10 +115,10 @@
   int32_t os_minor_version = -1;
   int32_t os_bugfix_version = -1;
   const char kLsbRelease[] = "FOO=1234123.34.5\n";
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, Time());
-  SysInfo::OperatingSystemVersionNumbers(&os_major_version,
-                                         &os_minor_version,
-                                         &os_bugfix_version);
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, base::Time());
+  base::SysInfo::OperatingSystemVersionNumbers(&os_major_version,
+                                               &os_minor_version,
+                                               &os_bugfix_version);
   EXPECT_EQ(0, os_major_version);
   EXPECT_EQ(0, os_minor_version);
   EXPECT_EQ(0, os_bugfix_version);
@@ -153,45 +127,43 @@
 TEST_F(SysInfoTest, GoogleChromeOSLsbReleaseTime) {
   const char kLsbRelease[] = "CHROMEOS_RELEASE_VERSION=1.2.3.4";
   // Use a fake time that can be safely displayed as a string.
-  const Time lsb_release_time(Time::FromDoubleT(12345.6));
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
-  Time parsed_lsb_release_time = SysInfo::GetLsbReleaseTime();
+  const base::Time lsb_release_time(base::Time::FromDoubleT(12345.6));
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease, lsb_release_time);
+  base::Time parsed_lsb_release_time = base::SysInfo::GetLsbReleaseTime();
   EXPECT_DOUBLE_EQ(lsb_release_time.ToDoubleT(),
                    parsed_lsb_release_time.ToDoubleT());
 }
 
 TEST_F(SysInfoTest, IsRunningOnChromeOS) {
-  SysInfo::SetChromeOSVersionInfoForTest("", Time());
-  EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
+  base::SysInfo::SetChromeOSVersionInfoForTest("", base::Time());
+  EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
 
   const char kLsbRelease1[] =
       "CHROMEOS_RELEASE_NAME=Non Chrome OS\n"
       "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
-  EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
+  EXPECT_FALSE(base::SysInfo::IsRunningOnChromeOS());
 
   const char kLsbRelease2[] =
       "CHROMEOS_RELEASE_NAME=Chrome OS\n"
       "CHROMEOS_RELEASE_VERSION=1.2.3.4\n";
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
-  EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
+  EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
 
   const char kLsbRelease3[] =
       "CHROMEOS_RELEASE_NAME=Chromium OS\n";
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, Time());
-  EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease3, base::Time());
+  EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
 }
 
 TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
   const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, Time());
-  EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
+  EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
 
   const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
-  SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
-  EXPECT_EQ("glimmer", SysInfo::GetStrippedReleaseBoard());
+  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
+  EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
 }
 
 #endif  // OS_CHROMEOS
-
-}  // namespace base
diff --git a/base/task_runner.cc b/base/task_runner.cc
index c3e0574..35c0a23 100644
--- a/base/task_runner.cc
+++ b/base/task_runner.cc
@@ -23,7 +23,7 @@
 
  private:
   bool PostTask(const tracked_objects::Location& from_here,
-                OnceClosure task) override;
+                const Closure& task) override;
 
   // Non-owning.
   TaskRunner* destination_;
@@ -36,20 +36,20 @@
 
 bool PostTaskAndReplyTaskRunner::PostTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task) {
-  return destination_->PostTask(from_here, std::move(task));
+    const Closure& task) {
+  return destination_->PostTask(from_here, task);
 }
 
 }  // namespace
 
 bool TaskRunner::PostTask(const tracked_objects::Location& from_here,
-                          OnceClosure task) {
-  return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
+                          const Closure& task) {
+  return PostDelayedTask(from_here, task, base::TimeDelta());
 }
 
 bool TaskRunner::PostTaskAndReply(const tracked_objects::Location& from_here,
-                                  OnceClosure task,
-                                  OnceClosure reply) {
+                                  Closure task,
+                                  Closure reply) {
   return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
       from_here, std::move(task), std::move(reply));
 }
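This file is the first of several below where the revert swaps base::OnceClosure back to the copyable base::Closure and drops the std::move() calls that OnceClosure required. The practical difference, as a hedged sketch:

    #include "base/bind.h"
    #include "base/callback.h"

    void Work() {}

    void ClosureDemo() {
      // base::Closure is copyable and may be Run() repeatedly, so the
      // reverted APIs can take it by const reference.
      base::Closure repeatable = base::Bind(&Work);
      repeatable.Run();
      repeatable.Run();
      // A OnceClosure, by contrast, must be moved into the callee and is
      // consumed by Run() -- hence the std::move() calls being removed.
    }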
diff --git a/base/task_runner.h b/base/task_runner.h
index 0421d56..be3039d 100644
--- a/base/task_runner.h
+++ b/base/task_runner.h
@@ -61,7 +61,8 @@
   // will not be run.
   //
   // Equivalent to PostDelayedTask(from_here, task, 0).
-  bool PostTask(const tracked_objects::Location& from_here, OnceClosure task);
+  bool PostTask(const tracked_objects::Location& from_here,
+                const Closure& task);
 
   // Like PostTask, but tries to run the posted task only after
   // |delay_ms| has passed.
@@ -69,7 +70,7 @@
   // It is valid for an implementation to ignore |delay_ms|; that is,
   // to have PostDelayedTask behave the same as PostTask.
   virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
-                               OnceClosure task,
+                               const Closure& task,
                                base::TimeDelta delay) = 0;
 
   // Returns true if the current thread is a thread on which a task
@@ -122,8 +123,8 @@
   //     and the reply will cancel itself safely because it is bound to a
   //     WeakPtr<>.
   bool PostTaskAndReply(const tracked_objects::Location& from_here,
-                        OnceClosure task,
-                        OnceClosure reply);
+                        Closure task,
+                        Closure reply);
 
  protected:
   friend struct TaskRunnerTraits;
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
index 9867c1d..601b540 100644
--- a/base/task_scheduler/sequence.cc
+++ b/base/task_scheduler/sequence.cc
@@ -15,8 +15,6 @@
 Sequence::Sequence() = default;
 
 bool Sequence::PushTask(std::unique_ptr<Task> task) {
-  DCHECK(task);
-  DCHECK(task->task);
   DCHECK(task->sequenced_time.is_null());
   task->sequenced_time = base::TimeTicks::Now();
 
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
index 7093b1e..c45d8a8 100644
--- a/base/task_scheduler/sequence_unittest.cc
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -7,7 +7,6 @@
 #include <utility>
 
 #include "base/bind.h"
-#include "base/bind_helpers.h"
 #include "base/macros.h"
 #include "base/memory/ptr_util.h"
 #include "base/test/gtest_util.h"
@@ -25,27 +24,27 @@
   TaskSchedulerSequenceTest()
       : task_a_owned_(
             new Task(FROM_HERE,
-                     Bind(&DoNothing),
+                     Closure(),
                      TaskTraits().WithPriority(TaskPriority::BACKGROUND),
                      TimeDelta())),
         task_b_owned_(
             new Task(FROM_HERE,
-                     Bind(&DoNothing),
+                     Closure(),
                      TaskTraits().WithPriority(TaskPriority::USER_VISIBLE),
                      TimeDelta())),
         task_c_owned_(
             new Task(FROM_HERE,
-                     Bind(&DoNothing),
+                     Closure(),
                      TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
                      TimeDelta())),
         task_d_owned_(
             new Task(FROM_HERE,
-                     Bind(&DoNothing),
+                     Closure(),
                      TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
                      TimeDelta())),
         task_e_owned_(
             new Task(FROM_HERE,
-                     Bind(&DoNothing),
+                     Closure(),
                      TaskTraits().WithPriority(TaskPriority::BACKGROUND),
                      TimeDelta())),
         task_a_(task_a_owned_.get()),
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
index fc513e3..3780c16 100644
--- a/base/task_scheduler/task.cc
+++ b/base/task_scheduler/task.cc
@@ -4,28 +4,22 @@
 
 #include "base/task_scheduler/task.h"
 
-#include <utility>
-
-#include "base/critical_closure.h"
-
 namespace base {
 namespace internal {
 
 Task::Task(const tracked_objects::Location& posted_from,
-           OnceClosure task,
+           const Closure& task,
            const TaskTraits& traits,
            TimeDelta delay)
-    : PendingTask(
-          posted_from,
-          traits.shutdown_behavior() == TaskShutdownBehavior::BLOCK_SHUTDOWN
-              ? MakeCriticalClosure(std::move(task))
-              : std::move(task),
-          delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
-          false),  // Not nestable.
+    : PendingTask(posted_from,
+                  task,
+                  delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
+                  false),  // Not nestable.
       // Prevent a delayed BLOCK_SHUTDOWN task from blocking shutdown before
       // being scheduled by changing its shutdown behavior to SKIP_ON_SHUTDOWN.
-      traits(!delay.is_zero() && traits.shutdown_behavior() ==
-                                     TaskShutdownBehavior::BLOCK_SHUTDOWN
+      traits(!delay.is_zero() &&
+                     traits.shutdown_behavior() ==
+                         TaskShutdownBehavior::BLOCK_SHUTDOWN
                  ? TaskTraits(traits).WithShutdownBehavior(
                        TaskShutdownBehavior::SKIP_ON_SHUTDOWN)
                  : traits),
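The second constructor argument above carries the interesting rule: a task that is both delayed and BLOCK_SHUTDOWN is downgraded so it cannot stall shutdown while still waiting on its delay. The same decision logic as an isolated sketch (hypothetical free function):

    // Downgrade delayed BLOCK_SHUTDOWN tasks to SKIP_ON_SHUTDOWN; a task
    // that has not even been scheduled should not hold up shutdown.
    base::TaskTraits EffectiveTraits(const base::TaskTraits& traits,
                                     base::TimeDelta delay) {
      if (!delay.is_zero() &&
          traits.shutdown_behavior() ==
              base::TaskShutdownBehavior::BLOCK_SHUTDOWN) {
        return base::TaskTraits(traits).WithShutdownBehavior(
            base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN);
      }
      return traits;
    }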
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
index 43095f2..c5b9bdb 100644
--- a/base/task_scheduler/task.h
+++ b/base/task_scheduler/task.h
@@ -6,7 +6,7 @@
 #define BASE_TASK_SCHEDULER_TASK_H_
 
 #include "base/base_export.h"
-#include "base/callback.h"
+#include "base/callback_forward.h"
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
@@ -28,7 +28,7 @@
   // behavior in |traits| is BLOCK_SHUTDOWN, the shutdown behavior is
   // automatically adjusted to SKIP_ON_SHUTDOWN.
   Task(const tracked_objects::Location& posted_from,
-       OnceClosure task,
+       const Closure& task,
        const TaskTraits& traits,
        TimeDelta delay);
   ~Task();
diff --git a/base/template_util.h b/base/template_util.h
index 10154db..4255210 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -51,8 +51,46 @@
 template <class T> struct is_non_const_reference<T&> : std::true_type {};
 template <class T> struct is_non_const_reference<const T&> : std::false_type {};
 
+// is_assignable
+
 namespace internal {
 
+template <typename First, typename Second>
+struct SelectSecond {
+  using type = Second;
+};
+
+struct Any {
+  Any(...);
+};
+
+// True case: If |Lvalue| can be assigned to from |Rvalue|, then the return
+// value is a true_type.
+template <class Lvalue, class Rvalue>
+typename internal::SelectSecond<
+    decltype((std::declval<Lvalue>() = std::declval<Rvalue>())),
+    std::true_type>::type
+IsAssignableTest(Lvalue&&, Rvalue&&);
+
+// False case: Otherwise the return value is a false_type.
+template <class Rvalue>
+std::false_type IsAssignableTest(internal::Any, Rvalue&&);
+
+// Default case: Neither Lvalue nor Rvalue is void. Uses IsAssignableTest to
+// determine the type of IsAssignableImpl.
+template <class Lvalue,
+          class Rvalue,
+          bool = std::is_void<Lvalue>::value || std::is_void<Rvalue>::value>
+struct IsAssignableImpl
+    : public std::common_type<decltype(
+          internal::IsAssignableTest(std::declval<Lvalue>(),
+                                     std::declval<Rvalue>()))>::type {};
+
+// Void case: Either Lvalue or Rvalue is void. Then the type of IsAssignableTest
+// is false_type.
+template <class Lvalue, class Rvalue>
+struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
+
 // Uses expression SFINAE to detect whether using operator<< would work.
 template <typename T, typename = void>
 struct SupportsOstreamOperator : std::false_type {};
@@ -64,6 +102,29 @@
 
 }  // namespace internal
 
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class Lvalue, class Rvalue>
+struct is_assignable : public internal::IsAssignableImpl<Lvalue, Rvalue> {};
+
+// is_copy_assignable is true if a T const& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_copy_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           typename std::add_lvalue_reference<
+                               typename std::add_const<T>::type>::type> {};
+
+// is_move_assignable is true if a T&& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_move_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           const typename std::add_rvalue_reference<T>::type> {
+};
+
 // underlying_type produces the integer type backing an enum type.
 // TODO(crbug.com/554293): Remove this when all platforms have this in the std
 // namespace.
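The is_assignable family restored here is an expression-SFINAE backport of std::is_assignable for toolchains that lacked it. What the traits report, as a usage sketch:

    #include "base/template_util.h"

    struct MoveOnly {
      MoveOnly& operator=(MoveOnly&&) = default;
      MoveOnly& operator=(const MoveOnly&) = delete;
    };

    // "1 = 1" is ill-formed (no lvalue target), so the first trait is
    // false; assigning to an int lvalue from an int is fine.
    static_assert(!base::is_assignable<int, int>::value, "rvalue target");
    static_assert(base::is_assignable<int&, int>::value, "lvalue target");
    static_assert(!base::is_copy_assignable<MoveOnly>::value, "copy deleted");
    static_assert(base::is_move_assignable<MoveOnly>::value, "move kept");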
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
index e34a25b..9215964 100644
--- a/base/template_util_unittest.cc
+++ b/base/template_util_unittest.cc
@@ -30,6 +30,39 @@
               "IsNonConstReference");
 static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
 
+class AssignParent {};
+class AssignChild : AssignParent {};
+
+// is_assignable<Type1, Type2>
+static_assert(!is_assignable<int, int>::value, "IsAssignable");  // 1 = 1;
+static_assert(!is_assignable<int, double>::value, "IsAssignable");
+static_assert(is_assignable<int&, int>::value, "IsAssignable");
+static_assert(is_assignable<int&, double>::value, "IsAssignable");
+static_assert(is_assignable<int&, int&>::value, "IsAssignable");
+static_assert(is_assignable<int&, int const&>::value, "IsAssignable");
+static_assert(!is_assignable<int const&, int>::value, "IsAssignable");
+static_assert(!is_assignable<AssignParent&, AssignChild>::value,
+              "IsAssignable");
+static_assert(!is_assignable<AssignChild&, AssignParent>::value,
+              "IsAssignable");
+
+struct AssignCopy {};
+struct AssignNoCopy {
+  AssignNoCopy& operator=(AssignNoCopy&&) { return *this; }
+  AssignNoCopy& operator=(const AssignNoCopy&) = delete;
+};
+struct AssignNoMove {
+  AssignNoMove& operator=(AssignNoMove&&) = delete;
+  AssignNoMove& operator=(const AssignNoMove&) = delete;
+};
+
+static_assert(is_copy_assignable<AssignCopy>::value, "IsCopyAssignable");
+static_assert(!is_copy_assignable<AssignNoCopy>::value, "IsCopyAssignable");
+
+static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
+static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
+static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
+
 // A few standard types that definitely support printing.
 static_assert(internal::SupportsOstreamOperator<int>::value,
               "ints should be printable");
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index fc0350f..844707e 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -329,7 +329,6 @@
   generate_jni("base_unittests_jni_headers") {
     sources = [
       "android/java/src/org/chromium/base/ContentUriTestUtils.java",
-      "android/java/src/org/chromium/base/JavaHandlerThreadTest.java",
       "android/java/src/org/chromium/base/TestSystemMessageHandler.java",
       "android/java/src/org/chromium/base/TestUiThread.java",
     ]
@@ -354,6 +353,7 @@
     ]
     srcjar_deps = [ ":test_support_java_aidl" ]
     java_files = [
+      "android/java/src/org/chromium/base/FileDescriptorInfo.java",
       "android/java/src/org/chromium/base/MainReturnCodeResult.java",
       "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
       "android/java/src/org/chromium/base/MultiprocessTestClientService.java",
@@ -367,10 +367,7 @@
 
   android_aidl("test_support_java_aidl") {
     testonly = true
-    import_include = [
-      "android/java/src",
-      "//base/android/java/src",
-    ]
+    import_include = [ "android/java/src" ]
     sources = [
       "android/java/src/org/chromium/base/ITestClient.aidl",
     ]
diff --git a/base/test/multiprocess_test.cc b/base/test/multiprocess_test.cc
index c8fd3ed..fcc4d12 100644
--- a/base/test/multiprocess_test.cc
+++ b/base/test/multiprocess_test.cc
@@ -13,7 +13,7 @@
 namespace base {
 
 #if !defined(OS_ANDROID) && !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
-SpawnChildResult SpawnMultiProcessTestChild(
+Process SpawnMultiProcessTestChild(
     const std::string& procname,
     const CommandLine& base_command_line,
     const LaunchOptions& options) {
@@ -24,9 +24,7 @@
   if (!command_line.HasSwitch(switches::kTestChildProcess))
     command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
 
-  SpawnChildResult result;
-  result.process = LaunchProcess(command_line, options);
-  return result;
+  return LaunchProcess(command_line, options);
 }
 
 bool WaitForMultiprocessTestChildExit(const Process& process,
@@ -56,7 +54,7 @@
 
 // Don't compile on Arc++.
 #if 0
-SpawnChildResult MultiProcessTest::SpawnChild(const std::string& procname) {
+Process MultiProcessTest::SpawnChild(const std::string& procname) {
   LaunchOptions options;
 #if defined(OS_WIN)
   options.start_hidden = true;
@@ -64,7 +62,7 @@
   return SpawnChildWithOptions(procname, options);
 }
 
-SpawnChildResult MultiProcessTest::SpawnChildWithOptions(
+Process MultiProcessTest::SpawnChildWithOptions(
     const std::string& procname,
     const LaunchOptions& options) {
   return SpawnMultiProcessTestChild(procname, MakeCmdLine(procname), options);
diff --git a/base/test/multiprocess_test.h b/base/test/multiprocess_test.h
index f0027d9..bf96637 100644
--- a/base/test/multiprocess_test.h
+++ b/base/test/multiprocess_test.h
@@ -17,17 +17,6 @@
 
 class CommandLine;
 
-struct SpawnChildResult {
-  SpawnChildResult() {}
-  SpawnChildResult(SpawnChildResult&& other) = default;
-
-  SpawnChildResult& operator=(SpawnChildResult&& other) = default;
-
-  Process process;
-
-  DISALLOW_COPY_AND_ASSIGN(SpawnChildResult);
-};
-
 // Helpers to spawn a child for a multiprocess test and execute a designated
 // function. Use these when you already have another base class for your test
 // fixture, but you want (some) of your tests to be multiprocess (otherwise you
@@ -44,10 +33,9 @@
 //     // Maybe set some options (e.g., |start_hidden| on Windows)....
 //
 //     // Start a child process and run |a_test_func|.
-//     SpawnChildResult result =
+//     base::Process test_child_process =
 //         base::SpawnMultiProcessTestChild("a_test_func", command_line,
 //                                          options);
-//     base::Process test_child_process = std::move(result.process);
 //
 //     // Do stuff involving |test_child_process| and the child process....
 //
@@ -73,9 +61,10 @@
 // |command_line| should be as provided by
 // |GetMultiProcessTestChildBaseCommandLine()| (below), possibly with arguments
 // added. Note: On Windows, you probably want to set |options.start_hidden|.
-SpawnChildResult SpawnMultiProcessTestChild(const std::string& procname,
-                                            const CommandLine& command_line,
-                                            const LaunchOptions& options);
+Process SpawnMultiProcessTestChild(
+    const std::string& procname,
+    const CommandLine& command_line,
+    const LaunchOptions& options);
 
 // Gets the base command line for |SpawnMultiProcessTestChild()|. To this, you
 // may add any flags needed for your child process.
@@ -132,13 +121,13 @@
   //    }
   //
   // Returns the child process.
-  SpawnChildResult SpawnChild(const std::string& procname);
+  Process SpawnChild(const std::string& procname);
 
   // Run a child process using the given launch options.
   //
   // Note: On Windows, you probably want to set |options.start_hidden|.
-  SpawnChildResult SpawnChildWithOptions(const std::string& procname,
-                                         const LaunchOptions& options);
+  Process SpawnChildWithOptions(const std::string& procname,
+                                const LaunchOptions& options);
 
   // Set up the command line used to spawn the child process.
   // Override this to add things to the command line (calling this first in the
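To make the reverted signatures concrete: a child entry point is declared with MULTIPROCESS_TEST_MAIN and spawned through the Process-returning helper. A hedged sketch ("a_test_func" and SpawnExample are illustrative names):

    #include "base/command_line.h"
    #include "base/process/launch.h"
    #include "base/process/process.h"
    #include "base/test/multiprocess_test.h"
    #include "base/test/test_timeouts.h"
    #include "testing/multiprocess_func_list.h"

    // Runs in the child process; the return value is its exit code.
    MULTIPROCESS_TEST_MAIN(a_test_func) {
      return 0;
    }

    void SpawnExample() {
      base::CommandLine command_line =
          base::GetMultiProcessTestChildBaseCommandLine();
      base::LaunchOptions options;
      // Post-revert, the helper returns base::Process directly rather
      // than a SpawnChildResult wrapper.
      base::Process child = base::SpawnMultiProcessTestChild(
          "a_test_func", command_line, options);
      int exit_code = -1;
      base::WaitForMultiprocessTestChildExit(
          child, TestTimeouts::action_timeout(), &exit_code);
    }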
diff --git a/base/test/multiprocess_test_android.cc b/base/test/multiprocess_test_android.cc
index a1b8fcb..c74f013 100644
--- a/base/test/multiprocess_test_android.cc
+++ b/base/test/multiprocess_test_android.cc
@@ -25,10 +25,9 @@
 //  - All options except |fds_to_remap| are ignored.
 //
 // NOTE: This MUST NOT run on the main thread of the NativeTest application.
-SpawnChildResult SpawnMultiProcessTestChild(
-    const std::string& procname,
-    const CommandLine& base_command_line,
-    const LaunchOptions& options) {
+Process SpawnMultiProcessTestChild(const std::string& procname,
+                                   const CommandLine& base_command_line,
+                                   const LaunchOptions& options) {
   JNIEnv* env = android::AttachCurrentThread();
   DCHECK(env);
 
@@ -55,10 +54,7 @@
       android::ToJavaArrayOfStrings(env, command_line.argv());
   jint pid = android::Java_MultiprocessTestClientLauncher_launchClient(
       env, android::GetApplicationContext(), j_argv, fds);
-
-  SpawnChildResult result;
-  result.process = Process(pid);
-  return result;
+  return Process(pid);
 }
 
 bool WaitForMultiprocessTestChildExit(const Process& process,
diff --git a/base/test/test_mock_time_task_runner.cc b/base/test/test_mock_time_task_runner.cc
index a236acf..f4bd724 100644
--- a/base/test/test_mock_time_task_runner.cc
+++ b/base/test/test_mock_time_task_runner.cc
@@ -4,8 +4,6 @@
 
 #include "base/test/test_mock_time_task_runner.h"
 
-#include <utility>
-
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/ptr_util.h"
@@ -81,7 +79,7 @@
     : public base::TestPendingTask {
   TestOrderedPendingTask();
   TestOrderedPendingTask(const tracked_objects::Location& location,
-                         OnceClosure task,
+                         const Closure& task,
                          TimeTicks post_time,
                          TimeDelta delay,
                          size_t ordinal,
@@ -106,16 +104,12 @@
 
 TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
     const tracked_objects::Location& location,
-    OnceClosure task,
+    const Closure& task,
     TimeTicks post_time,
     TimeDelta delay,
     size_t ordinal,
     TestNestability nestability)
-    : base::TestPendingTask(location,
-                            std::move(task),
-                            post_time,
-                            delay,
-                            nestability),
+    : base::TestPendingTask(location, task, post_time, delay, nestability),
       ordinal(ordinal) {}
 
 TestMockTimeTaskRunner::TestOrderedPendingTask::~TestOrderedPendingTask() {
@@ -240,20 +234,20 @@
 
 bool TestMockTimeTaskRunner::PostDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
   AutoLock scoped_lock(tasks_lock_);
-  tasks_.push(TestOrderedPendingTask(from_here, std::move(task), now_ticks_,
-                                     delay, next_task_ordinal_++,
+  tasks_.push(TestOrderedPendingTask(from_here, task, now_ticks_, delay,
+                                     next_task_ordinal_++,
                                      TestPendingTask::NESTABLE));
   return true;
 }
 
 bool TestMockTimeTaskRunner::PostNonNestableDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
-  return PostDelayedTask(from_here, std::move(task), delay);
+  return PostDelayedTask(from_here, task, delay);
 }
 
 bool TestMockTimeTaskRunner::IsElapsingStopped() {
diff --git a/base/test/test_mock_time_task_runner.h b/base/test/test_mock_time_task_runner.h
index 2f892f5..54ebbdb 100644
--- a/base/test/test_mock_time_task_runner.h
+++ b/base/test/test_mock_time_task_runner.h
@@ -12,7 +12,6 @@
 #include <queue>
 #include <vector>
 
-#include "base/callback.h"
 #include "base/callback_helpers.h"
 #include "base/macros.h"
 #include "base/single_thread_task_runner.h"
@@ -141,10 +140,10 @@
   // SingleThreadTaskRunner:
   bool RunsTasksOnCurrentThread() const override;
   bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
+                       const Closure& task,
                        TimeDelta delay) override;
   bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  OnceClosure task,
+                                  const Closure& task,
                                   TimeDelta delay) override;
 
  protected:
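TestMockTimeTaskRunner holds posted tasks until the test advances its virtual clock, which makes delay-dependent code deterministic to test. A hedged usage sketch:

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/ref_counted.h"
    #include "base/test/test_mock_time_task_runner.h"

    void Tick(int* counter) { ++*counter; }

    void MockTimeExample() {
      scoped_refptr<base::TestMockTimeTaskRunner> runner(
          new base::TestMockTimeTaskRunner);
      int counter = 0;
      runner->PostDelayedTask(FROM_HERE, base::Bind(&Tick, &counter),
                              base::TimeDelta::FromSeconds(5));
      // Nothing runs until virtual time reaches the task's delay.
      runner->FastForwardBy(base::TimeDelta::FromSeconds(5));
      // counter == 1 here.
    }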
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index 3f71a99..98bc017 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -2,22 +2,22 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/test/test_pending_task.h"
-
 #include <string>
-#include <utility>
+
+#include "base/test/test_pending_task.h"
 
 namespace base {
 
 TestPendingTask::TestPendingTask() : nestability(NESTABLE) {}
 
-TestPendingTask::TestPendingTask(const tracked_objects::Location& location,
-                                 OnceClosure task,
-                                 TimeTicks post_time,
-                                 TimeDelta delay,
-                                 TestNestability nestability)
+TestPendingTask::TestPendingTask(
+    const tracked_objects::Location& location,
+    const Closure& task,
+    TimeTicks post_time,
+    TimeDelta delay,
+    TestNestability nestability)
     : location(location),
-      task(std::move(task)),
+      task(task),
       post_time(post_time),
       delay(delay),
       nestability(nestability) {}
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 52ca592..42f3f42 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -23,7 +23,7 @@
   TestPendingTask();
   TestPendingTask(TestPendingTask&& other);
   TestPendingTask(const tracked_objects::Location& location,
-                  OnceClosure task,
+                  const Closure& task,
                   TimeTicks post_time,
                   TimeDelta delay,
                   TestNestability nestability);
diff --git a/base/test/test_simple_task_runner.cc b/base/test/test_simple_task_runner.cc
index 4280a0d..090a72e 100644
--- a/base/test/test_simple_task_runner.cc
+++ b/base/test/test_simple_task_runner.cc
@@ -4,8 +4,6 @@
 
 #include "base/test/test_simple_task_runner.h"
 
-#include <utility>
-
 #include "base/logging.h"
 #include "base/memory/ptr_util.h"
 #include "base/threading/thread_task_runner_handle.h"
@@ -18,23 +16,23 @@
 
 bool TestSimpleTaskRunner::PostDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
   AutoLock auto_lock(lock_);
-  pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
-                                           TimeTicks(), delay,
-                                           TestPendingTask::NESTABLE));
+  pending_tasks_.push_back(
+      TestPendingTask(from_here, task, TimeTicks(), delay,
+                      TestPendingTask::NESTABLE));
   return true;
 }
 
 bool TestSimpleTaskRunner::PostNonNestableDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
   AutoLock auto_lock(lock_);
-  pending_tasks_.push_back(TestPendingTask(from_here, std::move(task),
-                                           TimeTicks(), delay,
-                                           TestPendingTask::NON_NESTABLE));
+  pending_tasks_.push_back(
+      TestPendingTask(from_here, task, TimeTicks(), delay,
+                      TestPendingTask::NON_NESTABLE));
   return true;
 }
 
diff --git a/base/test/test_simple_task_runner.h b/base/test/test_simple_task_runner.h
index f46e065..d089ba8 100644
--- a/base/test/test_simple_task_runner.h
+++ b/base/test/test_simple_task_runner.h
@@ -7,7 +7,6 @@
 
 #include <deque>
 
-#include "base/callback.h"
 #include "base/compiler_specific.h"
 #include "base/macros.h"
 #include "base/single_thread_task_runner.h"
@@ -44,10 +43,10 @@
 
   // SingleThreadTaskRunner implementation.
   bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
+                       const Closure& task,
                        TimeDelta delay) override;
   bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  OnceClosure task,
+                                  const Closure& task,
                                   TimeDelta delay) override;
 
   bool RunsTasksOnCurrentThread() const override;
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
index cddb898..d16f8bd 100644
--- a/base/threading/post_task_and_reply_impl.cc
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -29,8 +29,8 @@
 class PostTaskAndReplyRelay {
  public:
   PostTaskAndReplyRelay(const tracked_objects::Location& from_here,
-                        OnceClosure task,
-                        OnceClosure reply)
+                        Closure task,
+                        Closure reply)
       : sequence_checker_(),
         from_here_(from_here),
         origin_task_runner_(SequencedTaskRunnerHandle::Get()),
@@ -39,10 +39,12 @@
 
   ~PostTaskAndReplyRelay() {
     DCHECK(sequence_checker_.CalledOnValidSequence());
+    task_.Reset();
+    reply_.Reset();
   }
 
   void RunTaskAndPostReply() {
-    std::move(task_).Run();
+    task_.Run();
     origin_task_runner_->PostTask(
         from_here_, Bind(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
                          base::Unretained(this)));
@@ -52,12 +54,12 @@
   void RunReplyAndSelfDestruct() {
     DCHECK(sequence_checker_.CalledOnValidSequence());
 
-    // Ensure |task_| has already been released before |reply_| to ensure that
-    // no one accidentally depends on |task_| keeping one of its arguments alive
-    // while |reply_| is executing.
-    DCHECK(!task_);
+    // Force |task_| to be released before |reply_| is to ensure that no one
+    // accidentally depends on |task_| keeping one of its arguments alive while
+    // |reply_| is executing.
+    task_.Reset();
 
-    std::move(reply_).Run();
+    reply_.Run();
 
     // Cue mission impossible theme.
     delete this;
@@ -66,8 +68,8 @@
   const SequenceChecker sequence_checker_;
   const tracked_objects::Location from_here_;
   const scoped_refptr<SequencedTaskRunner> origin_task_runner_;
-  OnceClosure reply_;
-  OnceClosure task_;
+  Closure reply_;
+  Closure task_;
 };
 
 }  // namespace
@@ -76,8 +78,8 @@
 
 bool PostTaskAndReplyImpl::PostTaskAndReply(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
-    OnceClosure reply) {
+    Closure task,
+    Closure reply) {
   DCHECK(!task.is_null()) << from_here.ToString();
   DCHECK(!reply.is_null()) << from_here.ToString();
   PostTaskAndReplyRelay* relay =
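In caller terms, the relay above runs |task| on the destination runner and then posts |reply| back to the sequence that invoked PostTaskAndReply, deleting itself afterwards. A hedged sketch (ComputeOnPool and ConsumeOnOrigin are illustrative names):

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/task_runner.h"

    void ComputeOnPool() { /* heavy work off the calling sequence */ }
    void ConsumeOnOrigin() { /* runs back on the posting sequence */ }

    void RelayExample(base::TaskRunner* destination) {
      // Requires SequencedTaskRunnerHandle::IsSet() on the calling thread,
      // since that handle is where |reply| gets posted back to.
      destination->PostTaskAndReply(FROM_HERE,
                                    base::Bind(&ComputeOnPool),
                                    base::Bind(&ConsumeOnOrigin));
    }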
diff --git a/base/threading/post_task_and_reply_impl.h b/base/threading/post_task_and_reply_impl.h
index 00aee6d..696b668 100644
--- a/base/threading/post_task_and_reply_impl.h
+++ b/base/threading/post_task_and_reply_impl.h
@@ -29,12 +29,12 @@
   // SequencedTaskRunnerHandle::IsSet(). Both |task| and |reply| are guaranteed
   // to be deleted on the sequence or thread that called this.
   bool PostTaskAndReply(const tracked_objects::Location& from_here,
-                        OnceClosure task,
-                        OnceClosure reply);
+                        Closure task,
+                        Closure reply);
 
  private:
   virtual bool PostTask(const tracked_objects::Location& from_here,
-                        OnceClosure task) = 0;
+                        const Closure& task) = 0;
 };
 
 }  // namespace internal
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index e9f4aad..ce594cd 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -97,15 +97,12 @@
 
   ~SequencedTask() {}
 
-  SequencedTask(SequencedTask&&) = default;
-  SequencedTask& operator=(SequencedTask&&) = default;
-
   int sequence_token_id;
   int trace_id;
   int64_t sequence_task_number;
   SequencedWorkerPool::WorkerShutdown shutdown_behavior;
   tracked_objects::Location posted_from;
-  OnceClosure task;
+  Closure task;
 
   // Non-delayed tasks and delayed tasks are managed together by time-to-run
   // order. We calculate the time by adding the posted time and the given delay.
@@ -147,7 +144,7 @@
 
   // TaskRunner implementation
   bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
+                       const Closure& task,
                        TimeDelta delay) override;
   bool RunsTasksOnCurrentThread() const override;
 
@@ -171,13 +168,13 @@
 
 bool SequencedWorkerPoolTaskRunner::PostDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
   if (delay.is_zero()) {
-    return pool_->PostWorkerTaskWithShutdownBehavior(from_here, std::move(task),
-                                                     shutdown_behavior_);
+    return pool_->PostWorkerTaskWithShutdownBehavior(
+        from_here, task, shutdown_behavior_);
   }
-  return pool_->PostDelayedWorkerTask(from_here, std::move(task), delay);
+  return pool_->PostDelayedWorkerTask(from_here, task, delay);
 }
 
 bool SequencedWorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
@@ -201,13 +198,13 @@
 
   // TaskRunner implementation
   bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
+                       const Closure& task,
                        TimeDelta delay) override;
   bool RunsTasksOnCurrentThread() const override;
 
   // SequencedTaskRunner implementation
   bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  OnceClosure task,
+                                  const Closure& task,
                                   TimeDelta delay) override;
 
  private:
@@ -234,16 +231,15 @@
 SequencedWorkerPool::PoolSequencedTaskRunner::
     ~PoolSequencedTaskRunner() = default;
 
-bool SequencedWorkerPool::PoolSequencedTaskRunner::PostDelayedTask(
-    const tracked_objects::Location& from_here,
-    OnceClosure task,
-    TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::
+    PostDelayedTask(const tracked_objects::Location& from_here,
+                    const Closure& task,
+                    TimeDelta delay) {
   if (delay.is_zero()) {
     return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
-        token_, from_here, std::move(task), shutdown_behavior_);
+        token_, from_here, task, shutdown_behavior_);
   }
-  return pool_->PostDelayedSequencedWorkerTask(token_, from_here,
-                                               std::move(task), delay);
+  return pool_->PostDelayedSequencedWorkerTask(token_, from_here, task, delay);
 }
 
 bool SequencedWorkerPool::PoolSequencedTaskRunner::
@@ -251,13 +247,13 @@
   return pool_->IsRunningSequenceOnCurrentThread(token_);
 }
 
-bool SequencedWorkerPool::PoolSequencedTaskRunner::PostNonNestableDelayedTask(
-    const tracked_objects::Location& from_here,
-    OnceClosure task,
-    TimeDelta delay) {
+bool SequencedWorkerPool::PoolSequencedTaskRunner::
+    PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+                               const Closure& task,
+                               TimeDelta delay) {
   // There's no way to run nested tasks, so simply forward to
   // PostDelayedTask.
-  return PostDelayedTask(from_here, std::move(task), delay);
+  return PostDelayedTask(from_here, task, delay);
 }
 
 // Worker ---------------------------------------------------------------------
@@ -356,7 +352,7 @@
                 SequenceToken sequence_token,
                 WorkerShutdown shutdown_behavior,
                 const tracked_objects::Location& from_here,
-                OnceClosure task,
+                const Closure& task,
                 TimeDelta delay);
 
   bool RunsTasksOnCurrentThread() const;
@@ -401,7 +397,8 @@
   // Returns true if the task may run at some point in the future and false if
   // it will definitely not run.
   // Coalesce upon resolution of http://crbug.com/622400.
-  bool PostTaskToTaskScheduler(SequencedTask sequenced, const TimeDelta& delay);
+  bool PostTaskToTaskScheduler(const SequencedTask& sequenced,
+                               const TimeDelta& delay);
 
   // Returns the TaskScheduler TaskRunner for the specified |sequence_token_id|
   // and |traits|.
@@ -699,10 +696,8 @@
     SequenceToken sequence_token,
     WorkerShutdown shutdown_behavior,
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
-  DCHECK(task);
-
   // TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a
   // revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the
   // waterfall. https://crbug.com/622400
@@ -715,9 +710,9 @@
   sequenced.sequence_token_id = sequence_token.id_;
   sequenced.shutdown_behavior = shutdown_behavior;
   sequenced.posted_from = from_here;
-  sequenced.task = shutdown_behavior == BLOCK_SHUTDOWN
-                       ? base::MakeCriticalClosure(std::move(task))
-                       : std::move(task);
+  sequenced.task =
+      shutdown_behavior == BLOCK_SHUTDOWN ?
+      base::MakeCriticalClosure(task) : task;
   sequenced.time_to_run = TimeTicks::Now() + delay;
 
   int create_thread_id = 0;
@@ -762,15 +757,13 @@
     // See on top of the file why we don't compile this on Arc++.
 #if 0
     if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
-      if (!PostTaskToTaskScheduler(std::move(sequenced), delay))
+      if (!PostTaskToTaskScheduler(sequenced, delay))
         return false;
     } else {
 #endif
-      SequencedWorkerPool::WorkerShutdown shutdown_behavior =
-          sequenced.shutdown_behavior;
-      pending_tasks_.insert(std::move(sequenced));
+      pending_tasks_.insert(sequenced);
 
-      if (shutdown_behavior == BLOCK_SHUTDOWN)
+      if (sequenced.shutdown_behavior == BLOCK_SHUTDOWN)
         blocking_shutdown_pending_task_count_++;
 
       create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
@@ -809,7 +802,7 @@
 }
 
 bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
-    SequencedTask sequenced,
+    const SequencedTask& sequenced,
     const TimeDelta& delay) {
 #if 1
   NOTREACHED();
@@ -845,8 +838,7 @@
                                 .WithPriority(task_priority_)
                                 .WithShutdownBehavior(task_shutdown_behavior);
   return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits)
-      ->PostDelayedTask(sequenced.posted_from, std::move(sequenced.task),
-                        delay);
+      ->PostDelayedTask(sequenced.posted_from, sequenced.task, delay);
 #endif
 }
 
@@ -1051,7 +1043,7 @@
 
           tracked_objects::TaskStopwatch stopwatch;
           stopwatch.Start();
-          std::move(task.task).Run();
+          task.task.Run();
           stopwatch.Stop();
 
           tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
@@ -1062,7 +1054,7 @@
           // Also, do it before calling reset_running_task_info() so
           // that sequence-checking from within the task's destructor
           // still works.
-          DCHECK(!task.task);
+          task.task = Closure();
 
           this_worker->reset_running_task_info();
         }
@@ -1274,11 +1266,7 @@
       // refcounted, so we just need to keep a copy of them alive until the lock
       // is exited. The calling code can just clear() the vector they passed to
       // us once the lock is exited to make this happen.
-      //
-      // The const_cast here is safe since the object is erased from
-      // |pending_tasks_| soon after the move.
-      delete_these_outside_lock->push_back(
-          std::move(const_cast<SequencedTask&>(*i)));
+      delete_these_outside_lock->push_back(*i);
       pending_tasks_.erase(i++);
       continue;
     }
@@ -1289,18 +1277,14 @@
       status = GET_WORK_WAIT;
       if (cleanup_state_ == CLEANUP_RUNNING) {
         // Deferred tasks are deleted when cleaning up, see Inner::ThreadLoop.
-        // The const_cast here is safe since the object is erased from
-        // |pending_tasks_| soon after the move.
-        delete_these_outside_lock->push_back(
-            std::move(const_cast<SequencedTask&>(*i)));
+        delete_these_outside_lock->push_back(*i);
         pending_tasks_.erase(i);
       }
       break;
     }
 
-    // Found a runnable task. The const_cast is safe here since the object is
-    // erased from |pending_tasks_| soon after the move.
-    *task = std::move(const_cast<SequencedTask&>(*i));
+    // Found a runnable task.
+    *task = *i;
     pending_tasks_.erase(i);
     if (task->shutdown_behavior == BLOCK_SHUTDOWN) {
       blocking_shutdown_pending_task_count_--;
@@ -1578,71 +1562,71 @@
 
 bool SequencedWorkerPool::PostWorkerTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task) {
-  return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN, from_here,
-                          std::move(task), TimeDelta());
+    const Closure& task) {
+  return inner_->PostTask(NULL, SequenceToken(), BLOCK_SHUTDOWN,
+                          from_here, task, TimeDelta());
 }
 
 bool SequencedWorkerPool::PostDelayedWorkerTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
   WorkerShutdown shutdown_behavior =
       delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
-  return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
-                          std::move(task), delay);
+  return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
+                          from_here, task, delay);
 }
 
 bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     WorkerShutdown shutdown_behavior) {
-  return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior, from_here,
-                          std::move(task), TimeDelta());
+  return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
+                          from_here, task, TimeDelta());
 }
 
 bool SequencedWorkerPool::PostSequencedWorkerTask(
     SequenceToken sequence_token,
     const tracked_objects::Location& from_here,
-    OnceClosure task) {
-  return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN, from_here,
-                          std::move(task), TimeDelta());
+    const Closure& task) {
+  return inner_->PostTask(NULL, sequence_token, BLOCK_SHUTDOWN,
+                          from_here, task, TimeDelta());
 }
 
 bool SequencedWorkerPool::PostDelayedSequencedWorkerTask(
     SequenceToken sequence_token,
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
   WorkerShutdown shutdown_behavior =
       delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
-  return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
-                          std::move(task), delay);
+  return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
+                          from_here, task, delay);
 }
 
 bool SequencedWorkerPool::PostNamedSequencedWorkerTask(
     const std::string& token_name,
     const tracked_objects::Location& from_here,
-    OnceClosure task) {
+    const Closure& task) {
   DCHECK(!token_name.empty());
   return inner_->PostTask(&token_name, SequenceToken(), BLOCK_SHUTDOWN,
-                          from_here, std::move(task), TimeDelta());
+                          from_here, task, TimeDelta());
 }
 
 bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
     SequenceToken sequence_token,
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     WorkerShutdown shutdown_behavior) {
-  return inner_->PostTask(NULL, sequence_token, shutdown_behavior, from_here,
-                          std::move(task), TimeDelta());
+  return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
+                          from_here, task, TimeDelta());
 }
 
 bool SequencedWorkerPool::PostDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
-  return PostDelayedWorkerTask(from_here, std::move(task), delay);
+  return PostDelayedWorkerTask(from_here, task, delay);
 }
 
 bool SequencedWorkerPool::RunsTasksOnCurrentThread() const {
diff --git a/base/threading/sequenced_worker_pool.h b/base/threading/sequenced_worker_pool.h
index e577e1b..0d42de9 100644
--- a/base/threading/sequenced_worker_pool.h
+++ b/base/threading/sequenced_worker_pool.h
@@ -12,7 +12,7 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/callback.h"
+#include "base/callback_forward.h"
 #include "base/compiler_specific.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
@@ -276,7 +276,7 @@
   // Returns true if the task was posted successfully. This may fail during
   // shutdown regardless of the specified ShutdownBehavior.
   bool PostWorkerTask(const tracked_objects::Location& from_here,
-                      OnceClosure task);
+                      const Closure& task);
 
   // Same as PostWorkerTask but allows a delay to be specified (although doing
   // so changes the shutdown behavior). The task will be run after the given
@@ -288,13 +288,13 @@
   // task will be guaranteed to run to completion before shutdown
   // (BLOCK_SHUTDOWN semantics).
   bool PostDelayedWorkerTask(const tracked_objects::Location& from_here,
-                             OnceClosure task,
+                             const Closure& task,
                              TimeDelta delay);
 
   // Same as PostWorkerTask but allows specification of the shutdown behavior.
   bool PostWorkerTaskWithShutdownBehavior(
       const tracked_objects::Location& from_here,
-      OnceClosure task,
+      const Closure& task,
       WorkerShutdown shutdown_behavior);
 
   // Like PostWorkerTask above, but provides sequencing semantics. This means
@@ -310,13 +310,13 @@
   // shutdown regardless of the specified ShutdownBehavior.
   bool PostSequencedWorkerTask(SequenceToken sequence_token,
                                const tracked_objects::Location& from_here,
-                               OnceClosure task);
+                               const Closure& task);
 
   // Like PostSequencedWorkerTask above, but allows you to specify a named
   // token, which saves an extra call to GetNamedSequenceToken.
   bool PostNamedSequencedWorkerTask(const std::string& token_name,
                                     const tracked_objects::Location& from_here,
-                                    OnceClosure task);
+                                    const Closure& task);
 
   // Same as PostSequencedWorkerTask but allows a delay to be specified
   // (although doing so changes the shutdown behavior). The task will be run
@@ -330,7 +330,7 @@
   bool PostDelayedSequencedWorkerTask(
       SequenceToken sequence_token,
       const tracked_objects::Location& from_here,
-      OnceClosure task,
+      const Closure& task,
       TimeDelta delay);
 
   // Same as PostSequencedWorkerTask but allows specification of the shutdown
@@ -338,12 +338,12 @@
   bool PostSequencedWorkerTaskWithShutdownBehavior(
       SequenceToken sequence_token,
       const tracked_objects::Location& from_here,
-      OnceClosure task,
+      const Closure& task,
       WorkerShutdown shutdown_behavior);
 
   // TaskRunner implementation. Forwards to PostDelayedWorkerTask().
   bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
+                       const Closure& task,
                        TimeDelta delay) override;
   bool RunsTasksOnCurrentThread() const override;
 
diff --git a/base/threading/worker_pool.cc b/base/threading/worker_pool.cc
index 26ff10f..d47037d 100644
--- a/base/threading/worker_pool.cc
+++ b/base/threading/worker_pool.cc
@@ -27,8 +27,8 @@
 
  private:
   bool PostTask(const tracked_objects::Location& from_here,
-                OnceClosure task) override {
-    return WorkerPool::PostTask(from_here, std::move(task), task_is_slow_);
+                const Closure& task) override {
+    return WorkerPool::PostTask(from_here, task, task_is_slow_);
   }
 
   bool task_is_slow_;
@@ -45,7 +45,7 @@
 
   // TaskRunner implementation
   bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
+                       const Closure& task,
                        TimeDelta delay) override;
   bool RunsTasksOnCurrentThread() const override;
 
@@ -56,7 +56,7 @@
   // zero because non-zero delays are not supported.
   bool PostDelayedTaskAssertZeroDelay(
       const tracked_objects::Location& from_here,
-      OnceClosure task,
+      const Closure& task,
       base::TimeDelta delay);
 
   const bool tasks_are_slow_;
@@ -73,9 +73,9 @@
 
 bool WorkerPoolTaskRunner::PostDelayedTask(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     TimeDelta delay) {
-  return PostDelayedTaskAssertZeroDelay(from_here, std::move(task), delay);
+  return PostDelayedTaskAssertZeroDelay(from_here, task, delay);
 }
 
 bool WorkerPoolTaskRunner::RunsTasksOnCurrentThread() const {
@@ -84,11 +84,11 @@
 
 bool WorkerPoolTaskRunner::PostDelayedTaskAssertZeroDelay(
     const tracked_objects::Location& from_here,
-    OnceClosure task,
+    const Closure& task,
     base::TimeDelta delay) {
   DCHECK_EQ(delay.InMillisecondsRoundedUp(), 0)
       << "WorkerPoolTaskRunner does not support non-zero delays";
-  return WorkerPool::PostTask(from_here, std::move(task), tasks_are_slow_);
+  return WorkerPool::PostTask(from_here, task, tasks_are_slow_);
 }
 
 struct TaskRunnerHolder {
@@ -102,8 +102,8 @@
 }  // namespace
 
 bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
-                                  OnceClosure task,
-                                  OnceClosure reply,
+                                  Closure task,
+                                  Closure reply,
                                   bool task_is_slow) {
   // Do not report PostTaskAndReplyRelay leaks in tests. There's nothing we can
   // do about them because WorkerPool doesn't have a flushing API.
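
The PostTaskAndReply contract, unchanged by the revert, is: run |task| on a worker thread, then run |reply| back on the posting side. A rough single-file sketch of that relay shape (simplified: real //base hands |reply| to the posting sequence's task runner via the PostTaskAndReplyRelay mentioned above):

    #include <functional>
    #include <mutex>
    #include <queue>
    #include <thread>

    // Stand-in for the posting side's task queue.
    std::queue<std::function<void()>> origin_queue;
    std::mutex origin_mu;

    void PostTaskAndReply(std::function<void()> task,
                          std::function<void()> reply) {
      std::thread([t = std::move(task), r = std::move(reply)]() mutable {
        t();  // |task| runs on the worker thread first...
        std::lock_guard<std::mutex> lock(origin_mu);
        origin_queue.push(std::move(r));  // ...then |reply| hops back home
      }).detach();
    }
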
diff --git a/base/threading/worker_pool.h b/base/threading/worker_pool.h
index d1c666d..865948e 100644
--- a/base/threading/worker_pool.h
+++ b/base/threading/worker_pool.h
@@ -32,15 +32,14 @@
   // false if |task| could not be posted to a worker thread.  Regardless of
   // return value, ownership of |task| is transferred to the worker pool.
   static bool PostTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
-                       bool task_is_slow);
+                       const base::Closure& task, bool task_is_slow);
 
   // Just like TaskRunner::PostTaskAndReply, except the destination
   // for |task| is a worker thread and you can specify |task_is_slow| just
   // like you can for PostTask above.
   static bool PostTaskAndReply(const tracked_objects::Location& from_here,
-                               OnceClosure task,
-                               OnceClosure reply,
+                               Closure task,
+                               Closure reply,
                                bool task_is_slow);
 
   // Return true if the current thread is one that this WorkerPool runs tasks
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index 5a5f288..0e19a1a 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -6,8 +6,6 @@
 
 #include <stddef.h>
 
-#include <utility>
-
 #include "base/bind.h"
 #include "base/callback.h"
 #include "base/lazy_instance.h"
@@ -49,7 +47,7 @@
   ~WorkerPoolImpl() = delete;
 
   void PostTask(const tracked_objects::Location& from_here,
-                base::OnceClosure task,
+                const base::Closure& task,
                 bool task_is_slow);
 
  private:
@@ -61,9 +59,9 @@
                                              kIdleSecondsBeforeExit)) {}
 
 void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
-                              base::OnceClosure task,
+                              const base::Closure& task,
                               bool /*task_is_slow*/) {
-  pool_->PostTask(from_here, std::move(task));
+  pool_->PostTask(from_here, task);
 }
 
 base::LazyInstance<WorkerPoolImpl>::Leaky g_lazy_worker_pool =
@@ -114,10 +112,9 @@
 
 // static
 bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
-                          base::OnceClosure task,
+                          const base::Closure& task,
                           bool task_is_slow) {
-  g_lazy_worker_pool.Pointer()->PostTask(from_here, std::move(task),
-                                         task_is_slow);
+  g_lazy_worker_pool.Pointer()->PostTask(from_here, task, task_is_slow);
   return true;
 }
 
@@ -140,14 +137,12 @@
 
 void PosixDynamicThreadPool::PostTask(
     const tracked_objects::Location& from_here,
-    base::OnceClosure task) {
-  PendingTask pending_task(from_here, std::move(task));
+    const base::Closure& task) {
+  PendingTask pending_task(from_here, task);
   AddTask(&pending_task);
 }
 
 void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
-  DCHECK(pending_task);
-  DCHECK(pending_task->task);
   AutoLock locked(lock_);
 
   pending_tasks_.push(std::move(*pending_task));
diff --git a/base/threading/worker_pool_posix.h b/base/threading/worker_pool_posix.h
index 0b10adf..d65ae8f 100644
--- a/base/threading/worker_pool_posix.h
+++ b/base/threading/worker_pool_posix.h
@@ -28,7 +28,7 @@
 #include <queue>
 #include <string>
 
-#include "base/callback.h"
+#include "base/callback_forward.h"
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
@@ -51,7 +51,8 @@
                          int idle_seconds_before_exit);
 
   // Adds |task| to the thread pool.
-  void PostTask(const tracked_objects::Location& from_here, OnceClosure task);
+  void PostTask(const tracked_objects::Location& from_here,
+                const Closure& task);
 
   // Worker thread method to wait for up to |idle_seconds_before_exit| for more
   // work from the thread pool.  Returns NULL if no work is available.
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 6317886..577f500 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -34,9 +34,7 @@
     "      \"excluded_categories\": [],"
     "      \"filter_args\": {},"
     "      \"filter_predicate\": \"heap_profiler_predicate\","
-    "      \"included_categories\": ["
-    "        \"*\","
-    "        \"" TRACE_DISABLED_BY_DEFAULT("Testing") "\"]"
+    "      \"included_categories\": [\"*\"]"
     "    }"
     "  ]"
     "}";
@@ -124,7 +122,6 @@
     }
 
     {
-      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("NotTesting"), kDonut);
       TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
       StackFrame frame_cc[] = {t, c, c};
       AssertBacktraceEquals(frame_cc);
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index b9f440a..63d4061 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -5,7 +5,6 @@
 #include "base/trace_event/heap_profiler_allocation_register.h"
 
 #include <algorithm>
-#include <limits>
 
 #include "base/trace_event/trace_event_memory_overhead.h"
 
@@ -13,9 +12,9 @@
 namespace trace_event {
 
 AllocationRegister::ConstIterator::ConstIterator(
-    const AllocationRegister& alloc_register,
-    AllocationIndex index)
-    : register_(alloc_register), index_(index) {}
+    const AllocationRegister& alloc_register, AllocationIndex index)
+    : register_(alloc_register),
+      index_(index) {}
 
 void AllocationRegister::ConstIterator::operator++() {
   index_ = register_.allocations_.Next(index_ + 1);
@@ -26,12 +25,12 @@
   return index_ != other.index_;
 }
 
-AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
-    const {
+AllocationRegister::Allocation
+AllocationRegister::ConstIterator::operator*() const {
   return register_.GetAllocation(index_);
 }
 
-size_t AllocationRegister::BacktraceHasher::operator()(
+size_t AllocationRegister::BacktraceHasher::operator () (
     const Backtrace& backtrace) const {
   const size_t kSampleLength = 10;
 
@@ -43,7 +42,7 @@
   }
 
   size_t tail_start = backtrace.frame_count -
-                      std::min(backtrace.frame_count - head_end, kSampleLength);
+      std::min(backtrace.frame_count - head_end, kSampleLength);
   for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
     total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
   }
@@ -56,7 +55,7 @@
   return (total_value * 131101) >> 14;
 }
 
-size_t AllocationRegister::AddressHasher::operator()(
+size_t AllocationRegister::AddressHasher::operator () (
     const void* address) const {
   // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
   // been chosen carefully based on measurements with real-world data (addresses
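
Both hashers above use Knuth-style multiplicative hashing: multiply by a large odd constant so entropy from every input bit reaches the top bits, then keep the top bits. A generic sketch of the scheme (the constant and shift here are illustrative, not the tuned values used by the register):

    #include <cstddef>
    #include <cstdint>

    size_t HashAddress(const void* address) {
      const uint64_t a = 0x9E3779B97F4A7C15ull;  // ~2^64 / golden ratio, odd
      uint64_t value = reinterpret_cast<uintptr_t>(address);
      return static_cast<size_t>((value * a) >> 32);  // high bits mix best
    }
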
@@ -76,48 +75,34 @@
 
 AllocationRegister::AllocationRegister(size_t allocation_capacity,
                                        size_t backtrace_capacity)
-    : allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
-  Backtrace sentinel = {};
-  sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
-  sentinel.frame_count = 1;
+    : allocations_(allocation_capacity),
+      backtraces_(backtrace_capacity) {}
 
-  // Rationale for max / 2: in theory we could just start the sentinel with a
-  // refcount == 0. However, using max / 2 allows short circuiting of the
-  // conditional in RemoveBacktrace() keeping the sentinel logic out of the fast
-  // path. From a functional viewpoint, the sentinel is safe even if we wrap
-  // over refcount because .
-  BacktraceMap::KVPair::second_type sentinel_refcount =
-      std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
-  auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
-  DCHECK(index_and_flag.second);
-  DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
+AllocationRegister::~AllocationRegister() {
 }
 
-AllocationRegister::~AllocationRegister() {}
-
-bool AllocationRegister::Insert(const void* address,
+void AllocationRegister::Insert(const void* address,
                                 size_t size,
                                 const AllocationContext& context) {
   DCHECK(address != nullptr);
   if (size == 0) {
-    return false;
+    return;
   }
 
-  AllocationInfo info = {size, context.type_name,
-                         InsertBacktrace(context.backtrace)};
+  AllocationInfo info = {
+      size,
+      context.type_name,
+      InsertBacktrace(context.backtrace)
+  };
 
   // Try to insert the allocation.
   auto index_and_flag = allocations_.Insert(address, info);
-  if (!index_and_flag.second &&
-      index_and_flag.first != AllocationMap::kInvalidKVIndex) {
+  if (!index_and_flag.second) {
     // |address| is already there - overwrite the allocation info.
     auto& old_info = allocations_.Get(index_and_flag.first).second;
     RemoveBacktrace(old_info.backtrace_index);
     old_info = info;
-    return true;
   }
-
-  return index_and_flag.second;
 }
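
The sentinel machinery deleted above works like this: the out-of-storage sentinel's refcount is seeded with max/2, so the refcount test in RemoveBacktrace() stays false for any realistic workload and the && short-circuits before the sentinel-index comparison is evaluated. The trick in isolation (names illustrative):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kSentinelIndex = 0;

    void Unref(uint32_t& refcount, size_t index) {
      // For the sentinel, |refcount| starts at max/2, so '--refcount == 0'
      // stays false and the '&&' never evaluates the index comparison on
      // the hot path.
      if (--refcount == 0 && index != kSentinelIndex) {
        // remove the entry here
      }
    }
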
 
 void AllocationRegister::Remove(const void* address) {
@@ -155,17 +140,15 @@
 void AllocationRegister::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) const {
   size_t allocated = sizeof(AllocationRegister);
-  size_t resident = sizeof(AllocationRegister) +
-                    allocations_.EstimateUsedMemory() +
-                    backtraces_.EstimateUsedMemory();
+  size_t resident = sizeof(AllocationRegister)
+                    + allocations_.EstimateUsedMemory()
+                    + backtraces_.EstimateUsedMemory();
   overhead->Add("AllocationRegister", allocated, resident);
 }
 
 AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
     const Backtrace& backtrace) {
   auto index = backtraces_.Insert(backtrace, 0).first;
-  if (index == BacktraceMap::kInvalidKVIndex)
-    return kOutOfStorageBacktraceIndex;
   auto& backtrace_and_count = backtraces_.Get(index);
   backtrace_and_count.second++;
   return index;
@@ -173,8 +156,7 @@
 
 void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
   auto& backtrace_and_count = backtraces_.Get(index);
-  if (--backtrace_and_count.second == 0 &&
-      index != kOutOfStorageBacktraceIndex) {
+  if (--backtrace_and_count.second == 0) {
     // Backtrace is not referenced anymore - remove it.
     backtraces_.Remove(index);
   }
@@ -183,11 +165,15 @@
 AllocationRegister::Allocation AllocationRegister::GetAllocation(
     AllocationMap::KVIndex index) const {
   const auto& address_and_info = allocations_.Get(index);
-  const auto& backtrace_and_count =
-      backtraces_.Get(address_and_info.second.backtrace_index);
-  return {address_and_info.first, address_and_info.second.size,
-          AllocationContext(backtrace_and_count.first,
-                            address_and_info.second.type_name)};
+  const auto& backtrace_and_count = backtraces_.Get(
+      address_and_info.second.backtrace_index);
+  return {
+      address_and_info.first,
+      address_and_info.second.size,
+      AllocationContext(
+          backtrace_and_count.first,
+          address_and_info.second.type_name)
+  };
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index ac9872f..d6a02fa 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -48,26 +48,24 @@
   // For implementation simplicity API uses integer index instead
   // of iterators. Most operations (except Find) on KVIndex are O(1).
   using KVIndex = size_t;
-  enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };
+  static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
 
   // Capacity controls how many items this hash map can hold, and largely
   // affects memory footprint.
-  explicit FixedHashMap(size_t capacity)
-      : num_cells_(capacity),
-        num_inserts_dropped_(0),
-        cells_(static_cast<Cell*>(
-            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
-        buckets_(static_cast<Bucket*>(
-            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
-        free_list_(nullptr),
-        next_unused_cell_(0) {}
+  FixedHashMap(size_t capacity)
+    : num_cells_(capacity),
+      cells_(static_cast<Cell*>(
+          AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+      buckets_(static_cast<Bucket*>(
+          AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+      free_list_(nullptr),
+      next_unused_cell_(0) {}
 
   ~FixedHashMap() {
     FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
     FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
   }
 
-  // Returns {kInvalidKVIndex, false} if the table is full.
   std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
     Cell** p_cell = Lookup(key);
     Cell* cell = *p_cell;
@@ -76,15 +74,7 @@
     }
 
     // Get a free cell and link it.
-    cell = GetFreeCell();
-    if (!cell) {
-      if (num_inserts_dropped_ <
-          std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
-        ++num_inserts_dropped_;
-      }
-      return {kInvalidKVIndex, false};
-    }
-    *p_cell = cell;
+    *p_cell = cell = GetFreeCell();
     cell->p_prev = p_cell;
     cell->next = nullptr;
 
@@ -147,8 +137,6 @@
            bits::Align(sizeof(Bucket) * NumBuckets, page_size);
   }
 
-  size_t num_inserts_dropped() const { return num_inserts_dropped_; }
-
  private:
   friend base::trace_event::AllocationRegisterTest;
 
@@ -187,8 +175,7 @@
   }
 
   // Returns a cell that is not being used to store an entry (either by
-  // recycling from the free list or by taking a fresh cell). May return
-  // nullptr if the hash table has run out of memory.
+  // recycling from the free list or by taking a fresh cell).
   Cell* GetFreeCell() {
     // First try to re-use a cell from the free list.
     if (free_list_) {
@@ -197,14 +184,26 @@
       return cell;
     }
 
-    // If the hash table has too little capacity (when too little address space
-    // was reserved for |cells_|), return nullptr.
-    if (next_unused_cell_ >= num_cells_) {
-      return nullptr;
-    }
-
     // Otherwise pick the next cell that has not been touched before.
-    return &cells_[next_unused_cell_++];
+    size_t idx = next_unused_cell_;
+    next_unused_cell_++;
+
+    // If the hash table has too little capacity (when too little address space
+    // was reserved for |cells_|), |next_unused_cell_| can be an index outside
+    // of the allocated storage. A guard page is allocated there to crash the
+    // program in that case. There are alternative solutions:
+    // - Deal with it, increase capacity by reallocating |cells_|.
+    // - Refuse to insert and let the caller deal with it.
+    // Because free cells are re-used before accessing fresh cells with a higher
+    // index, and because reserving address space without touching it is cheap,
+    // the simplest solution is to just allocate a humongous chunk of address
+    // space.
+
+    CHECK_LT(next_unused_cell_, num_cells_ + 1)
+        << "Allocation Register hash table has too little capacity. Increase "
+           "the capacity to run heap profiler in large sessions.";
+
+    return &cells_[idx];
   }
 
   // Returns a value in the range [0, NumBuckets - 1] (inclusive).
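
The restored GetFreeCell() leans on the guard-page strategy its comment describes: reserve a huge range of address space up front, let pages commit lazily on first touch, and place an inaccessible page at the end so overflow crashes immediately. A minimal POSIX sketch of that reservation pattern (simplified; //base wraps this behind AllocateGuardedVirtualMemory):

    #include <sys/mman.h>

    #include <cstddef>

    // Reserve |size| bytes plus one trailing guard page. Pages are committed
    // lazily on first touch; running past |size| faults on PROT_NONE.
    void* ReserveWithGuardPage(size_t size, size_t page_size) {
      void* mem = mmap(nullptr, size + page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED)
        return nullptr;
      mprotect(static_cast<char*>(mem) + size, page_size, PROT_NONE);
      return mem;
    }
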
@@ -220,9 +219,6 @@
   // Number of cells.
   size_t const num_cells_;
 
-  // Number of calls to Insert() that were lost because the hashtable was full.
-  size_t num_inserts_dropped_;
-
   // The array of cells. This array is backed by mmapped memory. Lower indices
   // are accessed first, higher indices are accessed only when the |free_list_|
   // is empty. This is to minimize the amount of resident memory used.
@@ -252,8 +248,6 @@
 // freed. Internally it has two hashtables: one for Backtraces and one for
 // actual allocations. Sizes of both hashtables are fixed, and this class
 // allocates (mmaps) only in its constructor.
-//
-// When either hash table hits max size, new inserts are dropped.
 class BASE_EXPORT AllocationRegister {
  public:
   // Details about an allocation.
@@ -288,10 +282,7 @@
 
   // Inserts allocation details into the table. If the address was present
   // already, its details are updated. |address| must not be null.
-  //
-  // Returns true if an insert occurred. Inserts may fail because the table
-  // is full.
-  bool Insert(const void* address,
+  void Insert(const void* address,
               size_t size,
               const AllocationContext& context);
 
@@ -368,14 +359,6 @@
   AllocationMap allocations_;
   BacktraceMap backtraces_;
 
-  // Sentinel used when the |backtraces_| table is full.
-  //
-  // This is a slight abstraction to allow for constant propagation. It
-  // knows that the sentinel will be the first item inserted into the table
-  // and that the first index returned will be 0. The constructor DCHECKs
-  // this assumption.
-  enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 };
-
   DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
 };
 
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index d78de9b..5f5a80a 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -54,10 +54,10 @@
   return ptr;
 }
 
-void* HookAllocAligned(const AllocatorDispatch* self,
-                       size_t alignment,
-                       size_t size,
-                       void* context) {
+void* HookllocAligned(const AllocatorDispatch* self,
+                      size_t alignment,
+                      size_t size,
+                      void* context) {
   const AllocatorDispatch* const next = self->next;
   void* ptr = next->alloc_aligned_function(next, alignment, size, context);
   if (ptr)
@@ -129,7 +129,7 @@
 AllocatorDispatch g_allocator_hooks = {
     &HookAlloc,            /* alloc_function */
     &HookZeroInitAlloc,    /* alloc_zero_initialized_function */
-    &HookAllocAligned,     /* alloc_aligned_function */
+    &HookllocAligned,      /* alloc_aligned_function */
     &HookRealloc,          /* realloc_function */
     &HookFree,             /* free_function */
     &HookGetSizeEstimate,  /* get_size_estimate_function */
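
These hooks follow the allocator-shim chain pattern: each layer records its event, then defers the actual work to the next entry's function pointer. The shape in isolation (types are simplified stand-ins, not the real AllocatorDispatch):

    #include <cstddef>

    struct Dispatch {
      void* (*alloc)(const Dispatch* self, size_t size);
      const Dispatch* next;
    };

    void* HookAllocSketch(const Dispatch* self, size_t size) {
      const Dispatch* next = self->next;
      void* ptr = next->alloc(next, size);  // defer the real allocation
      if (ptr) {
        // ...report (ptr, size) to the profiler here...
      }
      return ptr;
    }
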
diff --git a/base/trace_event/memory_allocator_dump.cc b/base/trace_event/memory_allocator_dump.cc
index 2692521..7583763 100644
--- a/base/trace_event/memory_allocator_dump.cc
+++ b/base/trace_event/memory_allocator_dump.cc
@@ -29,8 +29,7 @@
       process_memory_dump_(process_memory_dump),
       attributes_(new TracedValue),
       guid_(guid),
-      flags_(Flags::DEFAULT),
-      size_(0) {
+      flags_(Flags::DEFAULT) {
   // The |absolute_name| cannot be empty.
   DCHECK(!absolute_name.empty());
 
@@ -60,8 +59,6 @@
 void MemoryAllocatorDump::AddScalar(const char* name,
                                     const char* units,
                                     uint64_t value) {
-  if (strcmp(kNameSize, name) == 0)
-    size_ = value;
   SStringPrintf(&string_conversion_buffer_, "%" PRIx64, value);
   attributes_->BeginDictionary(name);
   attributes_->SetString("type", kTypeScalar);
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index 99ff114..c781f07 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -11,7 +11,6 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/gtest_prod_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/trace_event/memory_allocator_dump_guid.h"
@@ -86,21 +85,11 @@
   TracedValue* attributes_for_testing() const { return attributes_.get(); }
 
  private:
-  // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
-  friend class MemoryDumpManager;
-  FRIEND_TEST_ALL_PREFIXES(MemoryAllocatorDumpTest, GetSize);
-
-  // Get the size for this dump.
-  // The size is the value set with AddScalar(kNameSize, kUnitsBytes, size);
-  // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
-  uint64_t GetSize() const { return size_; };
-
   const std::string absolute_name_;
   ProcessMemoryDump* const process_memory_dump_;  // Not owned (PMD owns this).
   std::unique_ptr<TracedValue> attributes_;
   MemoryAllocatorDumpGuid guid_;
   int flags_;  // See enum Flags.
-  uint64_t size_;
 
   // A local buffer for Sprintf conversion on fastpath. Avoids allocating
   // temporary strings on each AddScalar() call.
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
index e1818f6..1bf9715 100644
--- a/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -172,16 +172,6 @@
   pmd.AsValueInto(traced_value.get());
 }
 
-TEST(MemoryAllocatorDumpTest, GetSize) {
-  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
-  ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
-  MemoryAllocatorDump* dump = pmd.CreateAllocatorDump("allocator_for_size");
-  dump->AddScalar(MemoryAllocatorDump::kNameSize,
-                  MemoryAllocatorDump::kUnitsBytes, 1);
-  dump->AddScalar("foo", MemoryAllocatorDump::kUnitsBytes, 2);
-  EXPECT_EQ(1u, dump->GetSize());
-}
-
 // DEATH tests are not supported in Android / iOS.
 #if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
 TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index 6ed1ca8..5a54a77 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -4,9 +4,6 @@
 
 #include "base/trace_event/memory_dump_manager.h"
 
-#include <inttypes.h>
-#include <stdio.h>
-
 #include <algorithm>
 #include <utility>
 
@@ -20,8 +17,6 @@
 #include "base/debug/stack_trace.h"
 #include "base/debug/thread_heap_usage_tracker.h"
 #include "base/memory/ptr_util.h"
-#include "base/strings/pattern.h"
-#include "base/strings/string_piece.h"
 #include "base/threading/thread.h"
 #include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/heap_profiler.h"
@@ -85,12 +80,9 @@
 void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
                       uint64_t dump_guid,
                       bool success) {
-  char guid_str[20];
-  sprintf(guid_str, "0x%" PRIx64, dump_guid);
-  TRACE_EVENT_NESTABLE_ASYNC_END2(MemoryDumpManager::kTraceCategory,
-                                  "GlobalMemoryDump", TRACE_ID_LOCAL(dump_guid),
-                                  "dump_guid", TRACE_STR_COPY(guid_str),
-                                  "success", success);
+  TRACE_EVENT_NESTABLE_ASYNC_END1(
+      MemoryDumpManager::kTraceCategory, "GlobalMemoryDump",
+      TRACE_ID_MANGLE(dump_guid), "success", success);
 
   if (!wrapped_callback.is_null()) {
     wrapped_callback.Run(dump_guid, success);
@@ -163,7 +155,9 @@
 }
 
 MemoryDumpManager::MemoryDumpManager()
-    : memory_tracing_enabled_(0),
+    : delegate_(nullptr),
+      is_coordinator_(false),
+      memory_tracing_enabled_(0),
       tracing_process_id_(kInvalidTracingProcessId),
       dumper_registrations_ignored_for_testing_(false),
       heap_profiling_enabled_(false) {
@@ -220,13 +214,14 @@
   heap_profiling_enabled_ = true;
 }
 
-void MemoryDumpManager::Initialize(
-    std::unique_ptr<MemoryDumpManagerDelegate> delegate) {
+void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
+                                   bool is_coordinator) {
   {
     AutoLock lock(lock_);
     DCHECK(delegate);
     DCHECK(!delegate_);
-    delegate_ = std::move(delegate);
+    delegate_ = delegate;
+    is_coordinator_ = is_coordinator;
     EnableHeapProfilingIfNeeded();
   }
 
@@ -248,19 +243,11 @@
           AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
       !(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
     // Create trace config with heap profiling filter.
-    std::string filter_string = "*";
-    const char* const kFilteredCategories[] = {
-        TRACE_DISABLED_BY_DEFAULT("net"), TRACE_DISABLED_BY_DEFAULT("cc"),
-        MemoryDumpManager::kTraceCategory};
-    for (const char* cat : kFilteredCategories)
-      filter_string = filter_string + "," + cat;
-    TraceConfigCategoryFilter category_filter;
-    category_filter.InitializeFromString(filter_string);
-
     TraceConfig::EventFilterConfig heap_profiler_filter_config(
         HeapProfilerEventFilter::kName);
-    heap_profiler_filter_config.SetCategoryFilter(category_filter);
-
+    heap_profiler_filter_config.AddIncludedCategory("*");
+    heap_profiler_filter_config.AddIncludedCategory(
+        MemoryDumpManager::kTraceCategory);
     TraceConfig::EventFilters filters;
     filters.push_back(heap_profiler_filter_config);
     TraceConfig filtering_trace_config;
@@ -426,7 +413,7 @@
 }
 
 void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
-    scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
+    scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
   AutoLock lock(lock_);
   dump_providers_for_polling_.insert(mdpinfo);
 
@@ -434,11 +421,11 @@
   // registered. This handles the case where OnTraceLogEnabled() did not notify
   // ready since no polling supported mdp has yet been registered.
   if (dump_providers_for_polling_.size() == 1)
-    MemoryDumpScheduler::GetInstance()->EnablePollingIfNeeded();
+    dump_scheduler_->NotifyPollingSupported();
 }
 
 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
-    scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
+    scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
   mdpinfo->dump_provider->SuspendFastMemoryPolling();
 
   AutoLock lock(lock_);
@@ -469,16 +456,25 @@
   // Creates an async event to keep track of the global dump evolution.
   // The |wrapped_callback| will generate the ASYNC_END event and then invoke
   // the real |callback| provided by the caller.
-  TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(
-      kTraceCategory, "GlobalMemoryDump", TRACE_ID_LOCAL(guid), "dump_type",
-      MemoryDumpTypeToString(dump_type), "level_of_detail",
-      MemoryDumpLevelOfDetailToString(level_of_detail));
+  TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "GlobalMemoryDump",
+                                    TRACE_ID_MANGLE(guid));
   MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);
 
+  // Technically there is no need to grab the |lock_| here as the delegate is
+  // long-lived and can only be set by Initialize(), which is locked and
+  // necessarily happens before memory_tracing_enabled_ == true.
+  // Not taking the |lock_|, though, is likely to make TSan barf and, at this point
+  // (memory-infra is enabled) we're not in the fast-path anymore.
+  MemoryDumpManagerDelegate* delegate;
+  {
+    AutoLock lock(lock_);
+    delegate = delegate_;
+  }
+
   // The delegate will coordinate the IPC broadcast and at some point invoke
   // CreateProcessDump() to get a dump for the current process.
   MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
-  delegate_->RequestGlobalMemoryDump(args, wrapped_callback);
+  delegate->RequestGlobalMemoryDump(args, wrapped_callback);
 }
 
 void MemoryDumpManager::RequestGlobalDump(
@@ -487,24 +483,10 @@
   RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
 }
 
-bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
-    MemoryDumpProvider* provider) {
-  AutoLock lock(lock_);
-
-  for (const auto& info : dump_providers_) {
-    if (info->dump_provider == provider)
-      return true;
-  }
-  return false;
-}
-
 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                           const MemoryDumpCallback& callback) {
-  char guid_str[20];
-  sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
-  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
-                                    TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
-                                    TRACE_STR_COPY(guid_str));
+  TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
+                                    TRACE_ID_MANGLE(args.dump_guid));
 
   // If argument filter is enabled then only background mode dumps should be
   // allowed. In case the trace config passed for background tracing session
@@ -533,9 +515,14 @@
     CHECK(!session_state_ ||
           session_state_->IsDumpModeAllowed(args.level_of_detail));
 
-    MemoryDumpScheduler::GetInstance()->NotifyDumpTriggered();
+    if (dump_scheduler_)
+      dump_scheduler_->NotifyDumpTriggered();
   }
 
+  TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
+                         TRACE_ID_MANGLE(args.dump_guid),
+                         TRACE_EVENT_FLAG_FLOW_OUT);
+
   // Start the process dump. This involves task runner hops as specified by the
   // MemoryDumpProvider(s) in RegisterDumpProvider()).
   SetupNextMemoryDump(std::move(pmd_async_state));
@@ -679,8 +666,11 @@
 
   if (should_dump) {
     // Invoke the dump provider.
-    TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
-                 "dump_provider.name", mdpinfo->name);
+    TRACE_EVENT_WITH_FLOW1(kTraceCategory,
+                           "MemoryDumpManager::InvokeOnMemoryDump",
+                           TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
+                           TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
+                           "dump_provider.name", mdpinfo->name);
 
     // A stack allocated string with dump provider name is useful to debug
     // crashes while invoking dump after a |dump_provider| is not unregistered
@@ -732,18 +722,6 @@
 }
 
 // static
-uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern,
-                                          const ProcessMemoryDump* pmd) {
-  uint64_t sum = 0;
-  for (const auto& kv : pmd->allocator_dumps()) {
-    auto name = StringPiece(kv.first);
-    if (MatchPattern(name, pattern))
-      sum += kv.second->GetSize();
-  }
-  return sum / 1024;
-}
-
-// static
 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
     std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
   HEAP_PROFILER_SCOPED_IGNORE;
@@ -758,11 +736,9 @@
     return;
   }
 
-  TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace");
-
-  // The results struct to fill.
-  // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
-  MemoryDumpCallbackResult result;
+  TRACE_EVENT_WITH_FLOW0(kTraceCategory,
+                         "MemoryDumpManager::FinalizeDumpAndAddToTrace",
+                         TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN);
 
   for (const auto& kv : pmd_async_state->process_dumps) {
     ProcessId pid = kv.first;  // kNullProcessId for the current process.
@@ -784,30 +760,6 @@
         kTraceEventNumArgs, kTraceEventArgNames,
         kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
         TRACE_EVENT_FLAG_HAS_ID);
-
-    // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
-    // Don't try to fill the struct in detailed mode since it is hard to avoid
-    // double counting.
-    if (pmd_async_state->req_args.level_of_detail ==
-        MemoryDumpLevelOfDetail::DETAILED)
-      continue;
-
-    // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
-    if (pid == kNullProcessId) {
-      result.chrome_dump.malloc_total_kb =
-          GetDumpsSumKb("malloc", process_memory_dump);
-      result.chrome_dump.v8_total_kb =
-          GetDumpsSumKb("v8/*", process_memory_dump);
-
-      // partition_alloc reports sizes for both allocated_objects and
-      // partitions. The memory allocated_objects uses is a subset of
-      // the partitions memory so to avoid double counting we only
-      // count partitions memory.
-      result.chrome_dump.partition_alloc_total_kb =
-          GetDumpsSumKb("partition_alloc/partitions/*", process_memory_dump);
-      result.chrome_dump.blink_gc_total_kb =
-          GetDumpsSumKb("blink_gc", process_memory_dump);
-    }
   }
 
   bool tracing_still_enabled;
@@ -824,7 +776,7 @@
   }
 
   TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
-                                  TRACE_ID_LOCAL(dump_guid));
+                                  TRACE_ID_MANGLE(dump_guid));
 }
 
 void MemoryDumpManager::OnTraceLogEnabled() {
@@ -877,6 +829,18 @@
             session_state, &MemoryDumpSessionState::type_name_deduplicator));
   }
 
+  std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
+      new MemoryDumpScheduler(this, dump_thread->task_runner()));
+  DCHECK_LE(memory_dump_config.triggers.size(), 3u);
+  for (const auto& trigger : memory_dump_config.triggers) {
+    if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
+      NOTREACHED();
+      continue;
+    }
+    dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
+                               trigger.min_time_between_dumps_ms);
+  }
+
   {
     AutoLock lock(lock_);
 
@@ -885,6 +849,7 @@
 
     DCHECK(!dump_thread_);
     dump_thread_ = std::move(dump_thread);
+    dump_scheduler_ = std::move(dump_scheduler);
 
     subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
 
@@ -893,28 +858,15 @@
       if (mdpinfo->options.is_fast_polling_supported)
         dump_providers_for_polling_.insert(mdpinfo);
     }
-
-    MemoryDumpScheduler* dump_scheduler = MemoryDumpScheduler::GetInstance();
-    dump_scheduler->Setup(this, dump_thread_->task_runner());
-    DCHECK_LE(memory_dump_config.triggers.size(), 3u);
-    for (const auto& trigger : memory_dump_config.triggers) {
-      if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) {
-        NOTREACHED();
-        continue;
-      }
-      dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
-                                 trigger.min_time_between_dumps_ms);
-    }
-
     // Notify polling supported only if some polling supported provider was
     // registered, else RegisterPollingMDPOnDumpThread() will notify when first
     // polling MDP registers.
     if (!dump_providers_for_polling_.empty())
-      dump_scheduler->EnablePollingIfNeeded();
+      dump_scheduler_->NotifyPollingSupported();
 
     // Only coordinator process triggers periodic global memory dumps.
-    if (delegate_->IsCoordinator())
-      dump_scheduler->EnablePeriodicTriggerIfNeeded();
+    if (is_coordinator_)
+      dump_scheduler_->NotifyPeriodicTriggerSupported();
   }
 
 }
@@ -927,12 +879,14 @@
     return;
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
   std::unique_ptr<Thread> dump_thread;
+  std::unique_ptr<MemoryDumpScheduler> scheduler;
   {
     AutoLock lock(lock_);
     dump_thread = std::move(dump_thread_);
     session_state_ = nullptr;
-    MemoryDumpScheduler::GetInstance()->DisableAllTriggers();
+    scheduler = std::move(dump_scheduler_);
   }
+  scheduler->DisableAllTriggers();
 
   // Thread stops are blocking and must be performed outside of the |lock_|
   // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
@@ -956,6 +910,38 @@
   return session_state_->IsDumpModeAllowed(dump_mode);
 }
 
+uint64_t MemoryDumpManager::GetTracingProcessId() const {
+  return delegate_->GetTracingProcessId();
+}
+
+MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
+    MemoryDumpProvider* dump_provider,
+    const char* name,
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options,
+    bool whitelisted_for_background_mode)
+    : dump_provider(dump_provider),
+      name(name),
+      task_runner(std::move(task_runner)),
+      options(options),
+      consecutive_failures(0),
+      disabled(false),
+      whitelisted_for_background_mode(whitelisted_for_background_mode) {}
+
+MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
+
+bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
+    const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
+    const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
+  if (!a || !b)
+    return a.get() < b.get();
+  // Ensure that unbound providers (task_runner == nullptr) always run last.
+  // Rationale: some unbound dump providers are known to be slow; keep them last
+  // to avoid skewing timings of the other dump providers.
+  return std::tie(a->task_runner, a->dump_provider) >
+         std::tie(b->task_runner, b->dump_provider);
+}
+
 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
     MemoryDumpRequestArgs req_args,
     const MemoryDumpProviderInfo::OrderedSet& dump_providers,
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index e7f5194..92cc2f4 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -9,7 +9,7 @@
 
 #include <map>
 #include <memory>
-#include <unordered_set>
+#include <set>
 #include <vector>
 
 #include "base/atomicops.h"
@@ -18,20 +18,10 @@
 #include "base/memory/ref_counted.h"
 #include "base/memory/singleton.h"
 #include "base/synchronization/lock.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/memory_dump_provider_info.h"
 #include "base/trace_event/memory_dump_request_args.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event.h"
 
-// Forward declare |MemoryDumpManagerDelegateImplTest| so that we can make it a
-// friend of |MemoryDumpManager| and give it access to |SetInstanceForTesting|.
-namespace memory_instrumentation {
-
-class MemoryDumpManagerDelegateImplTest;
-
-}  // namespace memory_instrumentation
-
 namespace base {
 
 class SingleThreadTaskRunner;
@@ -64,10 +54,13 @@
   // On the other side, the MemoryDumpManager will not be fully operational
   // (i.e. will NACK any RequestGlobalMemoryDump()) until initialized.
   // Arguments:
+  //  is_coordinator: if true this MemoryDumpManager instance will act as a
+  //      coordinator and schedule periodic dumps (if enabled via TraceConfig);
+  //      false when the MemoryDumpManager is initialized in a slave process.
   //  delegate: inversion-of-control interface for embedder-specific behaviors
   //      (multiprocess handshaking). See the lifetime and thread-safety
   //      requirements in the |MemoryDumpManagerDelegate| docstring.
-  void Initialize(std::unique_ptr<MemoryDumpManagerDelegate> delegate);
+  void Initialize(MemoryDumpManagerDelegate* delegate, bool is_coordinator);
 
   // (Un)Registers a MemoryDumpProvider instance.
   // Args:
@@ -130,9 +123,6 @@
   // Returns true if the dump mode is allowed for current tracing session.
   bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
 
-  // Lets tests see if a dump provider is registered.
-  bool IsDumpProviderRegisteredForTesting(MemoryDumpProvider*);
-
   // Returns the MemoryDumpSessionState object, which is shared by all the
   // ProcessMemoryDump and MemoryAllocatorDump instances through all the tracing
   // session lifetime.
@@ -145,10 +135,7 @@
   // retrieved by child processes only when tracing is enabled. This is
   // intended to express cross-process sharing of memory dumps on the
   // child-process side, without having to know its own child process id.
-  uint64_t GetTracingProcessId() const { return tracing_process_id_; }
-  void set_tracing_process_id(uint64_t tracing_process_id) {
-    tracing_process_id_ = tracing_process_id;
-  }
+  uint64_t GetTracingProcessId() const;
 
   // Returns the name for the allocated_objects dump. Use this to declare
   // suballocator dumps from other dump providers.
@@ -169,7 +156,70 @@
   friend class MemoryDumpManagerDelegate;
   friend class MemoryDumpManagerTest;
   friend class MemoryDumpScheduler;
-  friend class memory_instrumentation::MemoryDumpManagerDelegateImplTest;
+
+  // Descriptor used to hold information about registered MDPs.
+  // Some important considerations about lifetime of this object:
+  // - In nominal conditions, all the MemoryDumpProviderInfo instances live in
+  //   the |dump_providers_| collection (modulo unregistration while dumping).
+  // - Upon each dump they (actually their scoped_refptr-s) are copied into
+  //   the ProcessMemoryDumpAsyncState. This is to allow removal (see below).
+  // - When the MDP.OnMemoryDump() is invoked, the corresponding MDPInfo copy
+  //   inside ProcessMemoryDumpAsyncState is removed.
+  // - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
+  // - If UnregisterDumpProvider() is called while a dump is in progress, the
+  //   MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
+  //   when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
+  // - The non-const fields of MemoryDumpProviderInfo are safe to access only
+  //   on tasks running in the |task_runner|, unless the thread has been
+  //   destroyed.
+  struct MemoryDumpProviderInfo
+      : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
+    // Define a total order based on the |task_runner| affinity, so that MDPs
+    // belonging to the same SequencedTaskRunner are adjacent in the set.
+    struct Comparator {
+      bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
+                      const scoped_refptr<MemoryDumpProviderInfo>& b) const;
+    };
+    using OrderedSet =
+        std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
+
+    MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+                           const char* name,
+                           scoped_refptr<SequencedTaskRunner> task_runner,
+                           const MemoryDumpProvider::Options& options,
+                           bool whitelisted_for_background_mode);
+
+    MemoryDumpProvider* const dump_provider;
+
+    // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
+    // nullptr in all other cases.
+    std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
+
+    // Human readable name, for debugging and testing. Not necessarily unique.
+    const char* const name;
+
+    // The task runner affinity. Can be nullptr, in which case the dump provider
+    // will be invoked on |dump_thread_|.
+    const scoped_refptr<SequencedTaskRunner> task_runner;
+
+    // The |options| arg passed to RegisterDumpProvider().
+    const MemoryDumpProvider::Options options;
+
+    // For fail-safe logic (auto-disable failing MDPs).
+    int consecutive_failures;
+
+    // Flagged either by the auto-disable logic or during unregistration.
+    bool disabled;
+
+    // True if the dump provider is whitelisted for background mode.
+    const bool whitelisted_for_background_mode;
+
+   private:
+    friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
+    ~MemoryDumpProviderInfo();
+
+    DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
+  };
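
The lifetime rules in the comment above are the standard refcounting pattern: the registry holds one reference per registered provider, and each dump copies the whole set, so unregistering mid-dump cannot destroy a descriptor an in-flight dump still uses. In standard C++ terms (std::shared_ptr standing in for scoped_refptr, fields elided):

    #include <memory>
    #include <set>

    struct Info { /* descriptor fields */ };

    std::set<std::shared_ptr<Info>> registry;  // nominal owner

    std::set<std::shared_ptr<Info>> SnapshotForDump() {
      return registry;  // copied refptrs keep every Info alive until the
                        // dump drops them, even if unregistered meanwhile
    }
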
 
   // Holds the state of a process memory dump that needs to be carried over
   // across task runners in order to fulfil an asynchronous CreateProcessDump()
@@ -235,7 +285,6 @@
   ~MemoryDumpManager() override;
 
   static void SetInstanceForTesting(MemoryDumpManager* instance);
-  static uint32_t GetDumpsSumKb(const std::string&, const ProcessMemoryDump*);
   static void FinalizeDumpAndAddToTrace(
       std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
 
@@ -299,7 +348,10 @@
   std::unordered_set<StringPiece, StringPieceHash>
       strict_thread_check_blacklist_;
 
-  std::unique_ptr<MemoryDumpManagerDelegate> delegate_;
+  MemoryDumpManagerDelegate* delegate_;  // Not owned.
+
+  // When true, this instance is in charge of coordinating periodic dumps.
+  bool is_coordinator_;
 
   // Protects from concurrent accesses to the |dump_providers_*| and |delegate_|
   // to guard against disabling logging while dumping on another thread.
@@ -309,6 +361,9 @@
   // dump_providers_enabled_ list) when tracing is not enabled.
   subtle::AtomicWord memory_tracing_enabled_;
 
+  // For triggering memory dumps.
+  std::unique_ptr<MemoryDumpScheduler> dump_scheduler_;
+
   // Thread used for MemoryDumpProviders which don't specify a task runner
   // affinity.
   std::unique_ptr<Thread> dump_thread_;
@@ -330,15 +385,17 @@
 // safe (i.e. should expect calls from any thread and handle thread hopping).
 class BASE_EXPORT MemoryDumpManagerDelegate {
  public:
-  MemoryDumpManagerDelegate() {}
-  virtual ~MemoryDumpManagerDelegate() {}
-
   virtual void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
                                        const MemoryDumpCallback& callback) = 0;
 
-  virtual bool IsCoordinator() const = 0;
+  // Returns the tracing process id of the current process. This is used by
+  // MemoryDumpManager::GetTracingProcessId.
+  virtual uint64_t GetTracingProcessId() const = 0;
 
  protected:
+  MemoryDumpManagerDelegate() {}
+  virtual ~MemoryDumpManagerDelegate() {}
+
   void CreateProcessDump(const MemoryDumpRequestArgs& args,
                          const MemoryDumpCallback& callback) {
     MemoryDumpManager::GetInstance()->CreateProcessDump(args, callback);
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index e126edd..51d4194 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -7,11 +7,9 @@
 #include <stdint.h>
 
 #include <memory>
-#include <utility>
 #include <vector>
 
 #include "base/bind_helpers.h"
-#include "base/callback.h"
 #include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted_memory.h"
 #include "base/message_loop/message_loop.h"
@@ -32,7 +30,6 @@
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_buffer.h"
 #include "base/trace_event/trace_config_memory_test_util.h"
-#include "build/build_config.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -105,10 +102,10 @@
 // Posts |task| to |task_runner| and blocks until it is executed.
 void PostTaskAndWait(const tracked_objects::Location& from_here,
                      SequencedTaskRunner* task_runner,
-                     base::OnceClosure task) {
+                     const base::Closure& task) {
   base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
                             WaitableEvent::InitialState::NOT_SIGNALED);
-  task_runner->PostTask(from_here, std::move(task));
+  task_runner->PostTask(from_here, task);
   task_runner->PostTask(
       FROM_HERE, base::Bind(&WaitableEvent::Signal, base::Unretained(&event)));
   // The SequencedTaskRunner guarantees that |event| will only be signaled after
@@ -116,12 +113,13 @@
   event.Wait();
 }
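
PostTaskAndWait relies on the FIFO guarantee of SequencedTaskRunner: the Signal closure cannot run before |task|, so returning from Wait() implies |task| has completed. A portable stand-in for the event it waits on (a sketch, not base::WaitableEvent):

    #include <condition_variable>
    #include <mutex>

    class Event {
     public:
      void Signal() {
        std::lock_guard<std::mutex> lock(mu_);
        signaled_ = true;
        cv_.notify_all();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return signaled_; });
      }

     private:
      std::mutex mu_;
      std::condition_variable cv_;
      bool signaled_ = false;
    };
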
 
+}  // namespace
+
 // Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
 // requests locally to the MemoryDumpManager instead of performing IPC dances.
 class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
  public:
-  MemoryDumpManagerDelegateForTesting(bool is_coordinator)
-      : is_coordinator_(is_coordinator) {
+  MemoryDumpManagerDelegateForTesting() {
     ON_CALL(*this, RequestGlobalMemoryDump(_, _))
         .WillByDefault(Invoke(
             this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
@@ -131,13 +129,13 @@
                void(const MemoryDumpRequestArgs& args,
                     const MemoryDumpCallback& callback));
 
-  bool IsCoordinator() const override { return is_coordinator_; }
+  uint64_t GetTracingProcessId() const override {
+    NOTREACHED();
+    return MemoryDumpManager::kInvalidTracingProcessId;
+  }
 
   // Promote the CreateProcessDump to public so it can be used by test fixtures.
   using MemoryDumpManagerDelegate::CreateProcessDump;
-
- private:
-  bool is_coordinator_;
 };
 
 class MockMemoryDumpProvider : public MemoryDumpProvider {
@@ -182,19 +180,19 @@
   unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
 
   bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  OnceClosure task,
+                                  const Closure& task,
                                   TimeDelta delay) override {
     NOTREACHED();
     return false;
   }
 
   bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       OnceClosure task,
+                       const Closure& task,
                        TimeDelta delay) override {
     num_of_post_tasks_++;
     if (enabled_) {
       return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
-                                                          std::move(task));
+                                                          task);
     }
     return false;
   }
@@ -212,8 +210,6 @@
   unsigned num_of_post_tasks_;
 };
 
-}  // namespace
-
 class MemoryDumpManagerTest : public testing::Test {
  public:
   MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
@@ -224,12 +220,13 @@
     mdm_.reset(new MemoryDumpManager());
     MemoryDumpManager::SetInstanceForTesting(mdm_.get());
     ASSERT_EQ(mdm_.get(), MemoryDumpManager::GetInstance());
+    delegate_.reset(new MemoryDumpManagerDelegateForTesting);
   }
 
   void TearDown() override {
     MemoryDumpManager::SetInstanceForTesting(nullptr);
-    delegate_ = nullptr;
     mdm_.reset();
+    delegate_.reset();
     message_loop_.reset();
     TraceLog::DeleteForTesting();
   }
@@ -251,8 +248,7 @@
  protected:
   void InitializeMemoryDumpManager(bool is_coordinator) {
     mdm_->set_dumper_registrations_ignored_for_testing(true);
-    delegate_ = new MemoryDumpManagerDelegateForTesting(is_coordinator);
-    mdm_->Initialize(base::WrapUnique(delegate_));
+    mdm_->Initialize(delegate_.get(), is_coordinator);
   }
 
   void RequestGlobalDumpAndWait(MemoryDumpType dump_type,
@@ -278,8 +274,7 @@
   void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
 
   bool IsPeriodicDumpingEnabled() const {
-    return MemoryDumpScheduler::GetInstance()
-        ->IsPeriodicTimerRunningForTesting();
+    return mdm_->dump_scheduler_->IsPeriodicTimerRunningForTesting();
   }
 
   int GetMaxConsecutiveFailuresCount() const {
@@ -288,7 +283,7 @@
 
   const MemoryDumpProvider::Options kDefaultOptions;
   std::unique_ptr<MemoryDumpManager> mdm_;
-  MemoryDumpManagerDelegateForTesting* delegate_;
+  std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
   bool last_callback_success_;
 
  private:
@@ -440,13 +435,7 @@
 
 // Checks that the dump provider invocations depend only on the current
 // registration state and not on previous registrations and dumps.
-// Flaky on iOS, see crbug.com/706874
-#if defined(OS_IOS)
-#define MAYBE_RegistrationConsistency DISABLED_RegistrationConsistency
-#else
-#define MAYBE_RegistrationConsistency RegistrationConsistency
-#endif
-TEST_F(MemoryDumpManagerTest, MAYBE_RegistrationConsistency) {
+TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
   MockMemoryDumpProvider mdp;
 
@@ -908,6 +897,7 @@
   // initialization gets NACK-ed cleanly.
   {
     EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+    EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
     RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                              MemoryDumpLevelOfDetail::DETAILED);
     EXPECT_FALSE(last_callback_success_);
@@ -916,9 +906,9 @@
   // Now late-initialize the MemoryDumpManager and check that the
   // RequestGlobalDump completes successfully.
   {
-    InitializeMemoryDumpManager(false /* is_coordinator */);
     EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
     EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+    InitializeMemoryDumpManager(false /* is_coordinator */);
     RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                              MemoryDumpLevelOfDetail::DETAILED);
     EXPECT_TRUE(last_callback_success_);
@@ -1020,13 +1010,7 @@
 
 // Tests against race conditions that might arise when disabling tracing in the
 // middle of a global memory dump.
-// Flaky on iOS, see crbug.com/706961
-#if defined(OS_IOS)
-#define MAYBE_DisableTracingWhileDumping DISABLED_DisableTracingWhileDumping
-#else
-#define MAYBE_DisableTracingWhileDumping DisableTracingWhileDumping
-#endif
-TEST_F(MemoryDumpManagerTest, MAYBE_DisableTracingWhileDumping) {
+TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
   base::WaitableEvent tracing_disabled_event(
       WaitableEvent::ResetPolicy::AUTOMATIC,
       WaitableEvent::InitialState::NOT_SIGNALED);
diff --git a/base/trace_event/memory_dump_provider_info.cc b/base/trace_event/memory_dump_provider_info.cc
deleted file mode 100644
index 6bb7110..0000000
--- a/base/trace_event/memory_dump_provider_info.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_dump_provider_info.h"
-
-#include <tuple>
-
-#include "base/sequenced_task_runner.h"
-
-namespace base {
-namespace trace_event {
-
-MemoryDumpProviderInfo::MemoryDumpProviderInfo(
-    MemoryDumpProvider* dump_provider,
-    const char* name,
-    scoped_refptr<SequencedTaskRunner> task_runner,
-    const MemoryDumpProvider::Options& options,
-    bool whitelisted_for_background_mode)
-    : dump_provider(dump_provider),
-      options(options),
-      name(name),
-      task_runner(std::move(task_runner)),
-      whitelisted_for_background_mode(whitelisted_for_background_mode),
-      consecutive_failures(0),
-      disabled(false) {}
-
-MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
-
-bool MemoryDumpProviderInfo::Comparator::operator()(
-    const scoped_refptr<MemoryDumpProviderInfo>& a,
-    const scoped_refptr<MemoryDumpProviderInfo>& b) const {
-  if (!a || !b)
-    return a.get() < b.get();
-  // Ensure that unbound providers (task_runner == nullptr) always run last.
-  // Rationale: some unbound dump providers are known to be slow, keep them last
-  // to avoid skewing timings of the other dump providers.
-  return std::tie(a->task_runner, a->dump_provider) >
-         std::tie(b->task_runner, b->dump_provider);
-}
-
-}  // namespace trace_event
-}  // namespace base
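
The Comparator deleted above hides its intent in the descending std::tie
comparison. As a minimal standalone sketch of the same ordering, with
std::shared_ptr standing in for scoped_refptr and every other name
hypothetical: a null task runner is the smallest pointer value, so sorting
tuples in descending order is what pushes unbound providers to the end.

#include <iostream>
#include <memory>
#include <set>
#include <tuple>

struct TaskRunner {};  // stand-in for SequencedTaskRunner
struct Provider {};    // stand-in for MemoryDumpProvider

struct Info {
  std::shared_ptr<TaskRunner> task_runner;  // nullptr == unbound provider
  Provider* provider = nullptr;
};

struct Comparator {
  bool operator()(const std::shared_ptr<Info>& a,
                  const std::shared_ptr<Info>& b) const {
    if (!a || !b)
      return a.get() < b.get();
    // Descending order: non-null task runners compare greater than null,
    // so slow unbound providers are iterated after all the bound ones.
    return std::tie(a->task_runner, a->provider) >
           std::tie(b->task_runner, b->provider);
  }
};

int main() {
  Provider p1, p2;
  auto bound =
      std::make_shared<Info>(Info{std::make_shared<TaskRunner>(), &p1});
  auto unbound = std::make_shared<Info>(Info{nullptr, &p2});
  std::set<std::shared_ptr<Info>, Comparator> providers{unbound, bound};
  std::cout << (*providers.begin() == bound) << "\n";  // prints 1
}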
diff --git a/base/trace_event/memory_dump_provider_info.h b/base/trace_event/memory_dump_provider_info.h
deleted file mode 100644
index ca63a98..0000000
--- a/base/trace_event/memory_dump_provider_info.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
-#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
-
-#include <memory>
-#include <set>
-
-#include "base/base_export.h"
-#include "base/memory/ref_counted.h"
-#include "base/trace_event/memory_dump_provider.h"
-
-namespace base {
-
-class SequencedTaskRunner;
-
-namespace trace_event {
-
-// Wraps a MemoryDumpProvider (MDP), which is registered via
-// MemoryDumpManager(MDM)::RegisterDumpProvider(), holding the extra information
-// required to deal with it (which task runner it should be invoked on,
-// whether it has been disabled, etc.).
-// More importantly, having a refptr to this object guarantees that an MDP that
-// is not thread-bound (hence which can only be unregistered via
-// MDM::UnregisterAndDeleteDumpProviderSoon()) will stay alive as long as the
-// refptr is held.
-//
-// Lifetime:
-// At any time, there is at most one instance of this class for each instance
-// of a given MemoryDumpProvider, but there might be several scoped_refptr
-// holding onto it. Specifically:
-// - In nominal conditions, there is a refptr for each registered MDP in the
-//   MDM's |dump_providers_| list.
-// - In most cases, the only refptr (in the |dump_providers_| list) is destroyed
-//   by MDM::UnregisterDumpProvider().
-// - However, when MDM starts a dump, the list of refptrs is copied into the
-//   ProcessMemoryDumpAsyncState. That list is pruned as MDP(s) are invoked.
-// - If UnregisterDumpProvider() is called on a non-thread-bound MDP while a
-//   dump is in progress, the extra refptr is destroyed in
-//   MDM::SetupNextMemoryDump() or MDM::InvokeOnMemoryDump(), when the copy
-//   inside ProcessMemoryDumpAsyncState is erase()-d.
-// - The PeakDetector can keep extra refptrs when enabled.
-struct BASE_EXPORT MemoryDumpProviderInfo
-    : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
- public:
-  // Define a total order based on the |task_runner| affinity, so that MDPs
-  // belonging to the same SequencedTaskRunner are adjacent in the set.
-  struct Comparator {
-    bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
-                    const scoped_refptr<MemoryDumpProviderInfo>& b) const;
-  };
-  using OrderedSet =
-      std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
-
-  MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
-                         const char* name,
-                         scoped_refptr<SequencedTaskRunner> task_runner,
-                         const MemoryDumpProvider::Options& options,
-                         bool whitelisted_for_background_mode);
-
-  // It is safe to access the const fields below from any thread as they are
-  // never mutated.
-
-  MemoryDumpProvider* const dump_provider;
-
-  // The |options| arg passed to MDM::RegisterDumpProvider().
-  const MemoryDumpProvider::Options options;
-
-  // Human readable name, not unique (distinct MDP instances might have the same
-  // name). Used for debugging, testing and whitelisting for BACKGROUND mode.
-  const char* const name;
-
-  // The task runner on which the MDP::OnMemoryDump call should be posted.
-  // Can be nullptr, in which case the MDP will be invoked on a background
-  // thread handled by MDM.
-  const scoped_refptr<SequencedTaskRunner> task_runner;
-
-  // True if the dump provider is whitelisted for background mode.
-  const bool whitelisted_for_background_mode;
-
-  // The fields below, instead, are not thread safe and can be mutated only:
-  // - On the |task_runner|, when not null (i.e. for thread-bound MDPs).
-  // - By the MDM's background thread (or in any other way that guarantees
-  //   sequencing) for non-thread-bound MDPs.
-
-  // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
-  // nullptr in all other cases.
-  std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
-
-  // For fail-safe logic (auto-disable failing MDPs).
-  int consecutive_failures;
-
-  // Flagged either by the auto-disable logic or during unregistration.
-  bool disabled;
-
- private:
-  friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
-  ~MemoryDumpProviderInfo();
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
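
The |owned_dump_provider| field in the header deleted above implements an
ownership hand-off that is easy to miss: for
UnregisterAndDeleteDumpProviderSoon(), the provider is moved into its own
ref-counted info, so whoever releases the last refptr also destroys the
provider. A sketch of the pattern with standard types (shared_ptr in place
of scoped_refptr; all names hypothetical):

#include <iostream>
#include <memory>

struct DumpProvider {
  ~DumpProvider() { std::cout << "provider destroyed\n"; }
};

// The info holds a raw pointer for normal use, plus an owning pointer
// that is only set when the provider's lifetime is handed to the info.
struct Info {
  DumpProvider* dump_provider = nullptr;               // always set
  std::unique_ptr<DumpProvider> owned_dump_provider;   // set on hand-off
};

int main() {
  auto mdp = std::make_unique<DumpProvider>();
  auto info = std::make_shared<Info>();
  info->dump_provider = mdp.get();

  // UnregisterAndDeleteDumpProviderSoon(): transfer ownership into the
  // info; any in-flight dump holding a refptr keeps the provider alive.
  info->owned_dump_provider = std::move(mdp);

  std::shared_ptr<Info> in_flight_copy = info;  // e.g. a dump in progress
  info.reset();            // unregistered; provider still alive
  in_flight_copy.reset();  // last ref gone; provider destroyed here
}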
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index f274400..bf72bef 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -60,9 +60,5 @@
   return MemoryDumpLevelOfDetail::LAST;
 }
 
-MemoryDumpCallbackResult::MemoryDumpCallbackResult() {}
-
-MemoryDumpCallbackResult::~MemoryDumpCallbackResult() {}
-
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index a8b3f42..90a866f 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -9,12 +9,10 @@
 // These are also used in the IPCs for coordinating inter-process memory dumps.
 
 #include <stdint.h>
-#include <map>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/callback.h"
-#include "base/process/process_handle.h"
 
 namespace base {
 namespace trace_event {
@@ -74,33 +72,6 @@
   MemoryDumpLevelOfDetail level_of_detail;
 };
 
-// TODO(hjd): Not used yet, see crbug.com/703184
-// Summarises information about memory use as seen by a single process.
-// This information will eventually be passed to a service to be collated
-// and reported.
-struct MemoryDumpCallbackResult {
-  struct OSMemDump {
-    uint32_t resident_set_kb = 0;
-  };
-  struct ChromeMemDump {
-    uint32_t malloc_total_kb = 0;
-    uint32_t partition_alloc_total_kb = 0;
-    uint32_t blink_gc_total_kb = 0;
-    uint32_t v8_total_kb = 0;
-  };
-
-  // These are for the current process.
-  OSMemDump os_dump;
-  ChromeMemDump chrome_dump;
-
-  // In some cases, OS stats can only be dumped from a privileged process to
-  // get around sandboxing/selinux restrictions (see crbug.com/461788).
-  std::map<ProcessId, OSMemDump> extra_processes_dump;
-
-  MemoryDumpCallbackResult();
-  ~MemoryDumpCallbackResult();
-};
-
 using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
 
 BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
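
For context on the struct removed above (marked "not used yet"): the field
shapes suggest a collation step that sums per-process totals. A purely
illustrative sketch, with only the field names taken from the removed code
and everything else assumed:

#include <cstdint>
#include <map>

using ProcessId = int;  // assumption; base::ProcessId is platform-specific

struct OSMemDump { uint32_t resident_set_kb = 0; };
struct ChromeMemDump {
  uint32_t malloc_total_kb = 0;
  uint32_t partition_alloc_total_kb = 0;
  uint32_t blink_gc_total_kb = 0;
  uint32_t v8_total_kb = 0;
};

struct MemoryDumpCallbackResult {
  OSMemDump os_dump;        // the current process
  ChromeMemDump chrome_dump;
  // Dumps made on behalf of other processes, e.g. where sandboxing only
  // lets a privileged process read OS stats.
  std::map<ProcessId, OSMemDump> extra_processes_dump;
};

// Hypothetical collation step: total RSS across this process and any
// processes it dumped on behalf of.
uint64_t TotalResidentKb(const MemoryDumpCallbackResult& result) {
  uint64_t total = result.os_dump.resident_set_kb;
  for (const auto& entry : result.extra_processes_dump)
    total += entry.second.resident_set_kb;
  return total;
}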
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
index 150feb8..eaa8d63 100644
--- a/base/trace_event/memory_dump_scheduler.cc
+++ b/base/trace_event/memory_dump_scheduler.cc
@@ -21,131 +21,108 @@
 uint32_t g_polling_interval_ms_for_testing = 0;
 }  // namespace
 
-// static
-MemoryDumpScheduler* MemoryDumpScheduler::GetInstance() {
-  static MemoryDumpScheduler* instance = new MemoryDumpScheduler();
-  return instance;
-}
-
-MemoryDumpScheduler::MemoryDumpScheduler() : mdm_(nullptr), is_setup_(false) {}
-MemoryDumpScheduler::~MemoryDumpScheduler() {}
-
-void MemoryDumpScheduler::Setup(
+MemoryDumpScheduler::MemoryDumpScheduler(
     MemoryDumpManager* mdm,
-    scoped_refptr<SingleThreadTaskRunner> polling_task_runner) {
-  mdm_ = mdm;
-  polling_task_runner_ = polling_task_runner;
-  periodic_state_.reset(new PeriodicTriggerState);
-  polling_state_.reset(new PollingTriggerState);
-  is_setup_ = true;
-}
+    scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
+    : mdm_(mdm), polling_state_(polling_task_runner) {}
+
+MemoryDumpScheduler::~MemoryDumpScheduler() {}
 
 void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
                                      MemoryDumpLevelOfDetail level_of_detail,
                                      uint32_t min_time_between_dumps_ms) {
-  DCHECK(is_setup_);
   if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
-    DCHECK(!periodic_state_->is_configured);
-    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
+    DCHECK(!periodic_state_.is_configured);
+    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
     DCHECK_NE(0u, min_time_between_dumps_ms);
 
-    polling_state_->level_of_detail = level_of_detail;
-    polling_state_->min_polls_between_dumps =
-        (min_time_between_dumps_ms + polling_state_->polling_interval_ms - 1) /
-        polling_state_->polling_interval_ms;
-    polling_state_->current_state = PollingTriggerState::CONFIGURED;
+    polling_state_.level_of_detail = level_of_detail;
+    polling_state_.min_polls_between_dumps =
+        (min_time_between_dumps_ms + polling_state_.polling_interval_ms - 1) /
+        polling_state_.polling_interval_ms;
+    polling_state_.current_state = PollingTriggerState::CONFIGURED;
   } else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
-    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
-    periodic_state_->is_configured = true;
+    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
+    periodic_state_.is_configured = true;
     DCHECK_NE(0u, min_time_between_dumps_ms);
     switch (level_of_detail) {
       case MemoryDumpLevelOfDetail::BACKGROUND:
         break;
       case MemoryDumpLevelOfDetail::LIGHT:
-        DCHECK_EQ(0u, periodic_state_->light_dump_period_ms);
-        periodic_state_->light_dump_period_ms = min_time_between_dumps_ms;
+        DCHECK_EQ(0u, periodic_state_.light_dump_period_ms);
+        periodic_state_.light_dump_period_ms = min_time_between_dumps_ms;
         break;
       case MemoryDumpLevelOfDetail::DETAILED:
-        DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms);
-        periodic_state_->heavy_dump_period_ms = min_time_between_dumps_ms;
+        DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms);
+        periodic_state_.heavy_dump_period_ms = min_time_between_dumps_ms;
         break;
     }
 
-    periodic_state_->min_timer_period_ms = std::min(
-        periodic_state_->min_timer_period_ms, min_time_between_dumps_ms);
-    DCHECK_EQ(0u, periodic_state_->light_dump_period_ms %
-                      periodic_state_->min_timer_period_ms);
-    DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms %
-                      periodic_state_->min_timer_period_ms);
+    periodic_state_.min_timer_period_ms = std::min(
+        periodic_state_.min_timer_period_ms, min_time_between_dumps_ms);
+    DCHECK_EQ(0u, periodic_state_.light_dump_period_ms %
+                      periodic_state_.min_timer_period_ms);
+    DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms %
+                      periodic_state_.min_timer_period_ms);
   }
 }
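
The min_polls_between_dumps expression above is integer ceiling division,
converting a minimum time between dumps into a minimum number of polls.
Spelled out as a self-contained sketch:

#include <cassert>
#include <cstdint>

// Ceiling division, as used above: ceil(time_ms / interval_ms) without
// floating point, so the dump never fires earlier than requested.
uint32_t MinPollsBetweenDumps(uint32_t min_time_between_dumps_ms,
                              uint32_t polling_interval_ms) {
  return (min_time_between_dumps_ms + polling_interval_ms - 1) /
         polling_interval_ms;
}

int main() {
  assert(MinPollsBetweenDumps(100, 25) == 4);  // exact multiple
  assert(MinPollsBetweenDumps(101, 25) == 5);  // rounds up, never down
}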
 
-void MemoryDumpScheduler::EnablePeriodicTriggerIfNeeded() {
-  DCHECK(is_setup_);
-  if (!periodic_state_->is_configured || periodic_state_->timer.IsRunning())
+void MemoryDumpScheduler::NotifyPeriodicTriggerSupported() {
+  if (!periodic_state_.is_configured || periodic_state_.timer.IsRunning())
     return;
-  periodic_state_->light_dumps_rate = periodic_state_->light_dump_period_ms /
-                                      periodic_state_->min_timer_period_ms;
-  periodic_state_->heavy_dumps_rate = periodic_state_->heavy_dump_period_ms /
-                                      periodic_state_->min_timer_period_ms;
+  periodic_state_.light_dumps_rate = periodic_state_.light_dump_period_ms /
+                                     periodic_state_.min_timer_period_ms;
+  periodic_state_.heavy_dumps_rate = periodic_state_.heavy_dump_period_ms /
+                                     periodic_state_.min_timer_period_ms;
 
-  periodic_state_->dump_count = 0;
-  periodic_state_->timer.Start(
+  periodic_state_.dump_count = 0;
+  periodic_state_.timer.Start(
       FROM_HERE,
-      TimeDelta::FromMilliseconds(periodic_state_->min_timer_period_ms),
+      TimeDelta::FromMilliseconds(periodic_state_.min_timer_period_ms),
       Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
 }
 
-void MemoryDumpScheduler::EnablePollingIfNeeded() {
-  DCHECK(is_setup_);
-  if (polling_state_->current_state != PollingTriggerState::CONFIGURED)
+void MemoryDumpScheduler::NotifyPollingSupported() {
+  if (polling_state_.current_state != PollingTriggerState::CONFIGURED)
     return;
 
-  polling_state_->current_state = PollingTriggerState::ENABLED;
-  polling_state_->ResetTotals();
+  polling_state_.current_state = PollingTriggerState::ENABLED;
+  polling_state_.ResetTotals();
 
-  polling_task_runner_->PostTask(
+  polling_state_.polling_task_runner->PostTask(
       FROM_HERE,
       Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
 }
 
 void MemoryDumpScheduler::NotifyDumpTriggered() {
-  if (polling_task_runner_ &&
-      !polling_task_runner_->RunsTasksOnCurrentThread()) {
-    polling_task_runner_->PostTask(
+  if (polling_state_.polling_task_runner &&
+      polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
+    polling_state_.polling_task_runner->PostTask(
         FROM_HERE,
         Bind(&MemoryDumpScheduler::NotifyDumpTriggered, Unretained(this)));
     return;
   }
-
-  if (!polling_state_ ||
-      polling_state_->current_state != PollingTriggerState::ENABLED) {
+  if (polling_state_.current_state != PollingTriggerState::ENABLED)
     return;
-  }
 
-  polling_state_->ResetTotals();
+  polling_state_.ResetTotals();
 }
 
 void MemoryDumpScheduler::DisableAllTriggers() {
-  if (periodic_state_) {
-    if (periodic_state_->timer.IsRunning())
-      periodic_state_->timer.Stop();
-    periodic_state_.reset();
-  }
-
-  if (polling_task_runner_) {
-    DCHECK(polling_state_);
-    polling_task_runner_->PostTask(
-        FROM_HERE, Bind(&MemoryDumpScheduler::DisablePollingOnPollingThread,
-                        Unretained(this)));
-    polling_task_runner_ = nullptr;
-  }
-  is_setup_ = false;
+  if (periodic_state_.timer.IsRunning())
+    periodic_state_.timer.Stop();
+  DisablePolling();
 }
 
-void MemoryDumpScheduler::DisablePollingOnPollingThread() {
-  polling_state_->current_state = PollingTriggerState::DISABLED;
-  polling_state_.reset();
+void MemoryDumpScheduler::DisablePolling() {
+  if (polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
+    if (polling_state_.polling_task_runner->PostTask(
+            FROM_HERE,
+            Bind(&MemoryDumpScheduler::DisablePolling, Unretained(this))))
+      return;
+  }
+  polling_state_.current_state = PollingTriggerState::DISABLED;
+  polling_state_.polling_task_runner = nullptr;
 }
 
 // static
@@ -154,32 +131,30 @@
 }
 
 bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
-  return periodic_state_->timer.IsRunning();
+  return periodic_state_.timer.IsRunning();
 }
 
 void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
   MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
-  if (periodic_state_->light_dumps_rate > 0 &&
-      periodic_state_->dump_count % periodic_state_->light_dumps_rate == 0)
+  if (periodic_state_.light_dumps_rate > 0 &&
+      periodic_state_.dump_count % periodic_state_.light_dumps_rate == 0)
     level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
-  if (periodic_state_->heavy_dumps_rate > 0 &&
-      periodic_state_->dump_count % periodic_state_->heavy_dumps_rate == 0)
+  if (periodic_state_.heavy_dumps_rate > 0 &&
+      periodic_state_.dump_count % periodic_state_.heavy_dumps_rate == 0)
     level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
-  ++periodic_state_->dump_count;
+  ++periodic_state_.dump_count;
 
   mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
 }
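
RequestPeriodicGlobalDump() above picks the level of detail by tick count:
every tick is at least BACKGROUND, upgraded on light/heavy rate boundaries,
with DETAILED winning when both align. The same cadence logic as an
isolated sketch (names mirror the fields above; the example rates are
assumptions):

#include <cstdint>
#include <iostream>

enum class LevelOfDetail { BACKGROUND, LIGHT, DETAILED };

LevelOfDetail LevelForTick(uint32_t tick,
                           uint32_t light_dumps_rate,
                           uint32_t heavy_dumps_rate) {
  LevelOfDetail level = LevelOfDetail::BACKGROUND;
  if (light_dumps_rate > 0 && tick % light_dumps_rate == 0)
    level = LevelOfDetail::LIGHT;
  if (heavy_dumps_rate > 0 && tick % heavy_dumps_rate == 0)
    level = LevelOfDetail::DETAILED;  // heavy wins when both align
  return level;
}

int main() {
  // E.g. timer every 250 ms, light every 1 s (rate 4), heavy every 2 s
  // (rate 8): ticks 0 and 8 are DETAILED, tick 4 is LIGHT.
  for (uint32_t tick = 0; tick < 9; ++tick)
    std::cout << tick << ": "
              << static_cast<int>(LevelForTick(tick, 4, 8)) << "\n";
}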
 
 void MemoryDumpScheduler::PollMemoryOnPollingThread() {
-  if (!polling_state_)
+  if (polling_state_.current_state != PollingTriggerState::ENABLED)
     return;
 
-  DCHECK_EQ(PollingTriggerState::ENABLED, polling_state_->current_state);
-
   uint64_t polled_memory = 0;
   bool res = mdm_->PollFastMemoryTotal(&polled_memory);
   DCHECK(res);
-  if (polling_state_->level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+  if (polling_state_.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
     TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
                    polled_memory / 1024 / 1024);
   }
@@ -191,14 +166,14 @@
                          polled_memory / 1024 / 1024);
 
     mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
-                            polling_state_->level_of_detail);
+                            polling_state_.level_of_detail);
   }
 
   // TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
   ThreadTaskRunnerHandle::Get()->PostDelayedTask(
       FROM_HERE,
       Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
-      TimeDelta::FromMilliseconds(polling_state_->polling_interval_ms));
+      TimeDelta::FromMilliseconds(polling_state_.polling_interval_ms));
 }
 
 bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
@@ -209,52 +184,52 @@
     return false;
 
   bool should_dump = false;
-  ++polling_state_->num_polls_from_last_dump;
-  if (polling_state_->last_dump_memory_total == 0) {
+  ++polling_state_.num_polls_from_last_dump;
+  if (polling_state_.last_dump_memory_total == 0) {
     // If it's the first sample, trigger a memory dump.
     should_dump = true;
-  } else if (polling_state_->min_polls_between_dumps >
-             polling_state_->num_polls_from_last_dump) {
+  } else if (polling_state_.min_polls_between_dumps >
+             polling_state_.num_polls_from_last_dump) {
     return false;
   }
 
   int64_t increase_from_last_dump =
-      current_memory_total - polling_state_->last_dump_memory_total;
+      current_memory_total - polling_state_.last_dump_memory_total;
   should_dump |=
-      increase_from_last_dump > polling_state_->memory_increase_threshold;
+      increase_from_last_dump > polling_state_.memory_increase_threshold;
   should_dump |= IsCurrentSamplePeak(current_memory_total);
   if (should_dump)
-    polling_state_->ResetTotals();
+    polling_state_.ResetTotals();
   return should_dump;
 }
 
 bool MemoryDumpScheduler::IsCurrentSamplePeak(
     uint64_t current_memory_total_bytes) {
   uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
-  polling_state_->last_memory_totals_kb_index =
-      (polling_state_->last_memory_totals_kb_index + 1) %
+  polling_state_.last_memory_totals_kb_index =
+      (polling_state_.last_memory_totals_kb_index + 1) %
       PollingTriggerState::kMaxNumMemorySamples;
   uint64_t mean = 0;
   for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
-    if (polling_state_->last_memory_totals_kb[i] == 0) {
+    if (polling_state_.last_memory_totals_kb[i] == 0) {
       // Not enough samples to detect peaks.
       polling_state_
-          ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
+          .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
           current_memory_total_kb;
       return false;
     }
-    mean += polling_state_->last_memory_totals_kb[i];
+    mean += polling_state_.last_memory_totals_kb[i];
   }
   mean = mean / PollingTriggerState::kMaxNumMemorySamples;
   uint64_t variance = 0;
   for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
-    variance += (polling_state_->last_memory_totals_kb[i] - mean) *
-                (polling_state_->last_memory_totals_kb[i] - mean);
+    variance += (polling_state_.last_memory_totals_kb[i] - mean) *
+                (polling_state_.last_memory_totals_kb[i] - mean);
   }
   variance = variance / PollingTriggerState::kMaxNumMemorySamples;
 
   polling_state_
-      ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
+      .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
       current_memory_total_kb;
 
   // If the stddev is less than 0.2%, we consider the process inactive.
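
IsCurrentSamplePeak() maintains a 50-sample ring buffer and derives mean
and variance from it; per the comment above, a standard deviation under
0.2% of the mean marks the process as inactive. A condensed sketch of
those statistics follows; the final peak threshold is an assumption, since
the actual cut-off falls outside this hunk.

#include <array>
#include <cstdint>

constexpr uint32_t kMaxNumMemorySamples = 50;

// Returns true when |current_kb| looks like a peak. Only the
// mean/variance/0.2% parts mirror the code above; the 3-stddev peak
// threshold at the end is an assumption for illustration.
bool LooksLikePeak(std::array<uint64_t, kMaxNumMemorySamples>& samples_kb,
                   uint32_t& index, uint64_t current_kb) {
  samples_kb[index] = current_kb;
  index = (index + 1) % kMaxNumMemorySamples;

  uint64_t mean = 0;
  for (uint64_t s : samples_kb) {
    if (s == 0)
      return false;  // Not enough samples to detect peaks yet.
    mean += s;
  }
  mean /= kMaxNumMemorySamples;

  uint64_t variance = 0;
  for (uint64_t s : samples_kb)
    variance += (s - mean) * (s - mean);  // wraps, but squares correctly
  variance /= kMaxNumMemorySamples;

  // stddev < 0.2% of the mean: process considered inactive, no peak.
  // (Compared in squared form to stay in integer arithmetic.)
  if (variance * 500 * 500 < mean * mean)
    return false;

  if (current_kb <= mean)
    return false;
  uint64_t delta = current_kb - mean;
  return delta * delta > 9 * variance;  // assumed: > 3 stddevs above mean
}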
@@ -281,9 +256,11 @@
   DCHECK(!timer.IsRunning());
 }
 
-MemoryDumpScheduler::PollingTriggerState::PollingTriggerState()
+MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
+    scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
     : current_state(DISABLED),
       level_of_detail(MemoryDumpLevelOfDetail::FIRST),
+      polling_task_runner(polling_task_runner),
       polling_interval_ms(g_polling_interval_ms_for_testing
                               ? g_polling_interval_ms_for_testing
                               : kMemoryTotalsPollingInterval),
@@ -293,7 +270,9 @@
       memory_increase_threshold(0),
       last_memory_totals_kb_index(0) {}
 
-MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {}
+MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {
+  DCHECK(!polling_task_runner);
+}
 
 void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
   if (!memory_increase_threshold) {
@@ -303,11 +282,8 @@
     // Set threshold to 1% of total system memory.
     SystemMemoryInfoKB meminfo;
     bool res = GetSystemMemoryInfo(&meminfo);
-    if (res) {
-      memory_increase_threshold =
-          (static_cast<int64_t>(meminfo.total) / 100) * 1024;
-    }
-    DCHECK_GT(memory_increase_threshold, 0u);
+    if (res)
+      memory_increase_threshold = (meminfo.total / 100) * 1024;
 #endif
   }
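
The restored threshold above computes 1% of system memory in bytes from
meminfo.total, which SystemMemoryInfoKB reports in KiB. Note the restored
form drops the int64_t widening the reverted code had added; spelled out
with the widening kept, since plain int arithmetic overflows for systems
beyond roughly 200 GiB:

#include <cstdint>

// |total_kb| is system memory in KiB; the result is a byte threshold of
// 1% of system memory. Widening to int64_t before multiplying keeps the
// arithmetic safe on large-memory machines.
int64_t OnePercentOfSystemMemoryBytes(int total_kb) {
  return (static_cast<int64_t>(total_kb) / 100) * 1024;
}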
 
diff --git a/base/trace_event/memory_dump_scheduler.h b/base/trace_event/memory_dump_scheduler.h
index ab8441b..fd21fce 100644
--- a/base/trace_event/memory_dump_scheduler.h
+++ b/base/trace_event/memory_dump_scheduler.h
@@ -5,8 +5,6 @@
 #ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
 #define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
 
-#include <memory>
-
 #include "base/base_export.h"
 #include "base/gtest_prod_util.h"
 #include "base/memory/ref_counted.h"
@@ -20,50 +18,42 @@
 
 class MemoryDumpManager;
 
-// Schedules global dump requests based on the triggers added. The methods of
-// this class are NOT thread safe and the client has to take care of invoking
-// all the methods of the class safely.
+// Schedules global dump requests based on the triggers added.
 class BASE_EXPORT MemoryDumpScheduler {
  public:
-  static MemoryDumpScheduler* GetInstance();
-
-  // Initializes the scheduler. NOT thread safe.
-  void Setup(MemoryDumpManager* mdm_,
-             scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
+  MemoryDumpScheduler(
+      MemoryDumpManager* mdm_,
+      scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
+  ~MemoryDumpScheduler();
 
   // Adds triggers for scheduling global dumps. Periodic and peak triggers
   // cannot both be added. At the moment, periodic support is limited to at
   // most one trigger per dump mode, and at most one peak trigger is allowed.
   // All intervals should be an integral multiple of the smallest
-  // interval specified. NOT thread safe.
+  // interval specified.
   void AddTrigger(MemoryDumpType trigger_type,
                   MemoryDumpLevelOfDetail level_of_detail,
                   uint32_t min_time_between_dumps_ms);
 
-  // Starts periodic dumps. NOT thread safe and triggers must be added before
-  // enabling.
-  void EnablePeriodicTriggerIfNeeded();
+  // Starts periodic dumps.
+  void NotifyPeriodicTriggerSupported();
 
-  // Starts polling memory total. NOT thread safe and triggers must be added
-  // before enabling.
-  void EnablePollingIfNeeded();
+  // Starts polling memory total.
+  void NotifyPollingSupported();
 
   // Resets time for triggering dump to account for minimum time between the
-  // dumps. NOT thread safe.
+  // dumps.
   void NotifyDumpTriggered();
 
-  // Disables all triggers. NOT thread safe. This should be called before
-  // polling thread is stopped to stop polling cleanly.
+  // Disables all triggers.
   void DisableAllTriggers();
 
  private:
   friend class MemoryDumpManagerTest;
-  friend class MemoryDumpSchedulerPollingTest;
   FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, TestPollingOnDumpThread);
-  FRIEND_TEST_ALL_PREFIXES(MemoryDumpSchedulerPollingTest, NotifyDumpTriggered);
 
   // Helper class to schedule periodic memory dumps.
-  struct BASE_EXPORT PeriodicTriggerState {
+  struct PeriodicTriggerState {
     PeriodicTriggerState();
     ~PeriodicTriggerState();
 
@@ -81,7 +71,7 @@
     DISALLOW_COPY_AND_ASSIGN(PeriodicTriggerState);
   };
 
-  struct BASE_EXPORT PollingTriggerState {
+  struct PollingTriggerState {
     enum State {
       CONFIGURED,  // Polling trigger was added.
       ENABLED,     // Polling is running.
@@ -90,7 +80,8 @@
 
     static const uint32_t kMaxNumMemorySamples = 50;
 
-    PollingTriggerState();
+    explicit PollingTriggerState(
+        scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
     ~PollingTriggerState();
 
     // Helper to clear the tracked memory totals and poll count from last dump.
@@ -99,6 +90,7 @@
     State current_state;
     MemoryDumpLevelOfDetail level_of_detail;
 
+    scoped_refptr<SingleThreadTaskRunner> polling_task_runner;
     uint32_t polling_interval_ms;
 
     // Minimum number of polls after the last dump at which the next dump can be
@@ -114,11 +106,8 @@
     DISALLOW_COPY_AND_ASSIGN(PollingTriggerState);
   };
 
-  MemoryDumpScheduler();
-  ~MemoryDumpScheduler();
-
-  // Helper to set polling disabled.
-  void DisablePollingOnPollingThread();
+  // Helper to set polling disabled on the polling thread.
+  void DisablePolling();
 
   // Periodically called by the timer.
   void RequestPeriodicGlobalDump();
@@ -140,19 +129,8 @@
 
   MemoryDumpManager* mdm_;
 
-  // Accessed on the thread of the client before enabling and only accessed on
-  // the thread that called "EnablePeriodicTriggerIfNeeded()" after enabling.
-  std::unique_ptr<PeriodicTriggerState> periodic_state_;
-
-  // Accessed on the thread of the client before enabling and only accessed on
-  // the polling thread after enabling.
-  std::unique_ptr<PollingTriggerState> polling_state_;
-
-  // Accessed on the thread of the client only.
-  scoped_refptr<SingleThreadTaskRunner> polling_task_runner_;
-
-  // True when the scheduler is setup. Accessed on the thread of client only.
-  bool is_setup_;
+  PeriodicTriggerState periodic_state_;
+  PollingTriggerState polling_state_;
 
   DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
 };
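
Putting the restored interface together, the expected call sequence from
the MemoryDumpManager side is roughly the following. This is a hedged
usage sketch, not code from this patch; the thread and manager plumbing
is assumed and the snippet is not standalone-compilable.

// Usage sketch against the restored MemoryDumpScheduler API.
void EnablePeriodicDumpsExample(
    MemoryDumpManager* mdm,
    scoped_refptr<SingleThreadTaskRunner> polling_task_runner) {
  MemoryDumpScheduler scheduler(mdm, polling_task_runner);

  // One LIGHT dump every 1000 ms and one DETAILED dump every 4000 ms;
  // 4000 is an integral multiple of 1000, as AddTrigger requires.
  scheduler.AddTrigger(MemoryDumpType::PERIODIC_INTERVAL,
                       MemoryDumpLevelOfDetail::LIGHT, 1000);
  scheduler.AddTrigger(MemoryDumpType::PERIODIC_INTERVAL,
                       MemoryDumpLevelOfDetail::DETAILED, 4000);

  scheduler.NotifyPeriodicTriggerSupported();  // starts the timer
  // ... tracing session runs; the timer fires RequestPeriodicGlobalDump ...
  scheduler.DisableAllTriggers();  // stops the timer and polling
}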
diff --git a/base/trace_event/memory_dump_scheduler_unittest.cc b/base/trace_event/memory_dump_scheduler_unittest.cc
deleted file mode 100644
index 9af2a3b..0000000
--- a/base/trace_event/memory_dump_scheduler_unittest.cc
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_dump_scheduler.h"
-
-#include <memory>
-
-#include "base/single_thread_task_runner.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-class MemoryDumpSchedulerPollingTest : public testing::Test {
- public:
-  static const uint32_t kMinPollsToDump = 5;
-
-  MemoryDumpSchedulerPollingTest()
-      : testing::Test(),
-        num_samples_tracked_(
-            MemoryDumpScheduler::PollingTriggerState::kMaxNumMemorySamples) {}
-
-  void SetUp() override {
-    MemoryDumpScheduler::SetPollingIntervalForTesting(1);
-    mds_ = MemoryDumpScheduler::GetInstance();
-    mds_->Setup(nullptr, nullptr);
-    mds_->AddTrigger(MemoryDumpType::PEAK_MEMORY_USAGE,
-                     MemoryDumpLevelOfDetail::LIGHT, kMinPollsToDump);
-    mds_->polling_state_->ResetTotals();
-    mds_->polling_state_->current_state =
-        MemoryDumpScheduler::PollingTriggerState::ENABLED;
-  }
-
-  void TearDown() override {
-    mds_->polling_state_->current_state =
-        MemoryDumpScheduler::PollingTriggerState::DISABLED;
-  }
-
- protected:
-  bool ShouldTriggerDump(uint64_t total) {
-    return mds_->ShouldTriggerDump(total);
-  }
-
-  uint32_t num_samples_tracked_;
-  MemoryDumpScheduler* mds_;
-};
-
-TEST_F(MemoryDumpSchedulerPollingTest, PeakDetection) {
-  for (uint32_t i = 0; i < num_samples_tracked_ * 6; ++i) {
-    // Memory is increased in steps and dumps must be triggered at every step.
-    uint64_t total = (2 + (i / (2 * num_samples_tracked_))) * 1024 * 1024;
-    bool did_trigger = ShouldTriggerDump(total);
-    // Dumps must be triggered only at specific iterations.
-    bool should_have_triggered = i == 0;
-    should_have_triggered |=
-        (i > num_samples_tracked_) && (i % (2 * num_samples_tracked_) == 1);
-    if (should_have_triggered) {
-      ASSERT_TRUE(did_trigger) << "Dump wasn't triggered at " << i;
-    } else {
-      ASSERT_FALSE(did_trigger) << "Unexpected dump at " << i;
-    }
-  }
-}
-
-TEST_F(MemoryDumpSchedulerPollingTest, SlowGrowthDetection) {
-  for (uint32_t i = 0; i < 15; ++i) {
-    // Record a 1 GiB increase in each call. Dumps are triggered at 1% of the
-    // system's total memory.
-    uint64_t total = static_cast<uint64_t>(i + 1) * 1024 * 1024 * 1024;
-    bool did_trigger = ShouldTriggerDump(total);
-    bool should_have_triggered = i % kMinPollsToDump == 0;
-    if (should_have_triggered) {
-      ASSERT_TRUE(did_trigger) << "Dump wasn't triggered at " << i;
-    } else {
-      ASSERT_FALSE(did_trigger) << "Unexpected dump at " << i;
-    }
-  }
-}
-
-TEST_F(MemoryDumpSchedulerPollingTest, NotifyDumpTriggered) {
-  for (uint32_t i = 0; i < num_samples_tracked_ * 6; ++i) {
-    uint64_t total = (2 + (i / (2 * num_samples_tracked_))) * 1024 * 1024;
-    if (i % num_samples_tracked_ == 0)
-      mds_->NotifyDumpTriggered();
-    bool did_trigger = ShouldTriggerDump(total);
-    // Dumps should never be triggered since NotifyDumpTriggered() is called
-    // frequently.
-    EXPECT_NE(0u, mds_->polling_state_->last_dump_memory_total);
-    EXPECT_GT(num_samples_tracked_ - 1,
-              mds_->polling_state_->last_memory_totals_kb_index);
-    EXPECT_LT(static_cast<int64_t>(
-                  total - mds_->polling_state_->last_dump_memory_total),
-              mds_->polling_state_->memory_increase_threshold);
-    ASSERT_FALSE(did_trigger && i) << "Unexpected dump at " << i;
-  }
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
index 746068a..ae74322 100644
--- a/base/trace_event/memory_infra_background_whitelist.cc
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -69,70 +69,10 @@
     "net/http_network_session_0x?/stream_factory",
     "net/sdch_manager_0x?",
     "net/ssl_session_cache",
-    "net/url_request_context",
-    "net/url_request_context/app_request",
-    "net/url_request_context/app_request/0x?",
-    "net/url_request_context/app_request/0x?/http_cache",
-    "net/url_request_context/app_request/0x?/http_cache/memory_backend",
-    "net/url_request_context/app_request/0x?/http_cache/simple_backend",
-    "net/url_request_context/app_request/0x?/http_network_session",
-    "net/url_request_context/app_request/0x?/sdch_manager",
-    "net/url_request_context/extensions",
-    "net/url_request_context/extensions/0x?",
-    "net/url_request_context/extensions/0x?/http_cache",
-    "net/url_request_context/extensions/0x?/http_cache/memory_backend",
-    "net/url_request_context/extensions/0x?/http_cache/simple_backend",
-    "net/url_request_context/extensions/0x?/http_network_session",
-    "net/url_request_context/extensions/0x?/sdch_manager",
-    "net/url_request_context/isolated_media",
-    "net/url_request_context/isolated_media/0x?",
-    "net/url_request_context/isolated_media/0x?/http_cache",
-    "net/url_request_context/isolated_media/0x?/http_cache/memory_backend",
-    "net/url_request_context/isolated_media/0x?/http_cache/simple_backend",
-    "net/url_request_context/isolated_media/0x?/http_network_session",
-    "net/url_request_context/isolated_media/0x?/sdch_manager",
-    "net/url_request_context/main",
-    "net/url_request_context/main/0x?",
-    "net/url_request_context/main/0x?/http_cache",
-    "net/url_request_context/main/0x?/http_cache/memory_backend",
-    "net/url_request_context/main/0x?/http_cache/simple_backend",
-    "net/url_request_context/main/0x?/http_network_session",
-    "net/url_request_context/main/0x?/sdch_manager",
-    "net/url_request_context/main_media",
-    "net/url_request_context/main_media/0x?",
-    "net/url_request_context/main_media/0x?/http_cache",
-    "net/url_request_context/main_media/0x?/http_cache/memory_backend",
-    "net/url_request_context/main_media/0x?/http_cache/simple_backend",
-    "net/url_request_context/main_media/0x?/http_network_session",
-    "net/url_request_context/main_media/0x?/sdch_manager",
-    "net/url_request_context/proxy",
-    "net/url_request_context/proxy/0x?",
-    "net/url_request_context/proxy/0x?/http_cache",
-    "net/url_request_context/proxy/0x?/http_cache/memory_backend",
-    "net/url_request_context/proxy/0x?/http_cache/simple_backend",
-    "net/url_request_context/proxy/0x?/http_network_session",
-    "net/url_request_context/proxy/0x?/sdch_manager",
-    "net/url_request_context/safe_browsing",
-    "net/url_request_context/safe_browsing/0x?",
-    "net/url_request_context/safe_browsing/0x?/http_cache",
-    "net/url_request_context/safe_browsing/0x?/http_cache/memory_backend",
-    "net/url_request_context/safe_browsing/0x?/http_cache/simple_backend",
-    "net/url_request_context/safe_browsing/0x?/http_network_session",
-    "net/url_request_context/safe_browsing/0x?/sdch_manager",
-    "net/url_request_context/system",
-    "net/url_request_context/system/0x?",
-    "net/url_request_context/system/0x?/http_cache",
-    "net/url_request_context/system/0x?/http_cache/memory_backend",
-    "net/url_request_context/system/0x?/http_cache/simple_backend",
-    "net/url_request_context/system/0x?/http_network_session",
-    "net/url_request_context/system/0x?/sdch_manager",
-    "net/url_request_context/unknown",
-    "net/url_request_context/unknown/0x?",
-    "net/url_request_context/unknown/0x?/http_cache",
-    "net/url_request_context/unknown/0x?/http_cache/memory_backend",
-    "net/url_request_context/unknown/0x?/http_cache/simple_backend",
-    "net/url_request_context/unknown/0x?/http_network_session",
-    "net/url_request_context/unknown/0x?/sdch_manager",
+    "net/url_request_context_0x?",
+    "net/url_request_context_0x?/http_cache",
+    "net/url_request_context_0x?/http_network_session",
+    "net/url_request_context_0x?/sdch_manager",
     "web_cache/Image_resources",
     "web_cache/CSS stylesheet_resources",
     "web_cache/Script_resources",
diff --git a/base/trace_event/memory_peak_detector.cc b/base/trace_event/memory_peak_detector.cc
deleted file mode 100644
index c361037..0000000
--- a/base/trace_event/memory_peak_detector.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_peak_detector.h"
-
-#include <stdint.h>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/threading/sequenced_task_runner_handle.h"
-#include "base/time/time.h"
-#include "base/trace_event/memory_dump_provider_info.h"
-
-namespace base {
-namespace trace_event {
-
-// static
-MemoryPeakDetector* MemoryPeakDetector::GetInstance() {
-  static MemoryPeakDetector* instance = new MemoryPeakDetector();
-  return instance;
-}
-
-MemoryPeakDetector::MemoryPeakDetector()
-    : generation_(0),
-      state_(NOT_INITIALIZED),
-      polling_interval_ms_(0),
-      poll_tasks_count_for_testing_(0) {}
-
-MemoryPeakDetector::~MemoryPeakDetector() {
-  // This is hit only in tests, in which case the test is expected to TearDown()
-  // cleanly and not leave the peak detector running.
-  DCHECK_EQ(NOT_INITIALIZED, state_);
-}
-
-void MemoryPeakDetector::Setup(
-    const GetDumpProvidersFunction& get_dump_providers_function,
-    const scoped_refptr<SequencedTaskRunner>& task_runner,
-    const OnPeakDetectedCallback& on_peak_detected_callback) {
-  DCHECK(!get_dump_providers_function.is_null());
-  DCHECK(task_runner);
-  DCHECK(!on_peak_detected_callback.is_null());
-  DCHECK(state_ == NOT_INITIALIZED || state_ == DISABLED);
-  DCHECK(dump_providers_.empty());
-  get_dump_providers_function_ = get_dump_providers_function;
-  task_runner_ = task_runner;
-  on_peak_detected_callback_ = on_peak_detected_callback;
-  state_ = DISABLED;
-}
-
-void MemoryPeakDetector::TearDown() {
-  if (task_runner_) {
-    task_runner_->PostTask(
-        FROM_HERE,
-        Bind(&MemoryPeakDetector::TearDownInternal, Unretained(this)));
-  }
-  task_runner_ = nullptr;
-}
-
-void MemoryPeakDetector::Start() {
-  task_runner_->PostTask(
-      FROM_HERE, Bind(&MemoryPeakDetector::StartInternal, Unretained(this)));
-}
-
-void MemoryPeakDetector::Stop() {
-  task_runner_->PostTask(
-      FROM_HERE, Bind(&MemoryPeakDetector::StopInternal, Unretained(this)));
-}
-
-void MemoryPeakDetector::NotifyMemoryDumpProvidersChanged() {
-  // It is possible to call this before the first Setup() call, in which case
-  // we want to just make this a noop. The next Start() will fetch the MDP list.
-  if (!task_runner_)
-    return;
-  task_runner_->PostTask(
-      FROM_HERE,
-      Bind(&MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded,
-           Unretained(this)));
-}
-
-void MemoryPeakDetector::StartInternal() {
-  DCHECK_EQ(DISABLED, state_);
-  state_ = ENABLED;
-  polling_interval_ms_ = 1;  // TODO(primiano): temporary until next CL.
-
-  // If there are any dump providers available, NotifyMemoryDumpProvidersChanged
-  // will fetch them and start the polling. Otherwise this will remain in the
-  // ENABLED state and the actual polling will start on the next call to
-  // ReloadDumpProvidersAndStartPollingIfNeeded().
-  // Depending on the sandbox model, it is possible that no polling-capable dump
-  // providers will ever be available.
-  ReloadDumpProvidersAndStartPollingIfNeeded();
-}
-
-void MemoryPeakDetector::StopInternal() {
-  DCHECK_NE(NOT_INITIALIZED, state_);
-  state_ = DISABLED;
-  ++generation_;
-  dump_providers_.clear();
-}
-
-void MemoryPeakDetector::TearDownInternal() {
-  StopInternal();
-  get_dump_providers_function_.Reset();
-  on_peak_detected_callback_.Reset();
-  state_ = NOT_INITIALIZED;
-}
-
-void MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded() {
-  if (state_ == DISABLED || state_ == NOT_INITIALIZED)
-    return;  // Start() will re-fetch the MDP list later.
-
-  DCHECK((state_ == RUNNING && !dump_providers_.empty()) ||
-         (state_ == ENABLED && dump_providers_.empty()));
-
-  dump_providers_.clear();
-
-  // This is really MemoryDumpManager::GetDumpProvidersForPolling, modulo
-  // testing.
-  get_dump_providers_function_.Run(&dump_providers_);
-
-  if (state_ == ENABLED && !dump_providers_.empty()) {
-    // It's now time to start polling for realz.
-    state_ = RUNNING;
-    task_runner_->PostTask(FROM_HERE,
-                           Bind(&MemoryPeakDetector::PollMemoryAndDetectPeak,
-                                Unretained(this), ++generation_));
-  } else if (state_ == RUNNING && dump_providers_.empty()) {
-    // Will cause the next PollMemoryAndDetectPeak() task to early return.
-    state_ = ENABLED;
-    ++generation_;
-  }
-}
-
-void MemoryPeakDetector::PollMemoryAndDetectPeak(uint32_t expected_generation) {
-  if (state_ != RUNNING || expected_generation != generation_)
-    return;
-
-  // We should never end up in a situation where state_ == RUNNING but all dump
-  // providers are gone.
-  DCHECK(!dump_providers_.empty());
-
-  poll_tasks_count_for_testing_++;
-  uint64_t memory_total = 0;
-  for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info :
-       dump_providers_) {
-    DCHECK(mdp_info->options.is_fast_polling_supported);
-    uint64_t value = 0;
-    mdp_info->dump_provider->PollFastMemoryTotal(&value);
-    memory_total += value;
-  }
-  ignore_result(memory_total);  // TODO(primiano): temporary until next CL.
-
-  // TODO(primiano): Move actual peak detection logic from the
-  // MemoryDumpScheduler in next CLs.
-
-  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&MemoryPeakDetector::PollMemoryAndDetectPeak, Unretained(this),
-           expected_generation),
-      TimeDelta::FromMilliseconds(polling_interval_ms_));
-}
-
-}  // namespace trace_event
-}  // namespace base
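
The deleted detector cancels stale self-posted polling tasks with a
generation counter rather than cancelable callbacks: every state change
bumps |generation_|, and each task carries the value it was scheduled
under. The pattern in isolation, with a plain task queue standing in for
the task runner (all names assumed):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <queue>

class Poller {
 public:
  void Start() { Post(++generation_); }
  void Stop() { ++generation_; }  // Invalidates all outstanding tasks.

  // Runs one "tick": everything queued so far, but not tasks re-posted
  // during this call (mimicking PostDelayedTask's next-turn delivery).
  void RunPending() {
    size_t n = tasks_.size();
    while (n-- > 0) {
      auto task = std::move(tasks_.front());
      tasks_.pop();
      task();
    }
  }

  int polls_run = 0;

 private:
  void Post(uint32_t expected_generation) {
    tasks_.push([this, expected_generation] {
      if (expected_generation != generation_)
        return;  // Stale: a Stop()/Start() happened after scheduling.
      ++polls_run;
      Post(expected_generation);  // Re-arm, like PollMemoryAndDetectPeak.
    });
  }

  uint32_t generation_ = 0;
  std::queue<std::function<void()>> tasks_;
};

int main() {
  Poller p;
  p.Start();
  p.RunPending();  // polls_run == 1; a follow-up task was re-posted
  p.Stop();        // bumps the generation
  p.RunPending();  // the stale follow-up bails out; polls_run stays 1
}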
diff --git a/base/trace_event/memory_peak_detector.h b/base/trace_event/memory_peak_detector.h
deleted file mode 100644
index b914295..0000000
--- a/base/trace_event/memory_peak_detector.h
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
-#define BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
-
-#include <stdint.h>
-
-#include <memory>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-
-namespace base {
-
-class SequencedTaskRunner;
-
-namespace trace_event {
-
-struct MemoryDumpProviderInfo;
-
-// This class is NOT thread-safe; the caller has to ensure linearization of
-// the calls to the public methods. In any case, the public methods do NOT have
-// to be called from the |task_runner| on which the polling tasks run.
-class BASE_EXPORT MemoryPeakDetector {
- public:
-  using OnPeakDetectedCallback = RepeatingClosure;
-  using DumpProvidersList = std::vector<scoped_refptr<MemoryDumpProviderInfo>>;
-  using GetDumpProvidersFunction = RepeatingCallback<void(DumpProvidersList*)>;
-
-  enum State {
-    NOT_INITIALIZED = 0,  // Before Setup()
-    DISABLED,             // Before Start() or after Stop().
-    ENABLED,              // After Start() but no dump_providers_ are available.
-    RUNNING  // After Start(). The PollMemoryAndDetectPeak() task is scheduled.
-  };
-
-  static MemoryPeakDetector* GetInstance();
-
-  // Configures the peak detector, binding the polling tasks on the given
-  // thread. Setup() can be called several times, provided that: (1) Stop()
-  // is called; (2a) the previous task_runner is flushed or (2b) the task_runner
-  // remains the same.
-  // GetDumpProvidersFunction: is the function that will be invoked to get
-  //   an updated list of polling-capable dump providers. This is really just
-  //   MemoryDumpManager::GetDumpProvidersForPolling, but this extra level of
-  //   indirection allows easier testing.
-  // SequencedTaskRunner: the task runner where PollMemoryAndDetectPeak() will
-  //  be periodically called.
-  // OnPeakDetectedCallback: a callback that will be invoked on the
-  //   given task runner when a memory peak is detected.
-  void Setup(const GetDumpProvidersFunction&,
-             const scoped_refptr<SequencedTaskRunner>&,
-             const OnPeakDetectedCallback&);
-
-  // Releases the |task_runner_| and the bound callbacks.
-  void TearDown();
-
-  // This posts a task onto the passed task runner which refreshes the list of
-  // dump providers via the GetDumpProvidersFunction. If at least one dump
-  // provider is available, this starts immediately polling on the task runner.
-  // If not, the detector remains in the ENABLED state and will start polling
-  // automatically (i.e. without requiring another call to Start()) on the
-  // next call to NotifyMemoryDumpProvidersChanged().
-  void Start();
-
-  // Stops the polling on the task runner (if it was active at all). This
-  // doesn't wait for the task runner to drain pending tasks, so it is possible
-  // that a poll will happen concurrently with (or immediately after) the
-  // Stop() call. It is the responsibility of the caller to drain or
-  // synchronize with the task runner.
-  void Stop();
-
-  // Used by MemoryDumpManager to notify that the list of polling-capable dump
-  // providers has changed. The peak detector will reload the list on the next
-  // polling task. This function can be called before Setup(), in which
-  // case will be just a no-op.
-  void NotifyMemoryDumpProvidersChanged();
-
- private:
-  friend class MemoryPeakDetectorTest;
-
-  MemoryPeakDetector();
-  ~MemoryPeakDetector();
-
-  // All these methods are always called on the |task_runner_|.
-  void StartInternal();
-  void StopInternal();
-  void TearDownInternal();
-  void ReloadDumpProvidersAndStartPollingIfNeeded();
-  void PollMemoryAndDetectPeak(uint32_t expected_generation);
-
-  // It is safe to call these testing methods only on the |task_runner_|.
-  State state_for_testing() const { return state_; }
-  uint32_t poll_tasks_count_for_testing() const {
-    return poll_tasks_count_for_testing_;
-  }
-
-  // The task runner where all the internal calls are posted onto. This field
-  // must NOT be accessed by the tasks posted on the |task_runner_| because
-  // there might still be outstanding tasks on the |task_runner_| while this
-  // refptr is reset. This can only be safely accessed by the public methods
-  // above, which the client of this class is supposed to call sequentially.
-  scoped_refptr<SequencedTaskRunner> task_runner_;
-
-  // After the Setup() call, the fields below must be accessed only from
-  // the |task_runner_|.
-
-  // Bound function to get an updated list of polling-capable dump providers.
-  GetDumpProvidersFunction get_dump_providers_function_;
-
-  // The callback to invoke when peaks are detected.
-  OnPeakDetectedCallback on_peak_detected_callback_;
-
-  // List of polling-aware dump providers to invoke upon each poll.
-  DumpProvidersList dump_providers_;
-
-  // The generation is incremented every time the |state_| is changed and causes
-  // PollMemoryAndDetectPeak() to early out if the posted task doesn't match the
-  // most recent |generation_|. This allows dropping outstanding
-  // PostDelayedTask calls that refer to an old sequence that was later
-  // Stop()-ed or disabled because of NotifyMemoryDumpProvidersChanged().
-  uint32_t generation_;
-
-  State state_;
-  uint32_t polling_interval_ms_;
-  uint32_t poll_tasks_count_for_testing_;
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryPeakDetector);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_MEMORY_PEAK_DETECTOR_H_
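
The contract in the header deleted above reduces to a fixed call sequence:
Setup() once per task runner, Start()/Stop() freely in between, TearDown()
to release the runner. A hedged sketch against the deleted interface; the
bound helpers (GetProvidersForPolling, OnPeakDetected) are assumed and the
snippet is not standalone-compilable.

// Hedged sketch of the deleted MemoryPeakDetector's expected usage.
void PeakDetectionSessionExample(
    scoped_refptr<SequencedTaskRunner> polling_task_runner) {
  MemoryPeakDetector* detector = MemoryPeakDetector::GetInstance();

  // NOT_INITIALIZED -> DISABLED: bind the provider getter, the polling
  // task runner and the peak callback.
  detector->Setup(Bind(&GetProvidersForPolling), polling_task_runner,
                  Bind(&OnPeakDetected));

  detector->Start();  // DISABLED -> ENABLED (RUNNING once MDPs exist)
  // ... dump providers register/unregister; polls run on the runner ...
  detector->Stop();      // back to DISABLED; in-flight polls early-out
  detector->TearDown();  // releases the task runner -> NOT_INITIALIZED
}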
diff --git a/base/trace_event/memory_peak_detector_unittest.cc b/base/trace_event/memory_peak_detector_unittest.cc
deleted file mode 100644
index 9a9b922..0000000
--- a/base/trace_event/memory_peak_detector_unittest.cc
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_peak_detector.h"
-
-#include <memory>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/run_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/test/test_timeouts.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/memory_dump_provider_info.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::Invoke;
-using ::testing::Return;
-
-namespace base {
-namespace trace_event {
-
-namespace {
-
-class MockMemoryDumpProvider : public MemoryDumpProvider {
- public:
-  bool OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump*) override {
-    NOTREACHED();
-    return true;
-  }
-
-  MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t*));
-};
-
-// Wrapper to use gmock on a callback.
-struct OnPeakDetectedWrapper {
-  MOCK_METHOD0(OnPeak, void());
-};
-
-}  // namespace
-
-class MemoryPeakDetectorTest : public testing::Test {
- public:
-  struct FriendDeleter {
-    void operator()(MemoryPeakDetector* inst) { delete inst; }
-  };
-
-  MemoryPeakDetectorTest() : testing::Test() {}
-
-  std::unique_ptr<MemoryPeakDetector, FriendDeleter> NewInstance() {
-    return std::unique_ptr<MemoryPeakDetector, FriendDeleter>(
-        new MemoryPeakDetector());
-  }
-
-  void RestartThreadAndReinitializePeakDetector() {
-    bg_thread_.reset(new Thread("Peak Detector Test Thread"));
-    bg_thread_->Start();
-    peak_detector_ = NewInstance();
-    peak_detector_->Setup(
-        Bind(&MemoryPeakDetectorTest::MockGetDumpProviders, Unretained(this)),
-        bg_thread_->task_runner(),
-        Bind(&OnPeakDetectedWrapper::OnPeak, Unretained(&on_peak_callback_)));
-  }
-
-  void SetUp() override {
-    get_mdp_call_count_ = 0;
-    RestartThreadAndReinitializePeakDetector();
-  }
-
-  void TearDown() override {
-    peak_detector_->TearDown();
-    bg_thread_->FlushForTesting();
-    EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
-    dump_providers_.clear();
-  }
-
-  // Calls MemoryPeakDetector::state_for_testing() on the bg thread and returns
-  // the result on the current thread.
-  MemoryPeakDetector::State GetPeakDetectorState() {
-    WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                      WaitableEvent::InitialState::NOT_SIGNALED);
-    MemoryPeakDetector::State res = MemoryPeakDetector::NOT_INITIALIZED;
-    auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
-                     MemoryPeakDetector::State* res) {
-      *res = peak_detector->state_for_testing();
-      evt->Signal();
-    };
-    bg_thread_->task_runner()->PostTask(
-        FROM_HERE, Bind(get_fn, Unretained(&*peak_detector_), Unretained(&evt),
-                        Unretained(&res)));
-    evt.Wait();
-    return res;
-  }
-
-  // Calls MemoryPeakDetector::poll_tasks_count_for_testing() on the bg thread
-  // and returns the result on the current thread.
-  uint32_t GetNumPollingTasksRan() {
-    uint32_t res = 0;
-    auto get_fn = [](MemoryPeakDetector* peak_detector, WaitableEvent* evt,
-                     uint32_t* res) {
-      *res = peak_detector->poll_tasks_count_for_testing();
-      evt->Signal();
-    };
-
-    WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                      WaitableEvent::InitialState::NOT_SIGNALED);
-    bg_thread_->task_runner()->PostTask(
-        FROM_HERE, Bind(get_fn, Unretained(&*peak_detector_), Unretained(&evt),
-                        Unretained(&res)));
-    evt.Wait();
-    return res;
-  }
-
-  // Called on the |bg_thread_|.
-  void MockGetDumpProviders(MemoryPeakDetector::DumpProvidersList* mdps) {
-    get_mdp_call_count_++;
-    *mdps = dump_providers_;
-  }
-
-  uint32_t GetNumGetDumpProvidersCalls() {
-    bg_thread_->FlushForTesting();
-    return get_mdp_call_count_;
-  }
-
-  scoped_refptr<MemoryDumpProviderInfo> CreateMockDumpProvider() {
-    std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider());
-    MemoryDumpProvider::Options opt;
-    opt.is_fast_polling_supported = true;
-    scoped_refptr<MemoryDumpProviderInfo> mdp_info(
-        new MemoryDumpProviderInfo(mdp.get(), "Mock MDP", nullptr, opt, false));
-
-    // The |mdp| instance will be destroyed together with the |mdp_info|.
-    mdp_info->owned_dump_provider = std::move(mdp);
-    return mdp_info;
-  }
-
-  static MockMemoryDumpProvider& GetMockMDP(
-      const scoped_refptr<MemoryDumpProviderInfo>& mdp_info) {
-    return *static_cast<MockMemoryDumpProvider*>(mdp_info->dump_provider);
-  }
-
- protected:
-  MemoryPeakDetector::DumpProvidersList dump_providers_;
-  uint32_t get_mdp_call_count_;
-  std::unique_ptr<MemoryPeakDetector, FriendDeleter> peak_detector_;
-  std::unique_ptr<Thread> bg_thread_;
-  OnPeakDetectedWrapper on_peak_callback_;
-};
-
-TEST_F(MemoryPeakDetectorTest, GetDumpProvidersFunctionCalled) {
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  peak_detector_->Start();
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(0u, GetNumPollingTasksRan());
-}
-
-TEST_F(MemoryPeakDetectorTest, NotifyBeforeInitialize) {
-  peak_detector_->TearDown();
-
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
-  dump_providers_.push_back(mdp);
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  EXPECT_EQ(MemoryPeakDetector::NOT_INITIALIZED, GetPeakDetectorState());
-  RestartThreadAndReinitializePeakDetector();
-
-  peak_detector_->Start();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-  evt.Wait();  // Wait for a PollFastMemoryTotal() call.
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-  EXPECT_GE(GetNumPollingTasksRan(), 1u);
-}
-
-TEST_F(MemoryPeakDetectorTest, DoubleStop) {
-  peak_detector_->Start();
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-  EXPECT_EQ(0u, GetNumPollingTasksRan());
-}
-
-TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredBeforeStart) {
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
-  dump_providers_.push_back(mdp);
-
-  peak_detector_->Start();
-  evt.Wait();  // Signaled when PollFastMemoryTotal() is called on the MockMDP.
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-  EXPECT_GT(GetNumPollingTasksRan(), 0u);
-}
-
-TEST_F(MemoryPeakDetectorTest, ReInitializeAndRebindToNewThread) {
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
-  dump_providers_.push_back(mdp);
-
-  for (int i = 0; i < 5; ++i) {
-    evt.Reset();
-    peak_detector_->Start();
-    evt.Wait();  // Wait for a PollFastMemoryTotal() call.
-    // Check that calling TearDown implicitly does a Stop().
-    peak_detector_->TearDown();
-
-    // Reinitialize and re-bind to a new task runner.
-    RestartThreadAndReinitializePeakDetector();
-  }
-}
-
-TEST_F(MemoryPeakDetectorTest, OneDumpProviderRegisteredOutOfBand) {
-  peak_detector_->Start();
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-  EXPECT_EQ(1u, GetNumGetDumpProvidersCalls());
-
-  // Check that no poll tasks are posted before any dump provider is registered.
-  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
-  EXPECT_EQ(0u, GetNumPollingTasksRan());
-
-  // Register the MDP after Start() has been issued and expect that the
-  // PeakDetector transitions ENABLED -> RUNNING on the next
-  // NotifyMemoryDumpProvidersChanged() call.
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt](uint64_t*) { evt.Signal(); }));
-  dump_providers_.push_back(mdp);
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-
-  evt.Wait();  // Signaled when PollFastMemoryTotal() is called on the MockMDP.
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-  EXPECT_EQ(2u, GetNumGetDumpProvidersCalls());
-
-  // Now simulate the unregistration and expect that the PeakDetector
-  // transitions back to ENABLED.
-  dump_providers_.clear();
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-  EXPECT_EQ(3u, GetNumGetDumpProvidersCalls());
-  uint32_t num_poll_tasks = GetNumPollingTasksRan();
-  EXPECT_GT(num_poll_tasks, 0u);
-
-  // At this point, no more polling tasks should be posted.
-  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
-}
-
-// Test that a back-to-back sequence of Start()/Stop() calls doesn't create
-// several outstanding timer tasks and instead respects polling_interval_ms.
-TEST_F(MemoryPeakDetectorTest, StartStopQuickly) {
-  WaitableEvent evt(WaitableEvent::ResetPolicy::MANUAL,
-                    WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp = CreateMockDumpProvider();
-  dump_providers_.push_back(mdp);
-  const uint32_t kNumPolls = 20;
-  uint32_t polls_done = 0;
-  EXPECT_CALL(GetMockMDP(mdp), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&polls_done, &evt, kNumPolls](uint64_t*) {
-        if (++polls_done == kNumPolls)
-          evt.Signal();
-      }));
-
-  const TimeTicks tstart = TimeTicks::Now();
-  for (int i = 0; i < 5; i++) {
-    peak_detector_->Start();
-    peak_detector_->Stop();
-  }
-  peak_detector_->Start();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-  evt.Wait();  // Wait for kNumPolls.
-  const double time_ms = (TimeTicks::Now() - tstart).InMillisecondsF();
-
-  // TODO(primiano): this will become config.polling_interval_ms in the next CL.
-  const uint32_t polling_interval_ms = 1;
-  EXPECT_GE(time_ms, kNumPolls * polling_interval_ms);
-  peak_detector_->Stop();
-}
-
-TEST_F(MemoryPeakDetectorTest, RegisterAndUnregisterTwoDumpProviders) {
-  WaitableEvent evt1(WaitableEvent::ResetPolicy::MANUAL,
-                     WaitableEvent::InitialState::NOT_SIGNALED);
-  WaitableEvent evt2(WaitableEvent::ResetPolicy::MANUAL,
-                     WaitableEvent::InitialState::NOT_SIGNALED);
-  scoped_refptr<MemoryDumpProviderInfo> mdp1 = CreateMockDumpProvider();
-  scoped_refptr<MemoryDumpProviderInfo> mdp2 = CreateMockDumpProvider();
-  EXPECT_CALL(GetMockMDP(mdp1), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt1](uint64_t*) { evt1.Signal(); }));
-  EXPECT_CALL(GetMockMDP(mdp2), PollFastMemoryTotal(_))
-      .WillRepeatedly(Invoke([&evt2](uint64_t*) { evt2.Signal(); }));
-
-  // Register only one MDP and start the detector.
-  dump_providers_.push_back(mdp1);
-  peak_detector_->Start();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
-  // Wait for one poll task and then also register the other one.
-  evt1.Wait();
-  dump_providers_.push_back(mdp2);
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  evt2.Wait();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
-  // Now unregister the first MDP and check that everything is still running.
-  dump_providers_.erase(dump_providers_.begin());
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-
-  // Now unregister both and check that the detector goes to idle.
-  dump_providers_.clear();
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-
-  // Now re-register both and check that the detector re-activates and posts
-  // new polling tasks.
-  uint32_t num_poll_tasks = GetNumPollingTasksRan();
-  evt1.Reset();
-  evt2.Reset();
-  dump_providers_.push_back(mdp1);
-  dump_providers_.push_back(mdp2);
-  peak_detector_->NotifyMemoryDumpProvidersChanged();
-  evt1.Wait();
-  evt2.Wait();
-  EXPECT_EQ(MemoryPeakDetector::RUNNING, GetPeakDetectorState());
-  EXPECT_GT(GetNumPollingTasksRan(), num_poll_tasks);
-
-  // Stop everything, tear down the MDPs, restart the detector, and check
-  // that the detector doesn't accidentally try to re-access them.
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  dump_providers_.clear();
-  mdp1 = nullptr;
-  mdp2 = nullptr;
-
-  num_poll_tasks = GetNumPollingTasksRan();
-  peak_detector_->Start();
-  EXPECT_EQ(MemoryPeakDetector::ENABLED, GetPeakDetectorState());
-  PlatformThread::Sleep(TestTimeouts::tiny_timeout());
-
-  peak_detector_->Stop();
-  EXPECT_EQ(MemoryPeakDetector::DISABLED, GetPeakDetectorState());
-  EXPECT_EQ(num_poll_tasks, GetNumPollingTasksRan());
-
-  EXPECT_EQ(6u, GetNumGetDumpProvidersCalls());
-}
-
-}  // namespace trace_event
-}  // namespace base
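
The deleted tests above all follow one synchronization pattern: the mock's
PollFastMemoryTotal() runs on the detector's background thread and signals a
WaitableEvent that the test thread blocks on. Below is a minimal, library-free
sketch of that pattern, with std::thread and std::condition_variable standing
in for base::Thread and base::WaitableEvent (PollCounter and its members are
illustrative names, not part of the code above):

  #include <condition_variable>
  #include <mutex>
  #include <thread>

  // PollCounter stands in for the WaitableEvent handshake: the poller
  // thread bumps the count and the test thread blocks until it reaches
  // a target, mirroring the kNumPolls wait in StartStopQuickly.
  struct PollCounter {
    std::mutex mu;
    std::condition_variable cv;
    unsigned polls = 0;

    void OnPoll(unsigned target) {
      std::lock_guard<std::mutex> lock(mu);
      if (++polls >= target) cv.notify_one();
    }
    void WaitFor(unsigned target) {
      std::unique_lock<std::mutex> lock(mu);
      cv.wait(lock, [&] { return polls >= target; });
    }
  };

  int main() {
    PollCounter counter;
    std::thread poller([&] {  // Stands in for the detector's bg_thread_.
      for (int i = 0; i < 20; ++i) counter.OnPoll(20);
    });
    counter.WaitFor(20);  // Stands in for evt.Wait() in the tests above.
    poller.join();
    return 0;
  }

Waiting on a predicate rather than a bare notification avoids a missed wakeup
when the poller reaches the target before the test starts waiting; the
manual-reset WaitableEvent plus the explicit Reset() calls in the tests above
serve the same purpose.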
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 7ee9a4a..36de107 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -11,7 +11,11 @@
 #include "base/json/json_reader.h"
 #include "base/json/json_writer.h"
 #include "base/memory/ptr_util.h"
+#include "base/strings/pattern.h"
 #include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/memory_dump_request_args.h"
 #include "base/trace_event/trace_event.h"
@@ -33,6 +37,11 @@
 const char kRecordModeParam[] = "record_mode";
 const char kEnableSystraceParam[] = "enable_systrace";
 const char kEnableArgumentFilterParam[] = "enable_argument_filter";
+const char kIncludedCategoriesParam[] = "included_categories";
+const char kExcludedCategoriesParam[] = "excluded_categories";
+const char kSyntheticDelaysParam[] = "synthetic_delays";
+
+const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
 
 // String parameters that are used to parse the memory dump config in a
 // trace config string.
@@ -139,36 +148,27 @@
     return *this;
 
   predicate_name_ = rhs.predicate_name_;
-  category_filter_ = rhs.category_filter_;
-
+  included_categories_ = rhs.included_categories_;
+  excluded_categories_ = rhs.excluded_categories_;
   if (rhs.args_)
     args_ = rhs.args_->CreateDeepCopy();
 
   return *this;
 }
 
-void TraceConfig::EventFilterConfig::InitializeFromConfigDict(
-    const base::DictionaryValue* event_filter) {
-  category_filter_.InitializeFromConfigDict(*event_filter);
-
-  const base::DictionaryValue* args_dict = nullptr;
-  if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
-    args_ = args_dict->CreateDeepCopy();
+void TraceConfig::EventFilterConfig::AddIncludedCategory(
+    const std::string& category) {
+  included_categories_.push_back(category);
 }
 
-void TraceConfig::EventFilterConfig::SetCategoryFilter(
-    const TraceConfigCategoryFilter& category_filter) {
-  category_filter_ = category_filter;
+void TraceConfig::EventFilterConfig::AddExcludedCategory(
+    const std::string& category) {
+  excluded_categories_.push_back(category);
 }
 
-void TraceConfig::EventFilterConfig::ToDict(
-    DictionaryValue* filter_dict) const {
-  filter_dict->SetString(kFilterPredicateParam, predicate_name());
-
-  category_filter_.ToDict(filter_dict);
-
-  if (args_)
-    filter_dict->Set(kFilterArgsParam, args_->CreateDeepCopy());
+void TraceConfig::EventFilterConfig::SetArgs(
+    std::unique_ptr<base::DictionaryValue> args) {
+  args_ = std::move(args);
 }
 
 bool TraceConfig::EventFilterConfig::GetArgAsSet(
@@ -186,8 +186,27 @@
 }
 
 bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
-    const StringPiece& category_group_name) const {
-  return category_filter_.IsCategoryGroupEnabled(category_group_name);
+    const char* category_group_name) const {
+  CStringTokenizer category_group_tokens(
+      category_group_name, category_group_name + strlen(category_group_name),
+      ",");
+  while (category_group_tokens.GetNext()) {
+    std::string category_group_token = category_group_tokens.token();
+
+    for (const auto& excluded_category : excluded_categories_) {
+      if (base::MatchPattern(category_group_token, excluded_category)) {
+        return false;
+      }
+    }
+
+    for (const auto& included_category : included_categories_) {
+      if (base::MatchPattern(category_group_token, included_category)) {
+        return true;
+      }
+    }
+  }
+
+  return false;
 }
 
 TraceConfig::TraceConfig() {
@@ -236,8 +255,11 @@
     : record_mode_(tc.record_mode_),
       enable_systrace_(tc.enable_systrace_),
       enable_argument_filter_(tc.enable_argument_filter_),
-      category_filter_(tc.category_filter_),
       memory_dump_config_(tc.memory_dump_config_),
+      included_categories_(tc.included_categories_),
+      disabled_categories_(tc.disabled_categories_),
+      excluded_categories_(tc.excluded_categories_),
+      synthetic_delays_(tc.synthetic_delays_),
       event_filters_(tc.event_filters_) {}
 
 TraceConfig::~TraceConfig() {
@@ -250,14 +272,17 @@
   record_mode_ = rhs.record_mode_;
   enable_systrace_ = rhs.enable_systrace_;
   enable_argument_filter_ = rhs.enable_argument_filter_;
-  category_filter_ = rhs.category_filter_;
   memory_dump_config_ = rhs.memory_dump_config_;
+  included_categories_ = rhs.included_categories_;
+  disabled_categories_ = rhs.disabled_categories_;
+  excluded_categories_ = rhs.excluded_categories_;
+  synthetic_delays_ = rhs.synthetic_delays_;
   event_filters_ = rhs.event_filters_;
   return *this;
 }
 
 const TraceConfig::StringList& TraceConfig::GetSyntheticDelayValues() const {
-  return category_filter_.synthetic_delays();
+  return synthetic_delays_;
 }
 
 std::string TraceConfig::ToString() const {
@@ -273,14 +298,69 @@
 }
 
 std::string TraceConfig::ToCategoryFilterString() const {
-  return category_filter_.ToFilterString();
+  std::string filter_string;
+  WriteCategoryFilterString(included_categories_, &filter_string, true);
+  WriteCategoryFilterString(disabled_categories_, &filter_string, true);
+  WriteCategoryFilterString(excluded_categories_, &filter_string, false);
+  WriteCategoryFilterString(synthetic_delays_, &filter_string);
+  return filter_string;
 }
 
 bool TraceConfig::IsCategoryGroupEnabled(
-    const StringPiece& category_group_name) const {
+    const char* category_group_name) const {
   // TraceLog should call this method only as part of enabling/disabling
   // categories.
-  return category_filter_.IsCategoryGroupEnabled(category_group_name);
+
+  bool had_enabled_by_default = false;
+  DCHECK(category_group_name);
+  std::string category_group_name_str = category_group_name;
+  StringTokenizer category_group_tokens(category_group_name_str, ",");
+  while (category_group_tokens.GetNext()) {
+    std::string category_group_token = category_group_tokens.token();
+    // Don't allow empty tokens, nor tokens with leading or trailing space.
+    DCHECK(!TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+               category_group_token))
+        << "Disallowed category string";
+    if (IsCategoryEnabled(category_group_token.c_str()))
+      return true;
+
+    if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
+      had_enabled_by_default = true;
+  }
+  // Do a second pass to check for explicitly disabled categories
+  // (those explicitly enabled have priority due to first pass).
+  category_group_tokens.Reset();
+  bool category_group_disabled = false;
+  while (category_group_tokens.GetNext()) {
+    std::string category_group_token = category_group_tokens.token();
+    for (const std::string& category : excluded_categories_) {
+      if (MatchPattern(category_group_token, category)) {
+        // The current token of category_group_name is present in the
+        // excluded_ list. Flag the exclusion and proceed to check whether
+        // any of the remaining categories of category_group_name is absent
+        // from the excluded_ list.
+        category_group_disabled = true;
+        break;
+      }
+      // One of the categories of category_group_name is not present in the
+      // excluded_ list. So, if it's not a disabled-by-default category, it
+      // has to be in the included_ list. Enable the category_group_name
+      // for recording.
+      if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*"))) {
+        category_group_disabled = false;
+      }
+    }
+    // One of the categories present in category_group_name is not present in
+    // the excluded_ list. This implies the category_group_name group can be
+    // enabled for recording, since one of its categories is enabled.
+    if (!category_group_disabled)
+      break;
+  }
+  // If the category group is not excluded and there are no included patterns,
+  // we consider this category group enabled, as long as it had categories
+  // other than disabled-by-default.
+  return !category_group_disabled && had_enabled_by_default &&
+         included_categories_.empty();
 }
 
 void TraceConfig::Merge(const TraceConfig& config) {
@@ -291,10 +371,28 @@
                 << "set of options.";
   }
 
-  category_filter_.Merge(config.category_filter_);
+  // Keep included patterns only if both filters have an included entry.
+  // Otherwise, one of the filters was specifying "*" and we want to honor
+  // the broadest filter.
+  if (HasIncludedPatterns() && config.HasIncludedPatterns()) {
+    included_categories_.insert(included_categories_.end(),
+                                config.included_categories_.begin(),
+                                config.included_categories_.end());
+  } else {
+    included_categories_.clear();
+  }
 
   memory_dump_config_.Merge(config.memory_dump_config_);
 
+  disabled_categories_.insert(disabled_categories_.end(),
+                              config.disabled_categories_.begin(),
+                              config.disabled_categories_.end());
+  excluded_categories_.insert(excluded_categories_.end(),
+                              config.excluded_categories_.begin(),
+                              config.excluded_categories_.end());
+  synthetic_delays_.insert(synthetic_delays_.end(),
+                           config.synthetic_delays_.begin(),
+                           config.synthetic_delays_.end());
   event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
                         config.event_filters().end());
 }
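
The restored Merge() keeps included patterns only when both configs specify
some; the excluded, disabled, and synthetic-delay lists are simply
concatenated. A minimal sketch of just the include-list rule (MergeIncluded is
an illustrative standalone helper, not part of TraceConfig):

  #include <string>
  #include <vector>

  using StringList = std::vector<std::string>;

  // Mirrors the include-list rule above: an empty include list means
  // "include everything" ("*"), so a merge must not narrow the result.
  void MergeIncluded(StringList& mine, const StringList& theirs) {
    if (!mine.empty() && !theirs.empty()) {
      mine.insert(mine.end(), theirs.begin(), theirs.end());
    } else {
      mine.clear();  // One side was "*": keep the broadest filter.
    }
  }
  // {"a"} merged with {"b"} -> {"a", "b"}; {"a"} merged with {} -> {}.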
@@ -303,7 +401,10 @@
   record_mode_ = RECORD_UNTIL_FULL;
   enable_systrace_ = false;
   enable_argument_filter_ = false;
-  category_filter_.Clear();
+  included_categories_.clear();
+  disabled_categories_.clear();
+  excluded_categories_.clear();
+  synthetic_delays_.clear();
   memory_dump_config_.Clear();
   event_filters_.clear();
 }
@@ -334,13 +435,19 @@
   enable_argument_filter_ =
       dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
 
-  category_filter_.InitializeFromConfigDict(dict);
+  const ListValue* category_list = nullptr;
+  if (dict.GetList(kIncludedCategoriesParam, &category_list))
+    SetCategoriesFromIncludedList(*category_list);
+  if (dict.GetList(kExcludedCategoriesParam, &category_list))
+    SetCategoriesFromExcludedList(*category_list);
+  if (dict.GetList(kSyntheticDelaysParam, &category_list))
+    SetSyntheticDelaysFromList(*category_list);
 
   const base::ListValue* category_event_filters = nullptr;
   if (dict.GetList(kEventFiltersParam, &category_event_filters))
     SetEventFiltersFromConfigList(*category_event_filters);
 
-  if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+  if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
     // If dump triggers are not set, the client is using the legacy format
     // with just the category enabled; use the default periodic dump config.
     const DictionaryValue* memory_dump_config = nullptr;
@@ -361,8 +468,37 @@
 
 void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
                                         StringPiece trace_options_string) {
-  if (!category_filter_string.empty())
-    category_filter_.InitializeFromString(category_filter_string);
+  if (!category_filter_string.empty()) {
+    std::vector<std::string> split = SplitString(
+        category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    for (const std::string& category : split) {
+      // Ignore empty categories.
+      if (category.empty())
+        continue;
+      // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
+      if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
+                     CompareCase::SENSITIVE) &&
+          category.back() == ')') {
+        std::string synthetic_category = category.substr(
+            strlen(kSyntheticDelayCategoryFilterPrefix),
+            category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
+        size_t name_length = synthetic_category.find(';');
+        if (name_length != std::string::npos && name_length > 0 &&
+            name_length != synthetic_category.size() - 1) {
+          synthetic_delays_.push_back(synthetic_category);
+        }
+      } else if (category.front() == '-') {
+        // Excluded categories start with '-'.
+        // Remove '-' from category string.
+        excluded_categories_.push_back(category.substr(1));
+      } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+                                  TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+        disabled_categories_.push_back(category);
+      } else {
+        included_categories_.push_back(category);
+      }
+    }
+  }
 
   record_mode_ = RECORD_UNTIL_FULL;
   enable_systrace_ = false;
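
The legacy filter-string parsing restored above classifies each comma-separated
token: DELAY(...) entries become synthetic delays, a leading '-' marks an
exclusion, the disabled-by-default- prefix routes a category to the disabled
list, and everything else is included. A self-contained sketch of those rules
using only std::string (ParseLegacyFilter and ParsedFilter are illustrative
names; the real code uses base::SplitString with whitespace trimming and
additionally validates the ';' inside DELAY(...), as shown above):

  #include <string>
  #include <vector>

  struct ParsedFilter {
    std::vector<std::string> included, disabled, excluded, delays;
  };

  ParsedFilter ParseLegacyFilter(const std::string& s) {
    ParsedFilter out;
    const std::string kDelayPrefix = "DELAY(";
    const std::string kDisabledPrefix = "disabled-by-default-";
    size_t start = 0;
    while (start <= s.size()) {
      size_t end = s.find(',', start);
      if (end == std::string::npos) end = s.size();
      std::string cat = s.substr(start, end - start);
      start = end + 1;
      if (cat.empty()) continue;  // Ignore empty categories.
      if (cat.compare(0, kDelayPrefix.size(), kDelayPrefix) == 0 &&
          cat.back() == ')') {
        // Keep only the "delay;option;..." payload inside DELAY(...).
        out.delays.push_back(
            cat.substr(kDelayPrefix.size(),
                       cat.size() - kDelayPrefix.size() - 1));
      } else if (cat.front() == '-') {
        out.excluded.push_back(cat.substr(1));  // Strip the leading '-'.
      } else if (cat.compare(0, kDisabledPrefix.size(),
                             kDisabledPrefix) == 0) {
        out.disabled.push_back(cat);
      } else {
        out.included.push_back(cat);
      }
    }
    return out;
  }
  // ParseLegacyFilter("a,-b,disabled-by-default-cc,DELAY(d;16)") yields
  // included {"a"}, excluded {"b"}, disabled {"disabled-by-default-cc"},
  // delays {"d;16"}.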
@@ -387,11 +523,64 @@
     }
   }
 
-  if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+  if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
     SetDefaultMemoryDumpConfig();
   }
 }
 
+void TraceConfig::SetCategoriesFromIncludedList(
+    const ListValue& included_list) {
+  included_categories_.clear();
+  for (size_t i = 0; i < included_list.GetSize(); ++i) {
+    std::string category;
+    if (!included_list.GetString(i, &category))
+      continue;
+    if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+                         TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+      disabled_categories_.push_back(category);
+    } else {
+      included_categories_.push_back(category);
+    }
+  }
+}
+
+void TraceConfig::SetCategoriesFromExcludedList(
+    const ListValue& excluded_list) {
+  excluded_categories_.clear();
+  for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
+    std::string category;
+    if (excluded_list.GetString(i, &category))
+      excluded_categories_.push_back(category);
+  }
+}
+
+void TraceConfig::SetSyntheticDelaysFromList(const ListValue& list) {
+  synthetic_delays_.clear();
+  for (size_t i = 0; i < list.GetSize(); ++i) {
+    std::string delay;
+    if (!list.GetString(i, &delay))
+      continue;
+    // Synthetic delays are of the form "delay;option;option;...".
+    size_t name_length = delay.find(';');
+    if (name_length != std::string::npos && name_length > 0 &&
+        name_length != delay.size() - 1) {
+      synthetic_delays_.push_back(delay);
+    }
+  }
+}
+
+void TraceConfig::AddCategoryToDict(DictionaryValue* dict,
+                                    const char* param,
+                                    const StringList& categories) const {
+  if (categories.empty())
+    return;
+
+  auto list = MakeUnique<ListValue>();
+  for (const std::string& category : categories)
+    list->AppendString(category);
+  dict->Set(param, std::move(list));
+}
+
 void TraceConfig::SetMemoryDumpConfigFromConfigDict(
     const DictionaryValue& memory_dump_config) {
   // Set allowed dump modes.
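
SetSyntheticDelaysFromList() above accepts an entry only if it has a non-empty
delay name, a ';' separator, and at least one option character after it.
Restated as a tiny standalone helper (IsValidSyntheticDelay is an illustrative
name, not part of TraceConfig):

  #include <string>

  // Restates the validity rule used above: a synthetic delay entry needs
  // a non-empty name, a ';', and at least one option character after it.
  bool IsValidSyntheticDelay(const std::string& delay) {
    size_t name_length = delay.find(';');
    return name_length != std::string::npos && name_length > 0 &&
           name_length != delay.size() - 1;
  }
  // "test.Delay;16" -> true; "test.Delay" -> false;
  // ";16" -> false; "test.Delay;" -> false.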
@@ -484,7 +673,29 @@
         << "Invalid predicate name in category event filter.";
 
     EventFilterConfig new_config(predicate_name);
-    new_config.InitializeFromConfigDict(event_filter);
+    const base::ListValue* included_list = nullptr;
+    CHECK(event_filter->GetList(kIncludedCategoriesParam, &included_list))
+        << "Missing included_categories in category event filter.";
+
+    for (size_t i = 0; i < included_list->GetSize(); ++i) {
+      std::string category;
+      if (included_list->GetString(i, &category))
+        new_config.AddIncludedCategory(category);
+    }
+
+    const base::ListValue* excluded_list = nullptr;
+    if (event_filter->GetList(kExcludedCategoriesParam, &excluded_list)) {
+      for (size_t i = 0; i < excluded_list->GetSize(); ++i) {
+        std::string category;
+        if (excluded_list->GetString(i, &category))
+          new_config.AddExcludedCategory(category);
+      }
+    }
+
+    const base::DictionaryValue* args_dict = nullptr;
+    if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
+      new_config.SetArgs(args_dict->CreateDeepCopy());
+
     event_filters_.push_back(new_config);
   }
 }
@@ -511,20 +722,50 @@
   dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
   dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
 
-  category_filter_.ToDict(dict.get());
+  StringList categories(included_categories_);
+  categories.insert(categories.end(),
+                    disabled_categories_.begin(),
+                    disabled_categories_.end());
+  AddCategoryToDict(dict.get(), kIncludedCategoriesParam, categories);
+  AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
+  AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
 
   if (!event_filters_.empty()) {
     std::unique_ptr<base::ListValue> filter_list(new base::ListValue());
     for (const EventFilterConfig& filter : event_filters_) {
       std::unique_ptr<base::DictionaryValue> filter_dict(
           new base::DictionaryValue());
-      filter.ToDict(filter_dict.get());
+      filter_dict->SetString(kFilterPredicateParam, filter.predicate_name());
+
+      std::unique_ptr<base::ListValue> included_categories_list(
+          new base::ListValue());
+      for (const std::string& included_category : filter.included_categories())
+        included_categories_list->AppendString(included_category);
+
+      filter_dict->Set(kIncludedCategoriesParam,
+                       std::move(included_categories_list));
+
+      if (!filter.excluded_categories().empty()) {
+        std::unique_ptr<base::ListValue> excluded_categories_list(
+            new base::ListValue());
+        for (const std::string& excluded_category :
+             filter.excluded_categories())
+          excluded_categories_list->AppendString(excluded_category);
+
+        filter_dict->Set(kExcludedCategoriesParam,
+                         std::move(excluded_categories_list));
+      }
+
+      if (filter.filter_args())
+        filter_dict->Set(kFilterArgsParam,
+                         filter.filter_args()->CreateDeepCopy());
+
       filter_list->Append(std::move(filter_dict));
     }
     dict->Set(kEventFiltersParam, std::move(filter_list));
   }
 
-  if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+  if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
     auto allowed_modes = MakeUnique<ListValue>();
     for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
       allowed_modes->AppendString(MemoryDumpLevelOfDetailToString(dump_mode));
@@ -588,5 +829,59 @@
   return ret;
 }
 
+void TraceConfig::WriteCategoryFilterString(const StringList& values,
+                                            std::string* out,
+                                            bool included) const {
+  bool prepend_comma = !out->empty();
+  int token_cnt = 0;
+  for (const std::string& category : values) {
+    if (token_cnt > 0 || prepend_comma)
+      StringAppendF(out, ",");
+    StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
+    ++token_cnt;
+  }
+}
+
+void TraceConfig::WriteCategoryFilterString(const StringList& delays,
+                                            std::string* out) const {
+  bool prepend_comma = !out->empty();
+  int token_cnt = 0;
+  for (const std::string& category : delays) {
+    if (token_cnt > 0 || prepend_comma)
+      StringAppendF(out, ",");
+    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
+                  category.c_str());
+    ++token_cnt;
+  }
+}
+
+bool TraceConfig::IsCategoryEnabled(const char* category_name) const {
+  // Check the disabled- filters and the disabled-* wildcard first so that a
+  // "*" filter does not include disabled-by-default categories.
+  for (const std::string& category : disabled_categories_) {
+    if (MatchPattern(category_name, category))
+      return true;
+  }
+
+  if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+    return false;
+
+  for (const std::string& category : included_categories_) {
+    if (MatchPattern(category_name, category))
+      return true;
+  }
+
+  return false;
+}
+
+bool TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+    StringPiece str) {
+  return str.empty() || str.front() == ' ' || str.back() == ' ';
+}
+
+bool TraceConfig::HasIncludedPatterns() const {
+  return !included_categories_.empty();
+}
+
 }  // namespace trace_event
 }  // namespace base
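
Both IsCategoryEnabled() and the group logic above lean on base::MatchPattern,
which matches '*' against any run of characters and '?' against any single
character. A compact stand-in with the same observable behavior for those two
metacharacters, as an illustration of the assumed semantics rather than
Chromium's implementation:

  #include <string>

  // Iterative greedy matcher with backtracking on '*'.
  bool MatchPatternSketch(const std::string& str, const std::string& pat) {
    size_t s = 0, p = 0, star = std::string::npos, match = 0;
    while (s < str.size()) {
      if (p < pat.size() && (pat[p] == '?' || pat[p] == str[s])) {
        ++s;
        ++p;
      } else if (p < pat.size() && pat[p] == '*') {
        star = p++;  // Remember the '*' and first try the empty expansion.
        match = s;
      } else if (star != std::string::npos) {
        p = star + 1;  // Backtrack: let the last '*' absorb one more char.
        s = ++match;
      } else {
        return false;
      }
    }
    while (p < pat.size() && pat[p] == '*') ++p;  // Trailing '*'s match "".
    return p == pat.size();
  }
  // MatchPatternSketch("inc_pattern_category", "inc_pattern*") -> true
  // MatchPatternSketch("cc", "disabled-by-default-*") -> false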
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 13b2f5f..717c261 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -17,7 +17,6 @@
 #include "base/gtest_prod_util.h"
 #include "base/strings/string_piece.h"
 #include "base/trace_event/memory_dump_request_args.h"
-#include "base/trace_event/trace_config_category_filter.h"
 #include "base/values.h"
 
 namespace base {
@@ -95,25 +94,26 @@
 
     EventFilterConfig& operator=(const EventFilterConfig& rhs);
 
-    void InitializeFromConfigDict(const base::DictionaryValue* event_filter);
-
-    void SetCategoryFilter(const TraceConfigCategoryFilter& category_filter);
-
-    void ToDict(DictionaryValue* filter_dict) const;
-
+    void AddIncludedCategory(const std::string& category);
+    void AddExcludedCategory(const std::string& category);
+    void SetArgs(std::unique_ptr<base::DictionaryValue> args);
     bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
 
-    bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
+    bool IsCategoryGroupEnabled(const char* category_group_name) const;
 
     const std::string& predicate_name() const { return predicate_name_; }
     base::DictionaryValue* filter_args() const { return args_.get(); }
-    const TraceConfigCategoryFilter& category_filter() const {
-      return category_filter_;
+    const StringList& included_categories() const {
+      return included_categories_;
+    }
+    const StringList& excluded_categories() const {
+      return excluded_categories_;
     }
 
    private:
     std::string predicate_name_;
-    TraceConfigCategoryFilter category_filter_;
+    StringList included_categories_;
+    StringList excluded_categories_;
     std::unique_ptr<base::DictionaryValue> args_;
   };
   typedef std::vector<EventFilterConfig> EventFilters;
@@ -231,7 +231,7 @@
   // Returns true if at least one category in the list is enabled by this
   // trace config. This is used to determine if the category filters are
   // enabled in the TRACE_* macros.
-  bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
+  bool IsCategoryGroupEnabled(const char* category_group_name) const;
 
   // Merges config with the current TraceConfig
   void Merge(const TraceConfig& config);
@@ -241,10 +241,6 @@
   // Clears and resets the memory dump config.
   void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
 
-  const TraceConfigCategoryFilter& category_filter() const {
-    return category_filter_;
-  }
-
   const MemoryDumpConfig& memory_dump_config() const {
     return memory_dump_config_;
   }
@@ -258,6 +254,15 @@
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
                            TraceConfigFromInvalidLegacyStrings);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+                           IsEmptyOrContainsLeadingOrTrailingWhitespace);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+                           EmptyAndAsteriskCategoryFilterString);
 
   // The default trace config, used when none is provided.
   // Allows all non-disabled-by-default categories through, except if they end
@@ -274,6 +279,13 @@
   void InitializeFromStrings(StringPiece category_filter_string,
                              StringPiece trace_options_string);
 
+  void SetCategoriesFromIncludedList(const ListValue& included_list);
+  void SetCategoriesFromExcludedList(const ListValue& excluded_list);
+  void SetSyntheticDelaysFromList(const ListValue& list);
+  void AddCategoryToDict(DictionaryValue* dict,
+                         const char* param,
+                         const StringList& categories) const;
+
   void SetMemoryDumpConfigFromConfigDict(
       const DictionaryValue& memory_dump_config);
   void SetDefaultMemoryDumpConfig();
@@ -283,14 +295,32 @@
 
   std::string ToTraceOptionsString() const;
 
+  void WriteCategoryFilterString(const StringList& values,
+                                 std::string* out,
+                                 bool included) const;
+  void WriteCategoryFilterString(const StringList& delays,
+                                 std::string* out) const;
+
+  // Returns true if the category is enabled according to this trace config.
+  // This tells whether a category is enabled from the TraceConfig's
+  // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+  // category is enabled from the tracing runtime's perspective.
+  bool IsCategoryEnabled(const char* category_name) const;
+
+  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(StringPiece str);
+
+  bool HasIncludedPatterns() const;
+
   TraceRecordMode record_mode_;
   bool enable_systrace_ : 1;
   bool enable_argument_filter_ : 1;
 
-  TraceConfigCategoryFilter category_filter_;
-
   MemoryDumpConfig memory_dump_config_;
 
+  StringList included_categories_;
+  StringList disabled_categories_;
+  StringList excluded_categories_;
+  StringList synthetic_delays_;
   EventFilters event_filters_;
 };
 
diff --git a/base/trace_event/trace_config_category_filter.cc b/base/trace_event/trace_config_category_filter.cc
deleted file mode 100644
index 234db18..0000000
--- a/base/trace_event/trace_config_category_filter.cc
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/trace_config_category_filter.h"
-
-#include "base/memory/ptr_util.h"
-#include "base/strings/pattern.h"
-#include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/trace_event/trace_event.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-const char kIncludedCategoriesParam[] = "included_categories";
-const char kExcludedCategoriesParam[] = "excluded_categories";
-const char kSyntheticDelaysParam[] = "synthetic_delays";
-
-const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
-}
-
-TraceConfigCategoryFilter::TraceConfigCategoryFilter() {}
-
-TraceConfigCategoryFilter::TraceConfigCategoryFilter(
-    const TraceConfigCategoryFilter& other)
-    : included_categories_(other.included_categories_),
-      disabled_categories_(other.disabled_categories_),
-      excluded_categories_(other.excluded_categories_),
-      synthetic_delays_(other.synthetic_delays_) {}
-
-TraceConfigCategoryFilter::~TraceConfigCategoryFilter() {}
-
-TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
-    const TraceConfigCategoryFilter& rhs) {
-  included_categories_ = rhs.included_categories_;
-  disabled_categories_ = rhs.disabled_categories_;
-  excluded_categories_ = rhs.excluded_categories_;
-  synthetic_delays_ = rhs.synthetic_delays_;
-  return *this;
-}
-
-void TraceConfigCategoryFilter::InitializeFromString(
-    const StringPiece& category_filter_string) {
-  std::vector<StringPiece> split = SplitStringPiece(
-      category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
-  for (const StringPiece& category : split) {
-    // Ignore empty categories.
-    if (category.empty())
-      continue;
-    // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
-    if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
-                   CompareCase::SENSITIVE) &&
-        category.back() == ')') {
-      StringPiece synthetic_category = category.substr(
-          strlen(kSyntheticDelayCategoryFilterPrefix),
-          category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
-      size_t name_length = synthetic_category.find(';');
-      if (name_length != std::string::npos && name_length > 0 &&
-          name_length != synthetic_category.size() - 1) {
-        synthetic_delays_.push_back(synthetic_category.as_string());
-      }
-    } else if (category.front() == '-') {
-      // Excluded categories start with '-'.
-      // Remove '-' from category string.
-      excluded_categories_.push_back(category.substr(1).as_string());
-    } else if (category.starts_with(TRACE_DISABLED_BY_DEFAULT(""))) {
-      disabled_categories_.push_back(category.as_string());
-    } else {
-      included_categories_.push_back(category.as_string());
-    }
-  }
-}
-
-void TraceConfigCategoryFilter::InitializeFromConfigDict(
-    const DictionaryValue& dict) {
-  const ListValue* category_list = nullptr;
-  if (dict.GetList(kIncludedCategoriesParam, &category_list))
-    SetCategoriesFromIncludedList(*category_list);
-  if (dict.GetList(kExcludedCategoriesParam, &category_list))
-    SetCategoriesFromExcludedList(*category_list);
-  if (dict.GetList(kSyntheticDelaysParam, &category_list))
-    SetSyntheticDelaysFromList(*category_list);
-}
-
-bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
-    const StringPiece& category_group_name) const {
-  bool had_enabled_by_default = false;
-  DCHECK(!category_group_name.empty());
-  CStringTokenizer category_group_tokens(category_group_name.begin(),
-                                         category_group_name.end(), ",");
-  while (category_group_tokens.GetNext()) {
-    StringPiece category_group_token = category_group_tokens.token_piece();
-    // Don't allow empty tokens, nor tokens with leading or trailing space.
-    DCHECK(IsCategoryNameAllowed(category_group_token))
-        << "Disallowed category string";
-    if (IsCategoryEnabled(category_group_token))
-      return true;
-
-    if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
-      had_enabled_by_default = true;
-  }
-  // Do a second pass to check for explicitly disabled categories
-  // (those explicitly enabled have priority due to first pass).
-  category_group_tokens.Reset();
-  bool category_group_disabled = false;
-  while (category_group_tokens.GetNext()) {
-    StringPiece category_group_token = category_group_tokens.token_piece();
-    for (const std::string& category : excluded_categories_) {
-      if (MatchPattern(category_group_token, category)) {
-        // The current token of category_group_name is present in the
-        // excluded_ list. Flag the exclusion and proceed to check whether
-        // any of the remaining categories of category_group_name is absent
-        // from the excluded_ list.
-        category_group_disabled = true;
-        break;
-      }
-      // One of the categories of category_group_name is not present in the
-      // excluded_ list. So, if it's not a disabled-by-default category, it
-      // has to be in the included_ list. Enable the category_group_name
-      // for recording.
-      if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
-        category_group_disabled = false;
-    }
-    // One of the categories present in category_group_name is not present in
-    // the excluded_ list. This implies the category_group_name group can be
-    // enabled for recording, since one of its categories is enabled.
-    if (!category_group_disabled)
-      break;
-  }
-  // If the category group is not excluded and there are no included patterns,
-  // we consider this category group enabled, as long as it had categories
-  // other than disabled-by-default.
-  return !category_group_disabled && had_enabled_by_default &&
-         included_categories_.empty();
-}
-
-bool TraceConfigCategoryFilter::IsCategoryEnabled(
-    const StringPiece& category_name) const {
-  // Check the disabled- filters and the disabled-* wildcard first so that a
-  // "*" filter does not include disabled-by-default categories.
-  for (const std::string& category : disabled_categories_) {
-    if (MatchPattern(category_name, category))
-      return true;
-  }
-
-  if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
-    return false;
-
-  for (const std::string& category : included_categories_) {
-    if (MatchPattern(category_name, category))
-      return true;
-  }
-
-  return false;
-}
-
-void TraceConfigCategoryFilter::Merge(const TraceConfigCategoryFilter& config) {
-  // Keep included patterns only if both filters have an included entry.
-  // Otherwise, one of the filters was specifying "*" and we want to honor
-  // the broadest filter.
-  if (!included_categories_.empty() && !config.included_categories_.empty()) {
-    included_categories_.insert(included_categories_.end(),
-                                config.included_categories_.begin(),
-                                config.included_categories_.end());
-  } else {
-    included_categories_.clear();
-  }
-
-  disabled_categories_.insert(disabled_categories_.end(),
-                              config.disabled_categories_.begin(),
-                              config.disabled_categories_.end());
-  excluded_categories_.insert(excluded_categories_.end(),
-                              config.excluded_categories_.begin(),
-                              config.excluded_categories_.end());
-  synthetic_delays_.insert(synthetic_delays_.end(),
-                           config.synthetic_delays_.begin(),
-                           config.synthetic_delays_.end());
-}
-
-void TraceConfigCategoryFilter::Clear() {
-  included_categories_.clear();
-  disabled_categories_.clear();
-  excluded_categories_.clear();
-  synthetic_delays_.clear();
-}
-
-void TraceConfigCategoryFilter::ToDict(DictionaryValue* dict) const {
-  StringList categories(included_categories_);
-  categories.insert(categories.end(), disabled_categories_.begin(),
-                    disabled_categories_.end());
-  AddCategoriesToDict(categories, kIncludedCategoriesParam, dict);
-  AddCategoriesToDict(excluded_categories_, kExcludedCategoriesParam, dict);
-  AddCategoriesToDict(synthetic_delays_, kSyntheticDelaysParam, dict);
-}
-
-std::string TraceConfigCategoryFilter::ToFilterString() const {
-  std::string filter_string;
-  WriteCategoryFilterString(included_categories_, &filter_string, true);
-  WriteCategoryFilterString(disabled_categories_, &filter_string, true);
-  WriteCategoryFilterString(excluded_categories_, &filter_string, false);
-  WriteCategoryFilterString(synthetic_delays_, &filter_string);
-  return filter_string;
-}
-
-void TraceConfigCategoryFilter::SetCategoriesFromIncludedList(
-    const ListValue& included_list) {
-  included_categories_.clear();
-  for (size_t i = 0; i < included_list.GetSize(); ++i) {
-    std::string category;
-    if (!included_list.GetString(i, &category))
-      continue;
-    if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
-                         TRACE_DISABLED_BY_DEFAULT("")) == 0) {
-      disabled_categories_.push_back(category);
-    } else {
-      included_categories_.push_back(category);
-    }
-  }
-}
-
-void TraceConfigCategoryFilter::SetCategoriesFromExcludedList(
-    const ListValue& excluded_list) {
-  excluded_categories_.clear();
-  for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
-    std::string category;
-    if (excluded_list.GetString(i, &category))
-      excluded_categories_.push_back(category);
-  }
-}
-
-void TraceConfigCategoryFilter::SetSyntheticDelaysFromList(
-    const ListValue& list) {
-  for (size_t i = 0; i < list.GetSize(); ++i) {
-    std::string delay;
-    if (!list.GetString(i, &delay))
-      continue;
-    // Synthetic delays are of the form "delay;option;option;...".
-    size_t name_length = delay.find(';');
-    if (name_length != std::string::npos && name_length > 0 &&
-        name_length != delay.size() - 1) {
-      synthetic_delays_.push_back(delay);
-    }
-  }
-}
-
-void TraceConfigCategoryFilter::AddCategoriesToDict(
-    const StringList& categories,
-    const char* param,
-    DictionaryValue* dict) const {
-  if (categories.empty())
-    return;
-
-  auto list = MakeUnique<ListValue>();
-  for (const std::string& category : categories)
-    list->AppendString(category);
-  dict->Set(param, std::move(list));
-}
-
-void TraceConfigCategoryFilter::WriteCategoryFilterString(
-    const StringList& values,
-    std::string* out,
-    bool included) const {
-  bool prepend_comma = !out->empty();
-  int token_cnt = 0;
-  for (const std::string& category : values) {
-    if (token_cnt > 0 || prepend_comma)
-      StringAppendF(out, ",");
-    StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
-    ++token_cnt;
-  }
-}
-
-void TraceConfigCategoryFilter::WriteCategoryFilterString(
-    const StringList& delays,
-    std::string* out) const {
-  bool prepend_comma = !out->empty();
-  int token_cnt = 0;
-  for (const std::string& category : delays) {
-    if (token_cnt > 0 || prepend_comma)
-      StringAppendF(out, ",");
-    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
-                  category.c_str());
-    ++token_cnt;
-  }
-}
-
-// static
-bool TraceConfigCategoryFilter::IsCategoryNameAllowed(StringPiece str) {
-  return !str.empty() && str.front() != ' ' && str.back() != ' ';
-}
-
-}  // namespace trace_event
-}  // namespace base
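
The WriteCategoryFilterString() overloads above, like their copies restored
into trace_config.cc, join the lists back into the legacy string form:
included and disabled categories verbatim, excluded ones with a '-' prefix,
and delays wrapped in DELAY(...). A sketch of the join for the
included/excluded case (AppendFilter is an illustrative free function; the
real code uses StringAppendF):

  #include <string>
  #include <vector>

  // Appends |values| to |out| comma-separated, prefixing '-' when the
  // list holds exclusions, mirroring WriteCategoryFilterString above.
  void AppendFilter(const std::vector<std::string>& values, bool included,
                    std::string* out) {
    for (const std::string& category : values) {
      if (!out->empty()) out->append(",");
      if (!included) out->append("-");
      out->append(category);
    }
  }
  // included {"a"}, disabled {"disabled-by-default-b"}, excluded {"c"}
  // serialize to "a,disabled-by-default-b,-c".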
diff --git a/base/trace_event/trace_config_category_filter.h b/base/trace_event/trace_config_category_filter.h
deleted file mode 100644
index 0d7dba0..0000000
--- a/base/trace_event/trace_config_category_filter.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
-#define BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
-
-#include <string>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/strings/string_piece.h"
-#include "base/values.h"
-
-namespace base {
-namespace trace_event {
-
-// Configuration of categories enabled and disabled in TraceConfig.
-class BASE_EXPORT TraceConfigCategoryFilter {
- public:
-  using StringList = std::vector<std::string>;
-
-  TraceConfigCategoryFilter();
-  TraceConfigCategoryFilter(const TraceConfigCategoryFilter& other);
-  ~TraceConfigCategoryFilter();
-
-  TraceConfigCategoryFilter& operator=(const TraceConfigCategoryFilter& rhs);
-
-  // Initializes from a category filter string. See the TraceConfig
-  // constructor for a description of how to write a category filter string.
-  void InitializeFromString(const StringPiece& category_filter_string);
-
-  // Initializes TraceConfigCategoryFilter object from the config dictionary.
-  void InitializeFromConfigDict(const DictionaryValue& dict);
-
-  // Merges this with category filter config.
-  void Merge(const TraceConfigCategoryFilter& config);
-  void Clear();
-
-  // Returns true if at least one category in the list is enabled by this
-  // trace config. This is used to determine if the category filters are
-  // enabled in the TRACE_* macros.
-  bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
-
-  // Returns true if the category is enabled according to this trace config.
-  // This tells whether a category is enabled from the TraceConfig's
-  // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
-  // category is enabled from the tracing runtime's perspective.
-  bool IsCategoryEnabled(const StringPiece& category_name) const;
-
-  void ToDict(DictionaryValue* dict) const;
-
-  std::string ToFilterString() const;
-
-  // Returns true if category name is a valid string.
-  static bool IsCategoryNameAllowed(StringPiece str);
-
-  const StringList& included_categories() const { return included_categories_; }
-  const StringList& excluded_categories() const { return excluded_categories_; }
-  const StringList& synthetic_delays() const { return synthetic_delays_; }
-
- private:
-  void SetCategoriesFromIncludedList(const ListValue& included_list);
-  void SetCategoriesFromExcludedList(const ListValue& excluded_list);
-  void SetSyntheticDelaysFromList(const ListValue& list);
-
-  void AddCategoriesToDict(const StringList& categories,
-                           const char* param,
-                           DictionaryValue* dict) const;
-
-  void WriteCategoryFilterString(const StringList& values,
-                                 std::string* out,
-                                 bool included) const;
-  void WriteCategoryFilterString(const StringList& delays,
-                                 std::string* out) const;
-
-  StringList included_categories_;
-  StringList disabled_categories_;
-  StringList excluded_categories_;
-  StringList synthetic_delays_;
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index a856c27..74aa7bd 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -304,12 +304,10 @@
   CheckDefaultTraceConfigBehavior(tc_asterisk);
 
   // They differ only for internal checking.
-  EXPECT_FALSE(tc_empty.category_filter().IsCategoryEnabled("Category1"));
-  EXPECT_FALSE(
-      tc_empty.category_filter().IsCategoryEnabled("not-excluded-category"));
-  EXPECT_TRUE(tc_asterisk.category_filter().IsCategoryEnabled("Category1"));
-  EXPECT_TRUE(
-      tc_asterisk.category_filter().IsCategoryEnabled("not-excluded-category"));
+  EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
+  EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
+  EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
+  EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
 }
 
 TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
@@ -404,15 +402,13 @@
                "-exc_pattern*,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
                tc.ToCategoryFilterString().c_str());
 
-  EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("included"));
-  EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("inc_pattern_category"));
-  EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("disabled-by-default-cc"));
-  EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("excluded"));
-  EXPECT_FALSE(tc.category_filter().IsCategoryEnabled("exc_pattern_category"));
-  EXPECT_FALSE(
-      tc.category_filter().IsCategoryEnabled("disabled-by-default-others"));
-  EXPECT_FALSE(
-      tc.category_filter().IsCategoryEnabled("not-excluded-nor-included"));
+  EXPECT_TRUE(tc.IsCategoryEnabled("included"));
+  EXPECT_TRUE(tc.IsCategoryEnabled("inc_pattern_category"));
+  EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-cc"));
+  EXPECT_FALSE(tc.IsCategoryEnabled("excluded"));
+  EXPECT_FALSE(tc.IsCategoryEnabled("exc_pattern_category"));
+  EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-others"));
+  EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-nor-included"));
 
   EXPECT_TRUE(tc.IsCategoryGroupEnabled("included"));
   EXPECT_TRUE(tc.IsCategoryGroupEnabled("inc_pattern_category"));
@@ -435,12 +431,10 @@
   const TraceConfig::EventFilterConfig& event_filter = tc.event_filters()[0];
   EXPECT_STREQ("event_whitelist_predicate",
                event_filter.predicate_name().c_str());
-  EXPECT_EQ(1u, event_filter.category_filter().included_categories().size());
-  EXPECT_STREQ("*",
-               event_filter.category_filter().included_categories()[0].c_str());
-  EXPECT_EQ(1u, event_filter.category_filter().excluded_categories().size());
-  EXPECT_STREQ("unfiltered_cat",
-               event_filter.category_filter().excluded_categories()[0].c_str());
+  EXPECT_EQ(1u, event_filter.included_categories().size());
+  EXPECT_STREQ("*", event_filter.included_categories()[0].c_str());
+  EXPECT_EQ(1u, event_filter.excluded_categories().size());
+  EXPECT_STREQ("unfiltered_cat", event_filter.excluded_categories()[0].c_str());
   EXPECT_TRUE(event_filter.filter_args());
 
   std::string json_out;
@@ -455,10 +449,8 @@
 
   const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
   TraceConfig tc2(config_string_2);
-  EXPECT_TRUE(tc2.category_filter().IsCategoryEnabled(
-      "non-disabled-by-default-pattern"));
-  EXPECT_FALSE(
-      tc2.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
+  EXPECT_TRUE(tc2.IsCategoryEnabled("non-disabled-by-default-pattern"));
+  EXPECT_FALSE(tc2.IsCategoryEnabled("disabled-by-default-pattern"));
   EXPECT_TRUE(tc2.IsCategoryGroupEnabled("non-disabled-by-default-pattern"));
   EXPECT_FALSE(tc2.IsCategoryGroupEnabled("disabled-by-default-pattern"));
 
@@ -546,9 +538,8 @@
       "\"excluded_categories\":[\"category\",\"disabled-by-default-pattern\"]"
     "}";
   tc = TraceConfig(invalid_config_string_2);
-  EXPECT_TRUE(tc.category_filter().IsCategoryEnabled("category"));
-  EXPECT_TRUE(
-      tc.category_filter().IsCategoryEnabled("disabled-by-default-pattern"));
+  EXPECT_TRUE(tc.IsCategoryEnabled("category"));
+  EXPECT_TRUE(tc.IsCategoryEnabled("disabled-by-default-pattern"));
   EXPECT_TRUE(tc.IsCategoryGroupEnabled("category"));
   EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-pattern"));
 }
@@ -600,25 +591,27 @@
   EXPECT_FALSE(tc.IsCategoryGroupEnabled("excluded,disabled-by-default-cc"));
 }
 
-TEST(TraceConfigTest, IsCategoryNameAllowed) {
-  // Test that IsCategoryNameAllowed actually catches categories that are
-  // explicitly forbidden. This method is called in a DCHECK to assert that we
-  // don't have these types of strings as categories.
-  EXPECT_FALSE(
-      TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category "));
-  EXPECT_FALSE(
-      TraceConfigCategoryFilter::IsCategoryNameAllowed(" bad_category"));
-  EXPECT_FALSE(
-      TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category "));
-  EXPECT_FALSE(
-      TraceConfigCategoryFilter::IsCategoryNameAllowed("   bad_category"));
-  EXPECT_FALSE(
-      TraceConfigCategoryFilter::IsCategoryNameAllowed("bad_category   "));
-  EXPECT_FALSE(
-      TraceConfigCategoryFilter::IsCategoryNameAllowed("   bad_category   "));
-  EXPECT_FALSE(TraceConfigCategoryFilter::IsCategoryNameAllowed(""));
-  EXPECT_TRUE(
-      TraceConfigCategoryFilter::IsCategoryNameAllowed("good_category"));
+TEST(TraceConfigTest, IsEmptyOrContainsLeadingOrTrailingWhitespace) {
+  // Test that IsEmptyOrContainsLeadingOrTrailingWhitespace actually catches
+  // categories that are explicitly forbidden.
+  // This method is called in a DCHECK to assert that we don't have these types
+  // of strings as categories.
+  EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+      " bad_category "));
+  EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+      " bad_category"));
+  EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+      "bad_category "));
+  EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+      "   bad_category"));
+  EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+      "bad_category   "));
+  EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+      "   bad_category   "));
+  EXPECT_TRUE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+      ""));
+  EXPECT_FALSE(TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+      "good_category"));
 }
 
 TEST(TraceConfigTest, SetTraceOptionValues) {
@@ -644,20 +637,20 @@
   EXPECT_EQ(tc_str1, tc2.ToString());
 
   EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
-  ASSERT_EQ(2u, tc1.memory_dump_config().triggers.size());
+  ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
 
   EXPECT_EQ(200u,
-            tc1.memory_dump_config().triggers[0].min_time_between_dumps_ms);
+            tc1.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
-            tc1.memory_dump_config().triggers[0].level_of_detail);
+            tc1.memory_dump_config_.triggers[0].level_of_detail);
 
   EXPECT_EQ(2000u,
-            tc1.memory_dump_config().triggers[1].min_time_between_dumps_ms);
+            tc1.memory_dump_config_.triggers[1].min_time_between_dumps_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
-            tc1.memory_dump_config().triggers[1].level_of_detail);
+            tc1.memory_dump_config_.triggers[1].level_of_detail);
   EXPECT_EQ(
       2048u,
-      tc1.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
+      tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
 
   std::string tc_str3 =
       TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
@@ -665,20 +658,20 @@
   TraceConfig tc3(tc_str3);
   EXPECT_EQ(tc_str3, tc3.ToString());
   EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
-  ASSERT_EQ(1u, tc3.memory_dump_config().triggers.size());
-  EXPECT_EQ(1u, tc3.memory_dump_config().triggers[0].min_time_between_dumps_ms);
+  ASSERT_EQ(1u, tc3.memory_dump_config_.triggers.size());
+  EXPECT_EQ(1u, tc3.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
-            tc3.memory_dump_config().triggers[0].level_of_detail);
+            tc3.memory_dump_config_.triggers[0].level_of_detail);
 
   std::string tc_str4 =
       TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
           1 /*heavy_period */);
   TraceConfig tc4(tc_str4);
   EXPECT_EQ(tc_str4, tc4.ToString());
-  ASSERT_EQ(1u, tc4.memory_dump_config().triggers.size());
-  EXPECT_EQ(1u, tc4.memory_dump_config().triggers[0].min_time_between_dumps_ms);
+  ASSERT_EQ(1u, tc4.memory_dump_config_.triggers.size());
+  EXPECT_EQ(1u, tc4.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
-            tc4.memory_dump_config().triggers[0].level_of_detail);
+            tc4.memory_dump_config_.triggers[0].level_of_detail);
 }
 
 TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
@@ -686,22 +679,22 @@
   TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
   EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
             tc.ToString());
-  EXPECT_EQ(0u, tc.memory_dump_config().triggers.size());
-  EXPECT_EQ(
-      TraceConfig::MemoryDumpConfig::HeapProfiler ::
-          kDefaultBreakdownThresholdBytes,
-      tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
+  EXPECT_EQ(0u, tc.memory_dump_config_.triggers.size());
+  EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
+            ::kDefaultBreakdownThresholdBytes,
+            tc.memory_dump_config_.heap_profiler_options
+            .breakdown_threshold_bytes);
 }
 
 TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
   TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
   EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
   EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
-  EXPECT_EQ(2u, tc.memory_dump_config().triggers.size());
-  EXPECT_EQ(
-      TraceConfig::MemoryDumpConfig::HeapProfiler ::
-          kDefaultBreakdownThresholdBytes,
-      tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
+  EXPECT_EQ(2u, tc.memory_dump_config_.triggers.size());
+  EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
+            ::kDefaultBreakdownThresholdBytes,
+            tc.memory_dump_config_.heap_profiler_options
+            .breakdown_threshold_bytes);
 }
 
 }  // namespace trace_event
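
For reference, the predicate exercised by the restored test above only has to
reject the empty string and any string whose first or last character is
whitespace. A minimal standalone sketch of that check (hypothetical helper
name; not the Chromium implementation, which lives on TraceConfig):

#include <cassert>
#include <cctype>
#include <string>

// True for "" and for any string with leading or trailing whitespace,
// matching the EXPECT_TRUE/EXPECT_FALSE cases in the test above.
bool IsEmptyOrHasEdgeWhitespace(const std::string& s) {
  return s.empty() || std::isspace(static_cast<unsigned char>(s.front())) ||
         std::isspace(static_cast<unsigned char>(s.back()));
}

int main() {
  assert(IsEmptyOrHasEdgeWhitespace(""));
  assert(IsEmptyOrHasEdgeWhitespace(" bad_category "));
  assert(IsEmptyOrHasEdgeWhitespace("bad_category   "));
  assert(!IsEmptyOrHasEdgeWhitespace("good_category"));
  return 0;
}
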
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index 85e1e16..82a552a 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -3088,15 +3088,11 @@
       "{"
       "  \"included_categories\": ["
       "    \"filtered_cat\","
-      "    \"unfiltered_cat\","
-      "    \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
-      "    \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
+      "    \"unfiltered_cat\"],"
       "  \"event_filters\": ["
       "     {"
       "       \"filter_predicate\": \"testing_predicate\", "
-      "       \"included_categories\": ["
-      "         \"filtered_cat\","
-      "         \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
+      "       \"included_categories\": [\"filtered_cat\"]"
       "     }"
       "    "
       "  ]"
@@ -3115,15 +3111,12 @@
   TRACE_EVENT0("filtered_cat", "a mushroom");
   TRACE_EVENT0("unfiltered_cat", "a horse");
 
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
-
   // This is scoped so we can test the end event being filtered.
   { TRACE_EVENT0("filtered_cat", "another cat whoa"); }
 
   EndTraceAndFlush();
 
-  EXPECT_EQ(4u, filter_hits_counter.filter_trace_event_hit_count);
+  EXPECT_EQ(3u, filter_hits_counter.filter_trace_event_hit_count);
   EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
 }
 
@@ -3132,14 +3125,12 @@
       "{"
       "  \"included_categories\": ["
       "    \"filtered_cat\","
-      "    \"unfiltered_cat\","
-      "    \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"],"
+      "    \"unfiltered_cat\"],"
       "  \"event_filters\": ["
       "     {"
       "       \"filter_predicate\": \"%s\", "
-      "       \"included_categories\": ["
-      "         \"filtered_cat\","
-      "         \"" TRACE_DISABLED_BY_DEFAULT("*") "\"], "
+      "       \"included_categories\": [\"*\"], "
+      "       \"excluded_categories\": [\"unfiltered_cat\"], "
       "       \"filter_args\": {"
       "           \"event_name_whitelist\": [\"a snake\", \"a dog\"]"
       "         }"
@@ -3157,16 +3148,12 @@
   TRACE_EVENT0("filtered_cat", "a snake");
   TRACE_EVENT0("filtered_cat", "a mushroom");
   TRACE_EVENT0("unfiltered_cat", "a cat");
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a pony");
 
   EndTraceAndFlush();
 
   EXPECT_TRUE(FindMatchingValue("name", "a snake"));
   EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
   EXPECT_TRUE(FindMatchingValue("name", "a cat"));
-  EXPECT_TRUE(FindMatchingValue("name", "a dog"));
-  EXPECT_FALSE(FindMatchingValue("name", "a pony"));
 }
 
 TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
@@ -3174,16 +3161,12 @@
       "{"
       "  \"included_categories\": ["
       "    \"filtered_cat\","
-      "    \"unfiltered_cat\","
-      "    \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\","
-      "    \"" TRACE_DISABLED_BY_DEFAULT("unfiltered_cat") "\"],"
+      "    \"unfiltered_cat\"],"
       "  \"excluded_categories\": [\"excluded_cat\"],"
       "  \"event_filters\": ["
       "     {"
       "       \"filter_predicate\": \"%s\", "
-      "       \"included_categories\": ["
-      "         \"*\","
-      "         \"" TRACE_DISABLED_BY_DEFAULT("filtered_cat") "\"]"
+      "       \"included_categories\": [\"*\"]"
       "     }"
       "  ]"
       "}",
@@ -3197,8 +3180,6 @@
   TRACE_EVENT0("filtered_cat", "a snake");
   TRACE_EVENT0("excluded_cat", "a mushroom");
   TRACE_EVENT0("unfiltered_cat", "a cat");
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("filtered_cat"), "a dog");
-  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("unfiltered_cat"), "a pony");
 
   EndTraceAndFlush();
 
@@ -3206,8 +3187,6 @@
   EXPECT_TRUE(FindMatchingValue("name", "a snake"));
   EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
   EXPECT_TRUE(FindMatchingValue("name", "a cat"));
-  EXPECT_TRUE(FindMatchingValue("name", "a dog"));
-  EXPECT_TRUE(FindMatchingValue("name", "a pony"));
 }
 
 TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
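
The filtering tests above assemble their configs as inline JSON string
literals. Written out, the post-revert shape of such a config (no
TRACE_DISABLED_BY_DEFAULT entries) is roughly the following; the predicate
name is an assumption, since the tests inject it through a %s placeholder:

#include <string>

// Hand-written sketch of the event-filter config shape used by the
// whitelist-filtering test; not captured from a real trace session.
const std::string kExampleFilterConfig = R"({
  "included_categories": ["filtered_cat", "unfiltered_cat"],
  "event_filters": [{
    "filter_predicate": "event_whitelist_predicate",
    "included_categories": ["*"],
    "excluded_categories": ["unfiltered_cat"],
    "filter_args": {"event_name_whitelist": ["a snake", "a dog"]}
  }]
})";
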
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index abb0d36..10b090a 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -19,10 +19,8 @@
 #include "base/memory/ref_counted_memory.h"
 #include "base/memory/singleton.h"
 #include "base/message_loop/message_loop.h"
-#include "base/process/process_info.h"
 #include "base/process/process_metrics.h"
 #include "base/stl_util.h"
-#include "base/strings/string_piece.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_tokenizer.h"
 #include "base/strings/stringprintf.h"
@@ -1511,20 +1509,8 @@
                             process_name_);
   }
 
-#if !defined(OS_NACL) && !defined(OS_IOS)
-/*
-  Time process_creation_time = CurrentProcessInfo::CreationTime();
-  if (!process_creation_time.is_null()) {
-    TimeDelta process_uptime = Time::Now() - process_creation_time;
-    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
-                            current_thread_id, "process_uptime_seconds",
-                            "uptime", process_uptime.InSeconds());
-  }
-*/
-#endif  // !defined(OS_NACL) && !defined(OS_IOS)
-
   if (!process_labels_.empty()) {
-    std::vector<base::StringPiece> labels;
+    std::vector<std::string> labels;
     for (const auto& it : process_labels_)
       labels.push_back(it.second);
     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
diff --git a/base/values.cc b/base/values.cc
index b5e44e6..5cc0d69 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -69,7 +69,7 @@
           static_cast<const DictionaryValue&>(node));
 
     default:
-      return MakeUnique<Value>(node);
+      return node.CreateDeepCopy();
   }
 }
 
@@ -91,11 +91,11 @@
   InternalCopyConstructFrom(that);
 }
 
-Value::Value(Value&& that) noexcept {
+Value::Value(Value&& that) {
   InternalMoveConstructFrom(std::move(that));
 }
 
-Value::Value() noexcept : type_(Type::NONE) {}
+Value::Value() : type_(Type::NONE) {}
 
 Value::Value(Type type) : type_(type) {
   // Initialize with the default value.
@@ -149,7 +149,7 @@
   DCHECK(IsStringUTF8(*string_value_));
 }
 
-Value::Value(std::string&& in_string) noexcept : type_(Type::STRING) {
+Value::Value(std::string&& in_string) : type_(Type::STRING) {
   string_value_.Init(std::move(in_string));
   DCHECK(IsStringUTF8(*string_value_));
 }
@@ -168,26 +168,32 @@
   binary_value_.Init(in_blob);
 }
 
-Value::Value(std::vector<char>&& in_blob) noexcept : type_(Type::BINARY) {
+Value::Value(std::vector<char>&& in_blob) : type_(Type::BINARY) {
   binary_value_.Init(std::move(in_blob));
 }
 
 Value& Value::operator=(const Value& that) {
-  if (type_ == that.type_) {
-    InternalCopyAssignFromSameType(that);
-  } else {
-    // This is not a self assignment because the type_ doesn't match.
-    InternalCleanup();
-    InternalCopyConstructFrom(that);
+  if (this != &that) {
+    if (type_ == that.type_) {
+      InternalCopyAssignFromSameType(that);
+    } else {
+      InternalCleanup();
+      InternalCopyConstructFrom(that);
+    }
   }
 
   return *this;
 }
 
-Value& Value::operator=(Value&& that) noexcept {
-  DCHECK(this != &that) << "attempt to self move assign.";
-  InternalCleanup();
-  InternalMoveConstructFrom(std::move(that));
+Value& Value::operator=(Value&& that) {
+  if (this != &that) {
+    if (type_ == that.type_) {
+      InternalMoveAssignFromSameType(std::move(that));
+    } else {
+      InternalCleanup();
+      InternalMoveConstructFrom(std::move(that));
+    }
+  }
 
   return *this;
 }
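
The assignment operators restored above follow one pattern: guard against
self-assignment, assign in place when the active type matches, and otherwise
tear the old value down before constructing the new one. The same pattern on
a deliberately tiny tagged union (a stand-in for base::Value's internals, not
the real class):

#include <new>
#include <string>
#include <utility>

class Box {
 public:
  Box() : is_string_(false), number_(0) {}
  explicit Box(std::string s) : is_string_(true) {
    new (&str_) std::string(std::move(s));
  }
  Box(const Box& that) : is_string_(that.is_string_) {
    if (is_string_)
      new (&str_) std::string(that.str_);
    else
      number_ = that.number_;
  }
  ~Box() { Cleanup(); }

  Box& operator=(const Box& that) {
    if (this != &that) {  // Self-assignment guard.
      if (is_string_ == that.is_string_) {
        // Same active member: plain assignment is safe.
        if (is_string_)
          str_ = that.str_;
        else
          number_ = that.number_;
      } else {
        // Active member changes: destroy, then copy-construct.
        Cleanup();
        is_string_ = that.is_string_;
        if (is_string_)
          new (&str_) std::string(that.str_);
        else
          number_ = that.number_;
      }
    }
    return *this;
  }

 private:
  void Cleanup() {
    if (is_string_)
      str_.~basic_string();  // Unions need manual destruction.
  }

  bool is_string_;
  union {
    double number_;
    std::string str_;
  };
};

int main() {
  Box a("hello");
  Box b;
  b = a;  // double -> string: destroy-and-construct path.
  b = b;  // Self-assignment: caught by the guard, no-op.
  return 0;
}
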
@@ -341,122 +347,112 @@
 }
 
 Value* Value::DeepCopy() const {
-  return new Value(*this);
+  // This method should only be getting called for null Values--all subclasses
+  // need to provide their own implementation.
+  switch (type()) {
+    case Type::NONE:
+      return CreateNullValue().release();
+
+    case Type::BOOLEAN:
+      return new Value(bool_value_);
+    case Type::INTEGER:
+      return new Value(int_value_);
+    case Type::DOUBLE:
+      return new Value(double_value_);
+    case Type::STRING:
+      return new Value(*string_value_);
+    // For now, make BinaryValues for backward-compatibility. Convert to
+    // Value when that code is deleted.
+    case Type::BINARY:
+      return new Value(*binary_value_);
+
+    // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+    // are completely inlined.
+    case Type::DICTIONARY: {
+      DictionaryValue* result = new DictionaryValue;
+
+      for (const auto& current_entry : **dict_ptr_) {
+        result->SetWithoutPathExpansion(current_entry.first,
+                                        current_entry.second->CreateDeepCopy());
+      }
+
+      return result;
+    }
+
+    case Type::LIST: {
+      ListValue* result = new ListValue;
+
+      for (const auto& entry : *list_)
+        result->Append(entry->CreateDeepCopy());
+
+      return result;
+    }
+
+    default:
+      NOTREACHED();
+      return nullptr;
+  }
 }
 
 std::unique_ptr<Value> Value::CreateDeepCopy() const {
-  return MakeUnique<Value>(*this);
-}
-
-bool operator==(const Value& lhs, const Value& rhs) {
-  if (lhs.type_ != rhs.type_)
-    return false;
-
-  switch (lhs.type_) {
-    case Value::Type::NONE:
-      return true;
-    case Value::Type::BOOLEAN:
-      return lhs.bool_value_ == rhs.bool_value_;
-    case Value::Type::INTEGER:
-      return lhs.int_value_ == rhs.int_value_;
-    case Value::Type::DOUBLE:
-      return lhs.double_value_ == rhs.double_value_;
-    case Value::Type::STRING:
-      return *lhs.string_value_ == *rhs.string_value_;
-    case Value::Type::BINARY:
-      return *lhs.binary_value_ == *rhs.binary_value_;
-    // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
-    // are completely inlined.
-    case Value::Type::DICTIONARY:
-      if ((*lhs.dict_ptr_)->size() != (*rhs.dict_ptr_)->size())
-        return false;
-      return std::equal(std::begin(**lhs.dict_ptr_), std::end(**lhs.dict_ptr_),
-                        std::begin(**rhs.dict_ptr_),
-                        [](const Value::DictStorage::value_type& u,
-                           const Value::DictStorage::value_type& v) {
-                          return std::tie(u.first, *u.second) ==
-                                 std::tie(v.first, *v.second);
-                        });
-    case Value::Type::LIST:
-      if (lhs.list_->size() != rhs.list_->size())
-        return false;
-      return std::equal(
-          std::begin(*lhs.list_), std::end(*lhs.list_), std::begin(*rhs.list_),
-          [](const Value::ListStorage::value_type& u,
-             const Value::ListStorage::value_type& v) { return *u == *v; });
-  }
-
-  NOTREACHED();
-  return false;
-}
-
-bool operator!=(const Value& lhs, const Value& rhs) {
-  return !(lhs == rhs);
-}
-
-bool operator<(const Value& lhs, const Value& rhs) {
-  if (lhs.type_ != rhs.type_)
-    return lhs.type_ < rhs.type_;
-
-  switch (lhs.type_) {
-    case Value::Type::NONE:
-      return false;
-    case Value::Type::BOOLEAN:
-      return lhs.bool_value_ < rhs.bool_value_;
-    case Value::Type::INTEGER:
-      return lhs.int_value_ < rhs.int_value_;
-    case Value::Type::DOUBLE:
-      return lhs.double_value_ < rhs.double_value_;
-    case Value::Type::STRING:
-      return *lhs.string_value_ < *rhs.string_value_;
-    case Value::Type::BINARY:
-      return *lhs.binary_value_ < *rhs.binary_value_;
-    // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
-    // are completely inlined.
-    case Value::Type::DICTIONARY:
-      return std::lexicographical_compare(
-          std::begin(**lhs.dict_ptr_), std::end(**lhs.dict_ptr_),
-          std::begin(**rhs.dict_ptr_), std::end(**rhs.dict_ptr_),
-          [](const Value::DictStorage::value_type& u,
-             const Value::DictStorage::value_type& v) {
-            return std::tie(u.first, *u.second) < std::tie(v.first, *v.second);
-          });
-    case Value::Type::LIST:
-      return std::lexicographical_compare(
-          std::begin(*lhs.list_), std::end(*lhs.list_), std::begin(*rhs.list_),
-          std::end(*rhs.list_),
-          [](const Value::ListStorage::value_type& u,
-             const Value::ListStorage::value_type& v) { return *u < *v; });
-  }
-
-  NOTREACHED();
-  return false;
-}
-
-bool operator>(const Value& lhs, const Value& rhs) {
-  return rhs < lhs;
-}
-
-bool operator<=(const Value& lhs, const Value& rhs) {
-  return !(rhs < lhs);
-}
-
-bool operator>=(const Value& lhs, const Value& rhs) {
-  return !(lhs < rhs);
+  return WrapUnique(DeepCopy());
 }
 
 bool Value::Equals(const Value* other) const {
-  DCHECK(other);
-  return *this == *other;
+  if (other->type() != type())
+    return false;
+
+  switch (type()) {
+    case Type::NONE:
+      return true;
+    case Type::BOOLEAN:
+      return bool_value_ == other->bool_value_;
+    case Type::INTEGER:
+      return int_value_ == other->int_value_;
+    case Type::DOUBLE:
+      return double_value_ == other->double_value_;
+    case Type::STRING:
+      return *string_value_ == *(other->string_value_);
+    case Type::BINARY:
+      return *binary_value_ == *(other->binary_value_);
+    // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
+    // are completely inlined.
+    case Type::DICTIONARY: {
+      if ((*dict_ptr_)->size() != (*other->dict_ptr_)->size())
+        return false;
+
+      return std::equal(std::begin(**dict_ptr_), std::end(**dict_ptr_),
+                        std::begin(**(other->dict_ptr_)),
+                        [](const DictStorage::value_type& lhs,
+                           const DictStorage::value_type& rhs) {
+                          if (lhs.first != rhs.first)
+                            return false;
+
+                          return lhs.second->Equals(rhs.second.get());
+                        });
+    }
+    case Type::LIST: {
+      if (list_->size() != other->list_->size())
+        return false;
+
+      return std::equal(std::begin(*list_), std::end(*list_),
+                        std::begin(*(other->list_)),
+                        [](const ListStorage::value_type& lhs,
+                           const ListStorage::value_type& rhs) {
+                          return lhs->Equals(rhs.get());
+                        });
+    }
+  }
+
+  NOTREACHED();
+  return false;
 }
 
 // static
 bool Value::Equals(const Value* a, const Value* b) {
-  if ((a == NULL) && (b == NULL))
-    return true;
-  if ((a == NULL) ^ (b == NULL))
-    return false;
-  return *a == *b;
+  if ((a == NULL) && (b == NULL)) return true;
+  if ((a == NULL) ^  (b == NULL)) return false;
+  return a->Equals(b);
 }
 
 void Value::InternalCopyFundamentalValue(const Value& that) {
@@ -498,23 +494,14 @@
       binary_value_.Init(*that.binary_value_);
       return;
     // DictStorage and ListStorage are move-only types due to the presence of
-    // unique_ptrs. This is why the explicit copy of every element is necessary
-    // here.
+    // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
     // TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
     // can be copied directly.
     case Type::DICTIONARY:
-      dict_ptr_.Init(MakeUnique<DictStorage>());
-      for (const auto& it : **that.dict_ptr_) {
-        (*dict_ptr_)
-            ->emplace_hint((*dict_ptr_)->end(), it.first,
-                           MakeUnique<Value>(*it.second));
-      }
+      dict_ptr_.Init(std::move(*that.CreateDeepCopy()->dict_ptr_));
       return;
     case Type::LIST:
-      list_.Init();
-      list_->reserve(that.list_->size());
-      for (const auto& it : *that.list_)
-        list_->push_back(MakeUnique<Value>(*it));
+      list_.Init(std::move(*that.CreateDeepCopy()->list_));
       return;
   }
 }
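
The comment in the hunk above notes that DictStorage and ListStorage are
move-only because they hold unique_ptrs, which is why copying goes through
CreateDeepCopy. The constraint in isolation, on a plain standard-library map
rather than the Chromium storage types:

#include <map>
#include <memory>
#include <string>

using Dict = std::map<std::string, std::unique_ptr<int>>;

// `Dict b = a;` does not compile: unique_ptr has no copy constructor, so
// neither does the map. A deep copy has to clone every element.
Dict DeepCopy(const Dict& in) {
  Dict out;
  for (const auto& kv : in)
    out.emplace(kv.first, std::make_unique<int>(*kv.second));
  return out;
}

int main() {
  Dict a;
  a.emplace("x", std::make_unique<int>(42));
  Dict b = DeepCopy(a);  // Element-wise clone of a's contents.
  return *b["x"] == 42 ? 0 : 1;
}
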
@@ -546,8 +533,6 @@
 }
 
 void Value::InternalCopyAssignFromSameType(const Value& that) {
-  // TODO(crbug.com/646113): make this a DCHECK once base::Value does not have
-  // subclasses.
   CHECK_EQ(type_, that.type_);
 
   switch (type_) {
@@ -565,15 +550,40 @@
       *binary_value_ = *that.binary_value_;
       return;
     // DictStorage and ListStorage are move-only types due to the presence of
-    // unique_ptrs. This is why the explicit call to the copy constructor is
-    // necessary here.
+    // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
     // TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
     // can be copied directly.
     case Type::DICTIONARY:
-      *dict_ptr_ = std::move(*Value(that).dict_ptr_);
+      *dict_ptr_ = std::move(*that.CreateDeepCopy()->dict_ptr_);
       return;
     case Type::LIST:
-      *list_ = std::move(*Value(that).list_);
+      *list_ = std::move(*that.CreateDeepCopy()->list_);
+      return;
+  }
+}
+
+void Value::InternalMoveAssignFromSameType(Value&& that) {
+  CHECK_EQ(type_, that.type_);
+
+  switch (type_) {
+    case Type::NONE:
+    case Type::BOOLEAN:
+    case Type::INTEGER:
+    case Type::DOUBLE:
+      InternalCopyFundamentalValue(that);
+      return;
+
+    case Type::STRING:
+      *string_value_ = std::move(*that.string_value_);
+      return;
+    case Type::BINARY:
+      *binary_value_ = std::move(*that.binary_value_);
+      return;
+    case Type::DICTIONARY:
+      *dict_ptr_ = std::move(*that.dict_ptr_);
+      return;
+    case Type::LIST:
+      *list_ = std::move(*that.list_);
       return;
   }
 }
@@ -1039,7 +1049,8 @@
       }
     }
     // All other cases: Make a copy and hook it up.
-    SetWithoutPathExpansion(it.key(), MakeUnique<Value>(*merge_value));
+    SetWithoutPathExpansion(it.key(),
+                            base::WrapUnique(merge_value->DeepCopy()));
   }
 }
 
@@ -1056,11 +1067,11 @@
 DictionaryValue::Iterator::~Iterator() {}
 
 DictionaryValue* DictionaryValue::DeepCopy() const {
-  return new DictionaryValue(*this);
+  return static_cast<DictionaryValue*>(Value::DeepCopy());
 }
 
 std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
-  return MakeUnique<DictionaryValue>(*this);
+  return WrapUnique(DeepCopy());
 }
 
 ///////////////////// ListValue ////////////////////
@@ -1226,7 +1237,7 @@
 
 bool ListValue::Remove(const Value& value, size_t* index) {
   for (auto it = list_->begin(); it != list_->end(); ++it) {
-    if (**it == value) {
+    if ((*it)->Equals(&value)) {
       size_t previous_index = it - list_->begin();
       list_->erase(it);
 
@@ -1294,8 +1305,9 @@
 bool ListValue::AppendIfNotPresent(std::unique_ptr<Value> in_value) {
   DCHECK(in_value);
   for (const auto& entry : *list_) {
-    if (*entry == *in_value)
+    if (entry->Equals(in_value.get())) {
       return false;
+    }
   }
   list_->push_back(std::move(in_value));
   return true;
@@ -1313,7 +1325,7 @@
 ListValue::const_iterator ListValue::Find(const Value& value) const {
   return std::find_if(list_->begin(), list_->end(),
                       [&value](const std::unique_ptr<Value>& entry) {
-                        return *entry == value;
+                        return entry->Equals(&value);
                       });
 }
 
@@ -1323,11 +1335,11 @@
 }
 
 ListValue* ListValue::DeepCopy() const {
-  return new ListValue(*this);
+  return static_cast<ListValue*>(Value::DeepCopy());
 }
 
 std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
-  return MakeUnique<ListValue>(*this);
+  return WrapUnique(DeepCopy());
 }
 
 ValueSerializer::~ValueSerializer() {
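
Both the deleted operator== and the restored Value::Equals compare dictionary
and list storage with std::equal plus a lambda that dereferences the stored
unique_ptrs; comparing the pointers themselves would test identity, not
contents. The pattern on its own, over a plain vector:

#include <algorithm>
#include <memory>
#include <vector>

// Deep equality for vectors of unique_ptr<int>: check sizes, then compare
// pointees (not pointers) element by element.
bool DeepEqual(const std::vector<std::unique_ptr<int>>& a,
               const std::vector<std::unique_ptr<int>>& b) {
  return a.size() == b.size() &&
         std::equal(a.begin(), a.end(), b.begin(),
                    [](const std::unique_ptr<int>& x,
                       const std::unique_ptr<int>& y) { return *x == *y; });
}

int main() {
  std::vector<std::unique_ptr<int>> a, b;
  a.push_back(std::make_unique<int>(1));
  b.push_back(std::make_unique<int>(1));
  return DeepEqual(a, b) ? 0 : 1;  // Equal contents, distinct allocations.
}
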
diff --git a/base/values.h b/base/values.h
index 925152d..35f66df 100644
--- a/base/values.h
+++ b/base/values.h
@@ -74,8 +74,8 @@
                                                              size_t size);
 
   Value(const Value& that);
-  Value(Value&& that) noexcept;
-  Value() noexcept;  // A null value.
+  Value(Value&& that);
+  Value();  // A null value.
   explicit Value(Type type);
   explicit Value(bool in_bool);
   explicit Value(int in_int);
@@ -89,16 +89,16 @@
   // arguments.
   explicit Value(const char* in_string);
   explicit Value(const std::string& in_string);
-  explicit Value(std::string&& in_string) noexcept;
+  explicit Value(std::string&& in_string);
   explicit Value(const char16* in_string);
   explicit Value(const string16& in_string);
   explicit Value(StringPiece in_string);
 
   explicit Value(const std::vector<char>& in_blob);
-  explicit Value(std::vector<char>&& in_blob) noexcept;
+  explicit Value(std::vector<char>&& in_blob);
 
   Value& operator=(const Value& that);
-  Value& operator=(Value&& that) noexcept;
+  Value& operator=(Value&& that);
 
   ~Value();
 
@@ -157,30 +157,15 @@
   // to the copy. The caller gets ownership of the copy, of course.
   // Subclasses return their own type directly in their overrides;
   // this works because C++ supports covariant return types.
-  // DEPRECATED, use Value's copy constructor instead.
-  // TODO(crbug.com/646113): Delete this and migrate callsites.
   Value* DeepCopy() const;
   // Preferred version of DeepCopy. TODO(estade): remove the above.
   std::unique_ptr<Value> CreateDeepCopy() const;
 
-  // Comparison operators so that Values can easily be used with standard
-  // library algorithms and associative containers.
-  BASE_EXPORT friend bool operator==(const Value& lhs, const Value& rhs);
-  BASE_EXPORT friend bool operator!=(const Value& lhs, const Value& rhs);
-  BASE_EXPORT friend bool operator<(const Value& lhs, const Value& rhs);
-  BASE_EXPORT friend bool operator>(const Value& lhs, const Value& rhs);
-  BASE_EXPORT friend bool operator<=(const Value& lhs, const Value& rhs);
-  BASE_EXPORT friend bool operator>=(const Value& lhs, const Value& rhs);
-
   // Compares if two Value objects have equal contents.
-  // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
-  // TODO(crbug.com/646113): Delete this and migrate callsites.
   bool Equals(const Value* other) const;
 
   // Compares if two Value objects have equal contents. Can handle NULLs.
   // NULLs are considered equal but different from Value::CreateNullValue().
-  // DEPRECATED, use operator==(const Value& lhs, const Value& rhs) instead.
-  // TODO(crbug.com/646113): Delete this and migrate callsites.
   static bool Equals(const Value* a, const Value* b);
 
  protected:
@@ -206,6 +191,7 @@
   void InternalCopyConstructFrom(const Value& that);
   void InternalMoveConstructFrom(Value&& that);
   void InternalCopyAssignFromSameType(const Value& that);
+  void InternalMoveAssignFromSameType(Value&& that);
   void InternalCleanup();
 };
 
@@ -366,8 +352,6 @@
     DictStorage::const_iterator it_;
   };
 
-  // DEPRECATED, use DictionaryValue's copy constructor instead.
-  // TODO(crbug.com/646113): Delete this and migrate callsites.
   DictionaryValue* DeepCopy() const;
   // Preferred version of DeepCopy. TODO(estade): remove the above.
   std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
@@ -484,8 +468,6 @@
   const_iterator begin() const { return list_->begin(); }
   const_iterator end() const { return list_->end(); }
 
-  // DEPRECATED, use ListValue's copy constructor instead.
-  // TODO(crbug.com/646113): Delete this and migrate callsites.
   ListValue* DeepCopy() const;
   // Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
   std::unique_ptr<ListValue> CreateDeepCopy() const;
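
The DeepCopy comment in values.h relies on C++ covariant return types:
DictionaryValue::DeepCopy and ListValue::DeepCopy return their own pointer
types while still satisfying a signature declared with Value*. The language
rule on a hypothetical Base/Derived pair:

// An override may narrow the returned pointer type, so callers that hold a
// Derived get a Derived* back without casting.
struct Base {
  virtual ~Base() {}
  virtual Base* Clone() const { return new Base(*this); }
};

struct Derived : Base {
  Derived* Clone() const override { return new Derived(*this); }
};

int main() {
  Derived d;
  Derived* copy = d.Clone();  // No static_cast needed at the call site.
  delete copy;
  return 0;
}
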
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 6c1f017..3bcdc16 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -8,8 +8,6 @@
 
 #include <limits>
 #include <memory>
-#include <string>
-#include <type_traits>
 #include <utility>
 #include <vector>
 
@@ -20,20 +18,6 @@
 
 namespace base {
 
-TEST(ValuesTest, TestNothrow) {
-  static_assert(std::is_nothrow_move_constructible<Value>::value,
-                "IsNothrowMoveConstructible");
-  static_assert(std::is_nothrow_default_constructible<Value>::value,
-                "IsNothrowDefaultConstructible");
-  static_assert(std::is_nothrow_constructible<Value, std::string&&>::value,
-                "IsNothrowMoveConstructibleFromString");
-  static_assert(
-      std::is_nothrow_constructible<Value, std::vector<char>&&>::value,
-      "IsNothrowMoveConstructibleFromBlob");
-  static_assert(std::is_nothrow_move_assignable<Value>::value,
-                "IsNothrowMoveAssignable");
-}
-
 // Group of tests for the value constructors.
 TEST(ValuesTest, ConstructBool) {
   Value true_value(true);
@@ -695,7 +679,7 @@
   scoped_nested_dictionary->SetString("key", "value");
   original_dict.Set("dictionary", std::move(scoped_nested_dictionary));
 
-  auto copy_dict = MakeUnique<DictionaryValue>(original_dict);
+  std::unique_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
   ASSERT_TRUE(copy_dict.get());
   ASSERT_NE(copy_dict.get(), &original_dict);
 
@@ -805,10 +789,10 @@
   std::unique_ptr<Value> null1(Value::CreateNullValue());
   std::unique_ptr<Value> null2(Value::CreateNullValue());
   EXPECT_NE(null1.get(), null2.get());
-  EXPECT_EQ(*null1, *null2);
+  EXPECT_TRUE(null1->Equals(null2.get()));
 
   Value boolean(false);
-  EXPECT_NE(*null1, boolean);
+  EXPECT_FALSE(null1->Equals(&boolean));
 
   DictionaryValue dv;
   dv.SetBoolean("a", false);
@@ -818,29 +802,29 @@
   dv.SetString("d2", ASCIIToUTF16("http://google.com"));
   dv.Set("e", Value::CreateNullValue());
 
-  auto copy = MakeUnique<DictionaryValue>(dv);
-  EXPECT_EQ(dv, *copy);
+  std::unique_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
+  EXPECT_TRUE(dv.Equals(copy.get()));
 
   std::unique_ptr<ListValue> list(new ListValue);
   ListValue* original_list = list.get();
   list->Append(Value::CreateNullValue());
   list->Append(WrapUnique(new DictionaryValue));
-  auto list_copy = MakeUnique<Value>(*list);
+  std::unique_ptr<Value> list_copy(list->CreateDeepCopy());
 
   dv.Set("f", std::move(list));
-  EXPECT_NE(dv, *copy);
+  EXPECT_FALSE(dv.Equals(copy.get()));
   copy->Set("f", std::move(list_copy));
-  EXPECT_EQ(dv, *copy);
+  EXPECT_TRUE(dv.Equals(copy.get()));
 
   original_list->Append(MakeUnique<Value>(true));
-  EXPECT_NE(dv, *copy);
+  EXPECT_FALSE(dv.Equals(copy.get()));
 
   // Check if Equals detects differences in only the keys.
-  copy = MakeUnique<DictionaryValue>(dv);
-  EXPECT_EQ(dv, *copy);
+  copy = dv.CreateDeepCopy();
+  EXPECT_TRUE(dv.Equals(copy.get()));
   copy->Remove("a", NULL);
   copy->SetBoolean("aa", false);
-  EXPECT_NE(dv, *copy);
+  EXPECT_FALSE(dv.Equals(copy.get()));
 }
 
 TEST(ValuesTest, StaticEquals) {
@@ -866,126 +850,6 @@
   EXPECT_FALSE(Value::Equals(NULL, null1.get()));
 }
 
-TEST(ValuesTest, Comparisons) {
-  // Test None Values.
-  Value null1;
-  Value null2;
-  EXPECT_EQ(null1, null2);
-  EXPECT_FALSE(null1 != null2);
-  EXPECT_FALSE(null1 < null2);
-  EXPECT_FALSE(null1 > null2);
-  EXPECT_LE(null1, null2);
-  EXPECT_GE(null1, null2);
-
-  // Test Bool Values.
-  Value bool1(false);
-  Value bool2(true);
-  EXPECT_FALSE(bool1 == bool2);
-  EXPECT_NE(bool1, bool2);
-  EXPECT_LT(bool1, bool2);
-  EXPECT_FALSE(bool1 > bool2);
-  EXPECT_LE(bool1, bool2);
-  EXPECT_FALSE(bool1 >= bool2);
-
-  // Test Int Values.
-  Value int1(1);
-  Value int2(2);
-  EXPECT_FALSE(int1 == int2);
-  EXPECT_NE(int1, int2);
-  EXPECT_LT(int1, int2);
-  EXPECT_FALSE(int1 > int2);
-  EXPECT_LE(int1, int2);
-  EXPECT_FALSE(int1 >= int2);
-
-  // Test Double Values.
-  Value double1(1.0);
-  Value double2(2.0);
-  EXPECT_FALSE(double1 == double2);
-  EXPECT_NE(double1, double2);
-  EXPECT_LT(double1, double2);
-  EXPECT_FALSE(double1 > double2);
-  EXPECT_LE(double1, double2);
-  EXPECT_FALSE(double1 >= double2);
-
-  // Test String Values.
-  Value string1("1");
-  Value string2("2");
-  EXPECT_FALSE(string1 == string2);
-  EXPECT_NE(string1, string2);
-  EXPECT_LT(string1, string2);
-  EXPECT_FALSE(string1 > string2);
-  EXPECT_LE(string1, string2);
-  EXPECT_FALSE(string1 >= string2);
-
-  // Test Binary Values.
-  Value binary1(std::vector<char>{0x01});
-  Value binary2(std::vector<char>{0x02});
-  EXPECT_FALSE(binary1 == binary2);
-  EXPECT_NE(binary1, binary2);
-  EXPECT_LT(binary1, binary2);
-  EXPECT_FALSE(binary1 > binary2);
-  EXPECT_LE(binary1, binary2);
-  EXPECT_FALSE(binary1 >= binary2);
-
-  // Test Empty List Values.
-  ListValue null_list1;
-  ListValue null_list2;
-  EXPECT_EQ(null_list1, null_list2);
-  EXPECT_FALSE(null_list1 != null_list2);
-  EXPECT_FALSE(null_list1 < null_list2);
-  EXPECT_FALSE(null_list1 > null_list2);
-  EXPECT_LE(null_list1, null_list2);
-  EXPECT_GE(null_list1, null_list2);
-
-  // Test Non Empty List Values.
-  ListValue int_list1;
-  ListValue int_list2;
-  int_list1.AppendInteger(1);
-  int_list2.AppendInteger(2);
-  EXPECT_FALSE(int_list1 == int_list2);
-  EXPECT_NE(int_list1, int_list2);
-  EXPECT_LT(int_list1, int_list2);
-  EXPECT_FALSE(int_list1 > int_list2);
-  EXPECT_LE(int_list1, int_list2);
-  EXPECT_FALSE(int_list1 >= int_list2);
-
-  // Test Empty Dict Values.
-  DictionaryValue null_dict1;
-  DictionaryValue null_dict2;
-  EXPECT_EQ(null_dict1, null_dict2);
-  EXPECT_FALSE(null_dict1 != null_dict2);
-  EXPECT_FALSE(null_dict1 < null_dict2);
-  EXPECT_FALSE(null_dict1 > null_dict2);
-  EXPECT_LE(null_dict1, null_dict2);
-  EXPECT_GE(null_dict1, null_dict2);
-
-  // Test Non Empty Dict Values.
-  DictionaryValue int_dict1;
-  DictionaryValue int_dict2;
-  int_dict1.SetInteger("key", 1);
-  int_dict2.SetInteger("key", 2);
-  EXPECT_FALSE(int_dict1 == int_dict2);
-  EXPECT_NE(int_dict1, int_dict2);
-  EXPECT_LT(int_dict1, int_dict2);
-  EXPECT_FALSE(int_dict1 > int_dict2);
-  EXPECT_LE(int_dict1, int_dict2);
-  EXPECT_FALSE(int_dict1 >= int_dict2);
-
-  // Test Values of different types.
-  std::vector<Value> values = {null1,   bool1,   int1,      double1,
-                               string1, binary1, int_dict1, int_list1};
-  for (size_t i = 0; i < values.size(); ++i) {
-    for (size_t j = i + 1; j < values.size(); ++j) {
-      EXPECT_FALSE(values[i] == values[j]);
-      EXPECT_NE(values[i], values[j]);
-      EXPECT_LT(values[i], values[j]);
-      EXPECT_FALSE(values[i] > values[j]);
-      EXPECT_LE(values[i], values[j]);
-      EXPECT_FALSE(values[i] >= values[j]);
-    }
-  }
-}
-
 TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
   DictionaryValue original_dict;
   std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
@@ -1021,25 +885,25 @@
   scoped_list->Append(std::move(scoped_list_element_1));
   original_dict.Set("list", std::move(scoped_list));
 
-  auto copy_dict = MakeUnique<Value>(original_dict);
-  auto copy_null = MakeUnique<Value>(*original_null);
-  auto copy_bool = MakeUnique<Value>(*original_bool);
-  auto copy_int = MakeUnique<Value>(*original_int);
-  auto copy_double = MakeUnique<Value>(*original_double);
-  auto copy_string = MakeUnique<Value>(*original_string);
-  auto copy_string16 = MakeUnique<Value>(*original_string16);
-  auto copy_binary = MakeUnique<Value>(*original_binary);
-  auto copy_list = MakeUnique<Value>(*original_list);
+  std::unique_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
+  std::unique_ptr<Value> copy_null = original_null->CreateDeepCopy();
+  std::unique_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
+  std::unique_ptr<Value> copy_int = original_int->CreateDeepCopy();
+  std::unique_ptr<Value> copy_double = original_double->CreateDeepCopy();
+  std::unique_ptr<Value> copy_string = original_string->CreateDeepCopy();
+  std::unique_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
+  std::unique_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
+  std::unique_ptr<Value> copy_list = original_list->CreateDeepCopy();
 
-  EXPECT_EQ(original_dict, *copy_dict);
-  EXPECT_EQ(*original_null, *copy_null);
-  EXPECT_EQ(*original_bool, *copy_bool);
-  EXPECT_EQ(*original_int, *copy_int);
-  EXPECT_EQ(*original_double, *copy_double);
-  EXPECT_EQ(*original_string, *copy_string);
-  EXPECT_EQ(*original_string16, *copy_string16);
-  EXPECT_EQ(*original_binary, *copy_binary);
-  EXPECT_EQ(*original_list, *copy_list);
+  EXPECT_TRUE(original_dict.Equals(copy_dict.get()));
+  EXPECT_TRUE(original_null->Equals(copy_null.get()));
+  EXPECT_TRUE(original_bool->Equals(copy_bool.get()));
+  EXPECT_TRUE(original_int->Equals(copy_int.get()));
+  EXPECT_TRUE(original_double->Equals(copy_double.get()));
+  EXPECT_TRUE(original_string->Equals(copy_string.get()));
+  EXPECT_TRUE(original_string16->Equals(copy_string16.get()));
+  EXPECT_TRUE(original_binary->Equals(copy_binary.get()));
+  EXPECT_TRUE(original_list->Equals(copy_list.get()));
 }
 
 TEST(ValuesTest, RemoveEmptyChildren) {
@@ -1204,27 +1068,27 @@
   }
 
   Value value1("value1");
-  dict.Set("key1", MakeUnique<Value>(value1));
+  dict.Set("key1", value1.CreateDeepCopy());
   bool seen1 = false;
   for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
     EXPECT_FALSE(seen1);
     EXPECT_EQ("key1", it.key());
-    EXPECT_EQ(value1, it.value());
+    EXPECT_TRUE(value1.Equals(&it.value()));
     seen1 = true;
   }
   EXPECT_TRUE(seen1);
 
   Value value2("value2");
-  dict.Set("key2", MakeUnique<Value>(value2));
+  dict.Set("key2", value2.CreateDeepCopy());
   bool seen2 = seen1 = false;
   for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
     if (it.key() == "key1") {
       EXPECT_FALSE(seen1);
-      EXPECT_EQ(value1, it.value());
+      EXPECT_TRUE(value1.Equals(&it.value()));
       seen1 = true;
     } else if (it.key() == "key2") {
       EXPECT_FALSE(seen2);
-      EXPECT_EQ(value2, it.value());
+      EXPECT_TRUE(value2.Equals(&it.value()));
       seen2 = true;
     } else {
       ADD_FAILURE();
@@ -1248,21 +1112,21 @@
   DictionaryValue dict_value;
   ListValue list_value;
 
-  main_dict.Set("bool", MakeUnique<Value>(bool_value));
-  main_dict.Set("int", MakeUnique<Value>(int_value));
-  main_dict.Set("double", MakeUnique<Value>(double_value));
-  main_dict.Set("string", MakeUnique<Value>(string_value));
-  main_dict.Set("binary", MakeUnique<Value>(binary_value));
-  main_dict.Set("dict", MakeUnique<Value>(dict_value));
-  main_dict.Set("list", MakeUnique<Value>(list_value));
+  main_dict.Set("bool", bool_value.CreateDeepCopy());
+  main_dict.Set("int", int_value.CreateDeepCopy());
+  main_dict.Set("double", double_value.CreateDeepCopy());
+  main_dict.Set("string", string_value.CreateDeepCopy());
+  main_dict.Set("binary", binary_value.CreateDeepCopy());
+  main_dict.Set("dict", dict_value.CreateDeepCopy());
+  main_dict.Set("list", list_value.CreateDeepCopy());
 
-  main_list.Append(MakeUnique<Value>(bool_value));
-  main_list.Append(MakeUnique<Value>(int_value));
-  main_list.Append(MakeUnique<Value>(double_value));
-  main_list.Append(MakeUnique<Value>(string_value));
-  main_list.Append(MakeUnique<Value>(binary_value));
-  main_list.Append(MakeUnique<Value>(dict_value));
-  main_list.Append(MakeUnique<Value>(list_value));
+  main_list.Append(bool_value.CreateDeepCopy());
+  main_list.Append(int_value.CreateDeepCopy());
+  main_list.Append(double_value.CreateDeepCopy());
+  main_list.Append(string_value.CreateDeepCopy());
+  main_list.Append(binary_value.CreateDeepCopy());
+  main_list.Append(dict_value.CreateDeepCopy());
+  main_list.Append(list_value.CreateDeepCopy());
 
   EXPECT_TRUE(main_dict.Get("bool", NULL));
   EXPECT_TRUE(main_dict.Get("int", NULL));
diff --git a/base/win/scoped_comptr.h b/base/win/scoped_comptr.h
index d4aaa84..9442672 100644
--- a/base/win/scoped_comptr.h
+++ b/base/win/scoped_comptr.h
@@ -5,18 +5,19 @@
 #ifndef BASE_WIN_SCOPED_COMPTR_H_
 #define BASE_WIN_SCOPED_COMPTR_H_
 
-#include <objbase.h>
 #include <unknwn.h>
 
 #include "base/logging.h"
+#include "base/memory/ref_counted.h"
 
 namespace base {
 namespace win {
 
-// DEPRECATED: Use Microsoft::WRL::ComPtr instead.
 // A fairly minimalistic smart class for COM interface pointers.
+// Uses scoped_refptr for the basic smart pointer functionality
+// and adds a few IUnknown specific services.
 template <class Interface, const IID* interface_id = &__uuidof(Interface)>
-class ScopedComPtr {
+class ScopedComPtr : public scoped_refptr<Interface> {
  public:
   // Utility template to prevent users of ScopedComPtr from calling AddRef
   // and/or Release() without going through the ScopedComPtr class.
@@ -27,17 +28,16 @@
     STDMETHOD_(ULONG, Release)() = 0;
   };
 
+  typedef scoped_refptr<Interface> ParentClass;
+
   ScopedComPtr() {
   }
 
-  explicit ScopedComPtr(Interface* p) : ptr_(p) {
-    if (ptr_)
-      ptr_->AddRef();
+  explicit ScopedComPtr(Interface* p) : ParentClass(p) {
   }
 
-  ScopedComPtr(const ScopedComPtr<Interface, interface_id>& p) : ptr_(p.get()) {
-    if (ptr_)
-      ptr_->AddRef();
+  ScopedComPtr(const ScopedComPtr<Interface, interface_id>& p)
+      : ParentClass(p) {
   }
 
   ~ScopedComPtr() {
@@ -46,37 +46,31 @@
     static_assert(
         sizeof(ScopedComPtr<Interface, interface_id>) == sizeof(Interface*),
         "ScopedComPtrSize");
-    Release();
   }
 
-  Interface* get() const { return ptr_; }
-
-  explicit operator bool() const { return ptr_ != nullptr; }
-
   // Explicit Release() of the held object.  Useful for reuse of the
   // ScopedComPtr instance.
   // Note that this function equates to IUnknown::Release and should not
   // be confused with e.g. unique_ptr::release().
   void Release() {
-    Interface* temp = ptr_;
-    if (temp) {
-      ptr_ = nullptr;
-      temp->Release();
+    if (this->ptr_ != NULL) {
+      this->ptr_->Release();
+      this->ptr_ = NULL;
     }
   }
 
   // Sets the internal pointer to NULL and returns the held object without
   // releasing the reference.
   Interface* Detach() {
-    Interface* p = ptr_;
-    ptr_ = nullptr;
+    Interface* p = this->ptr_;
+    this->ptr_ = NULL;
     return p;
   }
 
   // Accepts an interface pointer that has already been addref-ed.
   void Attach(Interface* p) {
-    DCHECK(!ptr_);
-    ptr_ = p;
+    DCHECK(!this->ptr_);
+    this->ptr_ = p;
   }
 
   // Retrieves the pointer address.
@@ -84,8 +78,8 @@
   // The function DCHECKs on the current value being NULL.
   // Usage: Foo(p.Receive());
   Interface** Receive() {
-    DCHECK(!ptr_) << "Object leak. Pointer must be NULL";
-    return &ptr_;
+    DCHECK(!this->ptr_) << "Object leak. Pointer must be NULL";
+    return &this->ptr_;
   }
 
   // A convenience for whenever a void pointer is needed as an out argument.
@@ -95,51 +89,50 @@
 
   template <class Query>
   HRESULT QueryInterface(Query** p) {
-    DCHECK(p);
-    DCHECK(ptr_);
+    DCHECK(p != NULL);
+    DCHECK(this->ptr_ != NULL);
     // IUnknown already has a template version of QueryInterface
     // so the iid parameter is implicit here. The only thing this
     // function adds are the DCHECKs.
-    return ptr_->QueryInterface(IID_PPV_ARGS(p));
+    return this->ptr_->QueryInterface(p);
   }
 
   // QI for times when the IID is not associated with the type.
   HRESULT QueryInterface(const IID& iid, void** obj) {
-    DCHECK(obj);
-    DCHECK(ptr_);
-    return ptr_->QueryInterface(iid, obj);
+    DCHECK(obj != NULL);
+    DCHECK(this->ptr_ != NULL);
+    return this->ptr_->QueryInterface(iid, obj);
   }
 
   // Queries |object| for the interface this object wraps and returns the
   // error code from the object->QueryInterface operation.
   HRESULT QueryFrom(IUnknown* object) {
-    DCHECK(object);
-    return object->QueryInterface(IID_PPV_ARGS(Receive()));
+    DCHECK(object != NULL);
+    return object->QueryInterface(Receive());
   }
 
   // Convenience wrapper around CoCreateInstance
-  HRESULT CreateInstance(const CLSID& clsid,
-                         IUnknown* outer = nullptr,
+  HRESULT CreateInstance(const CLSID& clsid, IUnknown* outer = NULL,
                          DWORD context = CLSCTX_ALL) {
-    DCHECK(!ptr_);
+    DCHECK(!this->ptr_);
     HRESULT hr = ::CoCreateInstance(clsid, outer, context, *interface_id,
-                                    reinterpret_cast<void**>(&ptr_));
+                                    reinterpret_cast<void**>(&this->ptr_));
     return hr;
   }
 
   // Checks if the identity of |other| and this object is the same.
   bool IsSameObject(IUnknown* other) {
-    if (!other && !ptr_)
+    if (!other && !this->ptr_)
       return true;
 
-    if (!other || !ptr_)
+    if (!other || !this->ptr_)
       return false;
 
     ScopedComPtr<IUnknown> my_identity;
-    QueryInterface(IID_PPV_ARGS(my_identity.Receive()));
+    QueryInterface(my_identity.Receive());
 
     ScopedComPtr<IUnknown> other_identity;
-    other->QueryInterface(IID_PPV_ARGS(other_identity.Receive()));
+    other->QueryInterface(other_identity.Receive());
 
     return my_identity == other_identity;
   }
@@ -155,115 +148,20 @@
   // by statically casting the ScopedComPtr instance to the wrapped interface
   // and then making the call... but generally that shouldn't be necessary.
   BlockIUnknownMethods* operator->() const {
-    DCHECK(ptr_);
-    return reinterpret_cast<BlockIUnknownMethods*>(ptr_);
+    DCHECK(this->ptr_ != NULL);
+    return reinterpret_cast<BlockIUnknownMethods*>(this->ptr_);
   }
 
-  ScopedComPtr<Interface, interface_id>& operator=(Interface* rhs) {
-    // AddRef first so that self assignment should work
-    if (rhs)
-      rhs->AddRef();
-    Interface* old_ptr = ptr_;
-    ptr_ = rhs;
-    if (old_ptr)
-      old_ptr->Release();
-    return *this;
-  }
-
-  ScopedComPtr<Interface, interface_id>& operator=(
-      const ScopedComPtr<Interface, interface_id>& rhs) {
-    return *this = rhs.ptr_;
-  }
-
-  Interface& operator*() const {
-    DCHECK(ptr_);
-    return *ptr_;
-  }
-
-  bool operator==(const ScopedComPtr<Interface, interface_id>& rhs) const {
-    return ptr_ == rhs.get();
-  }
-
-  template <typename U>
-  bool operator==(const ScopedComPtr<U>& rhs) const {
-    return ptr_ == rhs.get();
-  }
-
-  template <typename U>
-  bool operator==(const U* rhs) const {
-    return ptr_ == rhs;
-  }
-
-  bool operator!=(const ScopedComPtr<Interface, interface_id>& rhs) const {
-    return ptr_ != rhs.get();
-  }
-
-  template <typename U>
-  bool operator!=(const ScopedComPtr<U>& rhs) const {
-    return ptr_ != rhs.get();
-  }
-
-  template <typename U>
-  bool operator!=(const U* rhs) const {
-    return ptr_ != rhs;
-  }
-
-  void swap(ScopedComPtr<Interface, interface_id>& r) {
-    Interface* tmp = ptr_;
-    ptr_ = r.ptr_;
-    r.ptr_ = tmp;
-  }
+  // Pull in operator=() from the parent class.
+  using scoped_refptr<Interface>::operator=;
 
   // static methods
+
   static const IID& iid() {
     return *interface_id;
   }
-
- private:
-  Interface* ptr_ = nullptr;
 };
 
-template <typename T, typename U>
-bool operator==(const T* lhs, const ScopedComPtr<U>& rhs) {
-  return lhs == rhs.get();
-}
-
-template <typename T>
-bool operator==(const ScopedComPtr<T>& lhs, std::nullptr_t null) {
-  return !static_cast<bool>(lhs);
-}
-
-template <typename T>
-bool operator==(std::nullptr_t null, const ScopedComPtr<T>& rhs) {
-  return !static_cast<bool>(rhs);
-}
-
-template <typename T, typename U>
-bool operator!=(const T* lhs, const ScopedComPtr<U>& rhs) {
-  return !operator==(lhs, rhs);
-}
-
-template <typename T>
-bool operator!=(const ScopedComPtr<T>& lhs, std::nullptr_t null) {
-  return !operator==(lhs, null);
-}
-
-template <typename T>
-bool operator!=(std::nullptr_t null, const ScopedComPtr<T>& rhs) {
-  return !operator==(null, rhs);
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& out, const ScopedComPtr<T>& p) {
-  return out << p.get();
-}
-
-// Helper to make IID_PPV_ARGS work with ScopedComPtr.
-template <typename T>
-void** IID_PPV_ARGS_Helper(base::win::ScopedComPtr<T>* pp) throw() {
-  return pp->ReceiveVoid();
-}
-
 }  // namespace win
 }  // namespace base
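
With the revert, ScopedComPtr is again a scoped_refptr subclass whose
Receive() exposes the raw pointer slot for COM out-parameters. A typical
call-site sketch built from the methods above; Windows-only, assumes COM is
already initialized on the calling thread, and uses the stock
IFileOpenDialog/CLSID_FileOpenDialog from the Windows SDK purely as an
example interface:

#include <shobjidl.h>

#include "base/win/scoped_comptr.h"

// Create a COM object into an empty ScopedComPtr and use it; the reference
// is released automatically when |dialog| goes out of scope.
bool ShowOpenDialog() {
  base::win::ScopedComPtr<IFileOpenDialog> dialog;
  HRESULT hr = dialog.CreateInstance(CLSID_FileOpenDialog);
  if (FAILED(hr))
    return false;
  return SUCCEEDED(dialog->Show(NULL));
}
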