Revert "libchrome: Uprev the library to r456626 from Chromium"
am: e5b2c6fa6f

Change-Id: I819960740d65600c933289f480ab6a9e53a88c62
diff --git a/Android.bp b/Android.bp
index ecf12a6..ed7e283 100644
--- a/Android.bp
+++ b/Android.bp
@@ -77,19 +77,14 @@
     "base/callback_internal.cc",
     "base/command_line.cc",
     "base/cpu.cc",
-    "base/debug/activity_tracker.cc",
     "base/debug/alias.cc",
     "base/debug/debugger.cc",
     "base/debug/debugger_posix.cc",
-    "base/debug/dump_without_crashing.cc",
-    "base/debug/profiler.cc",
     "base/debug/stack_trace.cc",
     "base/debug/stack_trace_posix.cc",
     "base/debug/task_annotator.cc",
     "base/environment.cc",
-    "base/feature_list.cc",
     "base/files/file.cc",
-    "base/files/file_descriptor_watcher_posix.cc",
     "base/files/file_enumerator.cc",
     "base/files/file_enumerator_posix.cc",
     "base/files/file_path.cc",
@@ -120,8 +115,6 @@
     "base/memory/aligned_memory.cc",
     "base/memory/ref_counted.cc",
     "base/memory/ref_counted_memory.cc",
-    "base/memory/shared_memory_helper.cc",
-    "base/memory/shared_memory_tracker.cc",
     "base/memory/singleton.cc",
     "base/memory/weak_ptr.cc",
     "base/message_loop/incoming_task_queue.cc",
@@ -132,7 +125,6 @@
     "base/message_loop/message_pump_libevent.cc",
     "base/metrics/bucket_ranges.cc",
     "base/metrics/field_trial.cc",
-    "base/metrics/field_trial_param_associator.cc",
     "base/metrics/metrics_hashes.cc",
     "base/metrics/histogram_base.cc",
     "base/metrics/histogram.cc",
@@ -147,15 +139,12 @@
     "base/metrics/statistics_recorder.cc",
     "base/pending_task.cc",
     "base/pickle.cc",
-    "base/posix/global_descriptors.cc",
     "base/posix/file_descriptor_shuffle.cc",
     "base/posix/safe_strerror.cc",
     "base/process/kill.cc",
     "base/process/kill_posix.cc",
     "base/process/launch.cc",
     "base/process/launch_posix.cc",
-    "base/process/memory.cc",
-    "base/process/memory_linux.cc",
     "base/process/process_handle.cc",
     "base/process/process_handle_posix.cc",
     "base/process/process_iterator.cc",
@@ -169,9 +158,8 @@
     "base/rand_util_posix.cc",
     "base/run_loop.cc",
     "base/sequence_checker_impl.cc",
-    "base/sequence_token.cc",
     "base/sequenced_task_runner.cc",
-    "base/sha1.cc",
+    "base/sha1_portable.cc",
     "base/strings/pattern.cc",
     "base/strings/safe_sprintf.cc",
     "base/strings/string16.cc",
@@ -183,7 +171,7 @@
     "base/strings/string_util_constants.cc",
     "base/strings/utf_string_conversions.cc",
     "base/strings/utf_string_conversion_utils.cc",
-    "base/synchronization/atomic_flag.cc",
+    "base/synchronization/cancellation_flag.cc",
     "base/synchronization/condition_variable_posix.cc",
     "base/synchronization/lock.cc",
     "base/synchronization/lock_impl_posix.cc",
@@ -195,12 +183,11 @@
     "base/task/cancelable_task_tracker.cc",
     "base/task_runner.cc",
     "base/task_scheduler/scheduler_lock_impl.cc",
-    "base/task_scheduler/scoped_set_task_priority_for_current_thread.cc",
     "base/task_scheduler/sequence.cc",
     "base/task_scheduler/sequence_sort_key.cc",
     "base/task_scheduler/task.cc",
     "base/task_scheduler/task_traits.cc",
-    "base/third_party/dynamic_annotations/dynamic_annotations.c",
+    "base/test/trace_event_analyzer.cc",
     "base/third_party/icu/icu_utf.cc",
     "base/third_party/nspr/prtime.cc",
     "base/threading/non_thread_safe_impl.cc",
@@ -213,6 +200,7 @@
     "base/threading/thread_checker_impl.cc",
     "base/threading/thread_collision_warner.cc",
     "base/threading/thread_id_name_manager.cc",
+    "base/threading/thread_local_posix.cc",
     "base/threading/thread_local_storage.cc",
     "base/threading/thread_local_storage_posix.cc",
     "base/threading/thread_restrictions.cc",
@@ -227,13 +215,10 @@
     "base/time/time_posix.cc",
     "base/timer/elapsed_timer.cc",
     "base/timer/timer.cc",
-    "base/trace_event/category_registry.cc",
-    "base/trace_event/event_name_filter.cc",
     "base/trace_event/heap_profiler_allocation_context.cc",
     "base/trace_event/heap_profiler_allocation_context_tracker.cc",
     "base/trace_event/heap_profiler_allocation_register.cc",
     "base/trace_event/heap_profiler_allocation_register_posix.cc",
-    "base/trace_event/heap_profiler_event_filter.cc",
     "base/trace_event/heap_profiler_heap_dump_writer.cc",
     "base/trace_event/heap_profiler_stack_frame_deduplicator.cc",
     "base/trace_event/heap_profiler_type_name_deduplicator.cc",
@@ -242,22 +227,20 @@
     "base/trace_event/memory_allocator_dump_guid.cc",
     "base/trace_event/memory_dump_manager.cc",
     "base/trace_event/memory_dump_request_args.cc",
-    "base/trace_event/memory_dump_scheduler.cc",
     "base/trace_event/memory_dump_session_state.cc",
     "base/trace_event/memory_infra_background_whitelist.cc",
-    "base/trace_event/memory_usage_estimator.cc",
     "base/trace_event/process_memory_dump.cc",
     "base/trace_event/process_memory_maps.cc",
     "base/trace_event/process_memory_totals.cc",
     "base/trace_event/trace_buffer.cc",
     "base/trace_event/trace_config.cc",
     "base/trace_event/trace_event_argument.cc",
-    "base/trace_event/trace_event_filter.cc",
     "base/trace_event/trace_event_impl.cc",
     "base/trace_event/trace_event_memory_overhead.cc",
     "base/trace_event/trace_event_synthetic_delay.cc",
     "base/trace_event/trace_log.cc",
     "base/trace_event/trace_log_constants.cc",
+    "base/trace_event/trace_sampling_thread.cc",
     "base/tracked_objects.cc",
     "base/tracking_info.cc",
     "base/values.cc",
@@ -381,7 +364,6 @@
     host_supported: true,
 
     srcs: [
-        "base/test/gtest_util.cc",
         "base/test/simple_test_clock.cc",
         "base/test/simple_test_tick_clock.cc",
         "base/test/test_file_util.cc",
@@ -434,13 +416,12 @@
         "base/cancelable_callback_unittest.cc",
         "base/command_line_unittest.cc",
         "base/cpu_unittest.cc",
-        "base/debug/activity_tracker_unittest.cc",
         "base/debug/debugger_unittest.cc",
         "base/debug/leak_tracker_unittest.cc",
         "base/debug/task_annotator_unittest.cc",
         "base/environment_unittest.cc",
+        "base/file_version_info_unittest.cc",
         "base/files/dir_reader_posix_unittest.cc",
-        "base/files/file_descriptor_watcher_posix_unittest.cc",
         "base/files/file_path_watcher_unittest.cc",
         "base/files/file_path_unittest.cc",
         "base/files/file_unittest.cc",
@@ -495,7 +476,6 @@
         "base/scoped_generic_unittest.cc",
         "base/security_unittest.cc",
         "base/sequence_checker_unittest.cc",
-	"base/sequence_token_unittest.cc",
         "base/sha1_unittest.cc",
         "base/stl_util_unittest.cc",
         "base/strings/pattern_unittest.cc",
@@ -507,7 +487,7 @@
         "base/strings/string_util_unittest.cc",
         "base/strings/sys_string_conversions_unittest.cc",
         "base/strings/utf_string_conversions_unittest.cc",
-        "base/synchronization/atomic_flag_unittest.cc",
+        "base/synchronization/cancellation_flag_unittest.cc",
         "base/synchronization/condition_variable_unittest.cc",
         "base/synchronization/lock_unittest.cc",
         "base/synchronization/waitable_event_unittest.cc",
@@ -516,22 +496,19 @@
         "base/task/cancelable_task_tracker_unittest.cc",
         "base/task_runner_util_unittest.cc",
         "base/task_scheduler/scheduler_lock_unittest.cc",
-        "base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc",
         "base/task_scheduler/sequence_sort_key_unittest.cc",
         "base/task_scheduler/sequence_unittest.cc",
         "base/task_scheduler/task_traits.cc",
         "base/template_util_unittest.cc",
-        "base/test/mock_entropy_provider.cc",
         "base/test/multiprocess_test.cc",
+        "base/test/multiprocess_test_android.cc",
         "base/test/opaque_ref_counted.cc",
-        "base/test/scoped_feature_list.cc",
         "base/test/scoped_locale.cc",
         "base/test/sequenced_worker_pool_owner.cc",
         "base/test/test_file_util.cc",
         "base/test/test_file_util_linux.cc",
         "base/test/test_file_util_posix.cc",
         "base/test/test_io_thread.cc",
-        "base/test/test_mock_time_task_runner.cc",
         "base/test/test_pending_task.cc",
         "base/test/test_simple_task_runner.cc",
         "base/test/test_switches.cc",
@@ -552,17 +529,14 @@
         "base/time/time_unittest.cc",
         "base/timer/hi_res_timer_manager_unittest.cc",
         "base/timer/timer_unittest.cc",
-        "base/trace_event/event_name_filter_unittest.cc",
         "base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
         "base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc",
         "base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc",
         "base/trace_event/memory_allocator_dump_unittest.cc",
         "base/trace_event/memory_dump_manager_unittest.cc",
-        "base/trace_event/memory_usage_estimator_unittest.cc",
         "base/trace_event/process_memory_dump_unittest.cc",
         "base/trace_event/trace_config_unittest.cc",
         "base/trace_event/trace_event_argument_unittest.cc",
-        "base/trace_event/trace_event_filter_test_utils.cc",
         "base/trace_event/trace_event_synthetic_delay_unittest.cc",
         "base/trace_event/trace_event_unittest.cc",
         "base/tracked_objects_unittest.cc",
diff --git a/SConstruct b/SConstruct
index 49cef6f..72e022e 100644
--- a/SConstruct
+++ b/SConstruct
@@ -270,6 +270,7 @@
     'name' : 'crypto',
     'sources' : """
                 hmac.cc
+                hmac_nss.cc
                 nss_key_util.cc
                 nss_util.cc
                 openssl_util.cc
@@ -282,9 +283,9 @@
                 secure_hash.cc
                 secure_util.cc
                 sha2.cc
-                signature_creator.cc
-                signature_verifier.cc
-                symmetric_key.cc
+                signature_creator_nss.cc
+                signature_verifier_nss.cc
+                symmetric_key_nss.cc
                 third_party/nss/rsawrapr.c
                 third_party/nss/sha512.cc
                 """,
@@ -308,7 +309,7 @@
                 linux/seccomp-bpf/trap.cc
 
                 linux/seccomp-bpf-helpers/baseline_policy.cc
-    linux/seccomp-bpf-helpers/sigsys_handlers.cc
+                linux/seccomp-bpf-helpers/sigsys_handlers.cc
                 linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
                 linux/seccomp-bpf-helpers/syscall_sets.cc
 
diff --git a/base/BUILD.gn b/base/BUILD.gn
index f84856d..c147989 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -13,22 +13,18 @@
 # unpredictably for the various build types, we prefer a slightly different
 # style. Instead, there are big per-platform blocks of inclusions and
 # exclusions. If a given file has an inclusion or exclusion rule that applies
-# for multiple conditions, prefer to duplicate it in both lists. This makes it
+# for multiple conditions, perfer to duplicate it in both lists. This makes it
 # a bit easier to see which files apply in which cases rather than having a
 # huge sequence of random-looking conditionals.
 
 import("//build/buildflag_header.gni")
 import("//build/config/allocator.gni")
-import("//build/config/arm.gni")
 import("//build/config/chromecast_build.gni")
-import("//build/config/clang/clang.gni")
 import("//build/config/compiler/compiler.gni")
-import("//build/config/dcheck_always_on.gni")
 import("//build/config/nacl/config.gni")
 import("//build/config/sysroot.gni")
 import("//build/config/ui.gni")
 import("//build/nocompile.gni")
-import("//testing/libfuzzer/fuzzer_test.gni")
 import("//testing/test.gni")
 
 declare_args() {
@@ -36,20 +32,16 @@
   # See //base/build_time.cc and //build/write_build_date_header.py for more
   # details and the expected format.
   override_build_date = "N/A"
-
-  # Turn on memory profiling in the task profiler when the heap shim is
-  # available. Profiling can then be enabled at runtime by passing the command
-  # line flag --enable-heap-profiling=task-profiler.
-  enable_memory_task_profiler = use_experimental_allocator_shim
-
-  # Partition alloc is included by default except iOS.
-  use_partition_alloc = !is_ios
 }
 
 if (is_android) {
   import("//build/config/android/rules.gni")
 }
 
+if (is_win) {
+  import("//build/config/win/visual_studio_version.gni")
+}
+
 config("base_flags") {
   if (is_clang) {
     cflags = [
@@ -149,11 +141,6 @@
     "allocator/allocator_check.h",
     "allocator/allocator_extension.cc",
     "allocator/allocator_extension.h",
-    "allocator/allocator_interception_mac.h",
-    "allocator/allocator_interception_mac.mm",
-    "allocator/allocator_shim.h",
-    "allocator/malloc_zone_functions_mac.cc",
-    "allocator/malloc_zone_functions_mac.h",
     "android/animation_frame_time_histogram.cc",
     "android/animation_frame_time_histogram.h",
     "android/apk_assets.cc",
@@ -175,29 +162,23 @@
     "android/context_utils.cc",
     "android/context_utils.h",
     "android/cpu_features.cc",
-    "android/cpu_features.h",
     "android/cxa_demangle_stub.cc",
-    "android/early_trace_event_binding.cc",
-    "android/early_trace_event_binding.h",
     "android/event_log.cc",
     "android/event_log.h",
     "android/field_trial_list.cc",
     "android/field_trial_list.h",
+    "android/fifo_utils.cc",
+    "android/fifo_utils.h",
     "android/important_file_writer_android.cc",
     "android/important_file_writer_android.h",
-    "android/java_exception_reporter.cc",
-    "android/java_exception_reporter.h",
     "android/java_handler_thread.cc",
     "android/java_handler_thread.h",
-    "android/java_message_handler_factory.h",
     "android/java_runtime.cc",
     "android/java_runtime.h",
     "android/jni_android.cc",
     "android/jni_android.h",
     "android/jni_array.cc",
     "android/jni_array.h",
-    "android/jni_generator/jni_generator_helper.h",
-    "android/jni_int_wrapper.h",
     "android/jni_registrar.cc",
     "android/jni_registrar.h",
     "android/jni_string.cc",
@@ -225,24 +206,16 @@
     "android/record_user_action.h",
     "android/scoped_java_ref.cc",
     "android/scoped_java_ref.h",
-    "android/statistics_recorder_android.cc",
-    "android/statistics_recorder_android.h",
     "android/sys_utils.cc",
     "android/sys_utils.h",
-    "android/throw_uncaught_exception.cc",
-    "android/throw_uncaught_exception.h",
-    "android/time_utils.cc",
-    "android/time_utils.h",
+    "android/thread_utils.h",
     "android/trace_event_binding.cc",
     "android/trace_event_binding.h",
-    "android/unguessable_token_android.cc",
-    "android/unguessable_token_android.h",
     "at_exit.cc",
     "at_exit.h",
     "atomic_ref_count.h",
     "atomic_sequence_num.h",
     "atomicops.h",
-    "atomicops_internals_atomicword_compat.h",
     "atomicops_internals_portable.h",
     "atomicops_internals_x86_msvc.h",
     "auto_reset.h",
@@ -265,34 +238,25 @@
     "build_time.cc",
     "build_time.h",
     "callback.h",
-    "callback_forward.h",
     "callback_helpers.cc",
     "callback_helpers.h",
     "callback_internal.cc",
     "callback_internal.h",
-    "callback_list.h",
     "cancelable_callback.h",
     "command_line.cc",
     "command_line.h",
     "compiler_specific.h",
     "containers/adapters.h",
-    "containers/flat_set.h",
     "containers/hash_tables.h",
     "containers/linked_list.h",
     "containers/mru_cache.h",
+    "containers/scoped_ptr_hash_map.h",
     "containers/small_map.h",
     "containers/stack_container.h",
     "cpu.cc",
     "cpu.h",
     "critical_closure.h",
     "critical_closure_internal_ios.mm",
-
-    # This file depends on files from the "debug/allocator" target,
-    # but this target does not depend on "debug/allocator".
-    "debug/activity_analyzer.cc",
-    "debug/activity_analyzer.h",
-    "debug/activity_tracker.cc",
-    "debug/activity_tracker.h",
     "debug/alias.cc",
     "debug/alias.h",
     "debug/asan_invalid_access.cc",
@@ -309,6 +273,10 @@
     "debug/dump_without_crashing.h",
     "debug/gdi_debug_util_win.cc",
     "debug/gdi_debug_util_win.h",
+
+    # This file depends on files from the "debug/allocator" target,
+    # but this target does not depend on "debug/allocator" (see
+    # allocator.gyp for details).
     "debug/leak_annotations.h",
     "debug/leak_tracker.h",
     "debug/proc_maps_linux.cc",
@@ -322,18 +290,13 @@
     "debug/stack_trace_win.cc",
     "debug/task_annotator.cc",
     "debug/task_annotator.h",
-    "debug/thread_heap_usage_tracker.cc",
-    "debug/thread_heap_usage_tracker.h",
     "deferred_sequenced_task_runner.cc",
     "deferred_sequenced_task_runner.h",
     "environment.cc",
     "environment.h",
-    "event_types.h",
     "feature_list.cc",
     "feature_list.h",
     "file_descriptor_posix.h",
-    "file_descriptor_store.cc",
-    "file_descriptor_store.h",
     "file_version_info.h",
     "file_version_info_mac.h",
     "file_version_info_mac.mm",
@@ -343,9 +306,6 @@
     "files/dir_reader_linux.h",
     "files/dir_reader_posix.h",
     "files/file.cc",
-    "files/file.h",
-    "files/file_descriptor_watcher_posix.cc",
-    "files/file_descriptor_watcher_posix.h",
     "files/file_enumerator.cc",
     "files/file_enumerator.h",
     "files/file_enumerator_posix.cc",
@@ -385,10 +345,6 @@
     "files/memory_mapped_file_win.cc",
     "files/scoped_file.cc",
     "files/scoped_file.h",
-    "files/scoped_platform_handle.cc",
-    "files/scoped_platform_handle.h",
-    "files/scoped_platform_handle_posix.cc",
-    "files/scoped_platform_handle_win.cc",
     "files/scoped_temp_dir.cc",
     "files/scoped_temp_dir.h",
     "format_macros.h",
@@ -398,7 +354,6 @@
     "hash.cc",
     "hash.h",
     "id_map.h",
-    "ios/block_types.h",
     "ios/crb_protocol_observers.h",
     "ios/crb_protocol_observers.mm",
     "ios/device_util.h",
@@ -449,8 +404,8 @@
     "mac/dispatch_source_mach.h",
     "mac/foundation_util.h",
     "mac/foundation_util.mm",
+    "mac/launch_services_util.cc",
     "mac/launch_services_util.h",
-    "mac/launch_services_util.mm",
     "mac/launchd.cc",
     "mac/launchd.h",
     "mac/mac_logging.h",
@@ -465,8 +420,6 @@
     "mac/mach_port_util.h",
     "mac/objc_property_releaser.h",
     "mac/objc_property_releaser.mm",
-    "mac/objc_release_properties.h",
-    "mac/objc_release_properties.mm",
     "mac/os_crash_dumps.cc",
     "mac/os_crash_dumps.h",
     "mac/scoped_aedesc.h",
@@ -474,7 +427,6 @@
     "mac/scoped_block.h",
     "mac/scoped_cftyperef.h",
     "mac/scoped_dispatch_object.h",
-    "mac/scoped_ionotificationportref.h",
     "mac/scoped_ioobject.h",
     "mac/scoped_ioplugininterface.h",
     "mac/scoped_launch_data.h",
@@ -506,12 +458,6 @@
     "memory/free_deleter.h",
     "memory/linked_ptr.h",
     "memory/manual_constructor.h",
-    "memory/memory_coordinator_client.cc",
-    "memory/memory_coordinator_client.h",
-    "memory/memory_coordinator_client_registry.cc",
-    "memory/memory_coordinator_client_registry.h",
-    "memory/memory_coordinator_proxy.cc",
-    "memory/memory_coordinator_proxy.h",
     "memory/memory_pressure_listener.cc",
     "memory/memory_pressure_listener.h",
     "memory/memory_pressure_monitor.cc",
@@ -526,7 +472,7 @@
     "memory/raw_scoped_refptr_mismatch_checker.h",
     "memory/ref_counted.cc",
     "memory/ref_counted.h",
-    "memory/ref_counted_delete_on_sequence.h",
+    "memory/ref_counted_delete_on_message_loop.h",
     "memory/ref_counted_memory.cc",
     "memory/ref_counted_memory.h",
     "memory/scoped_policy.h",
@@ -536,8 +482,6 @@
     "memory/shared_memory_handle.h",
     "memory/shared_memory_handle_mac.cc",
     "memory/shared_memory_handle_win.cc",
-    "memory/shared_memory_helper.cc",
-    "memory/shared_memory_helper.h",
     "memory/shared_memory_mac.cc",
     "memory/shared_memory_nacl.cc",
     "memory/shared_memory_posix.cc",
@@ -568,15 +512,10 @@
     "message_loop/message_pump_mac.mm",
     "message_loop/message_pump_win.cc",
     "message_loop/message_pump_win.h",
-    "message_loop/timer_slack.h",
     "metrics/bucket_ranges.cc",
     "metrics/bucket_ranges.h",
     "metrics/field_trial.cc",
     "metrics/field_trial.h",
-    "metrics/field_trial_param_associator.cc",
-    "metrics/field_trial_param_associator.h",
-    "metrics/field_trial_params.cc",
-    "metrics/field_trial_params.h",
     "metrics/histogram.cc",
     "metrics/histogram.h",
     "metrics/histogram_base.cc",
@@ -584,11 +523,7 @@
     "metrics/histogram_delta_serialization.cc",
     "metrics/histogram_delta_serialization.h",
     "metrics/histogram_flattener.h",
-    "metrics/histogram_functions.cc",
-    "metrics/histogram_functions.h",
     "metrics/histogram_macros.h",
-    "metrics/histogram_macros_internal.h",
-    "metrics/histogram_macros_local.h",
     "metrics/histogram_samples.cc",
     "metrics/histogram_samples.h",
     "metrics/histogram_snapshot_manager.cc",
@@ -612,7 +547,6 @@
     "metrics/user_metrics.cc",
     "metrics/user_metrics.h",
     "metrics/user_metrics_action.h",
-    "native_library.cc",
     "native_library.h",
     "native_library_ios.mm",
     "native_library_mac.mm",
@@ -626,8 +560,6 @@
     "numerics/safe_conversions_impl.h",
     "numerics/safe_math.h",
     "numerics/safe_math_impl.h",
-    "numerics/saturated_arithmetic.h",
-    "numerics/saturated_arithmetic_arm.h",
     "observer_list.h",
     "observer_list_threadsafe.h",
     "optional.h",
@@ -643,18 +575,23 @@
     "pickle.h",
     "posix/eintr_wrapper.h",
     "posix/file_descriptor_shuffle.cc",
-    "posix/file_descriptor_shuffle.h",
     "posix/global_descriptors.cc",
     "posix/global_descriptors.h",
     "posix/safe_strerror.cc",
     "posix/safe_strerror.h",
     "posix/unix_domain_socket_linux.cc",
     "posix/unix_domain_socket_linux.h",
-    "post_task_and_reply_with_result_internal.h",
     "power_monitor/power_monitor.cc",
     "power_monitor/power_monitor.h",
     "power_monitor/power_monitor_device_source.cc",
     "power_monitor/power_monitor_device_source.h",
+    "power_monitor/power_monitor_device_source_android.cc",
+    "power_monitor/power_monitor_device_source_android.h",
+    "power_monitor/power_monitor_device_source_chromeos.cc",
+    "power_monitor/power_monitor_device_source_ios.mm",
+    "power_monitor/power_monitor_device_source_mac.mm",
+    "power_monitor/power_monitor_device_source_posix.cc",
+    "power_monitor/power_monitor_device_source_win.cc",
     "power_monitor/power_monitor_source.cc",
     "power_monitor/power_monitor_source.h",
     "power_monitor/power_observer.h",
@@ -680,7 +617,6 @@
     "process/port_provider_mac.h",
     "process/process.h",
     "process/process_handle.cc",
-    "process/process_handle.h",
 
     #"process/process_handle_freebsd.cc",  # Unused in Chromium build.
     "process/process_handle_linux.cc",
@@ -703,7 +639,6 @@
     #"process/process_iterator_openbsd.cc",  # Unused in Chromium build.
     "process/process_iterator_win.cc",
     "process/process_linux.cc",
-    "process/process_mac.cc",
     "process/process_metrics.cc",
     "process/process_metrics.h",
 
@@ -736,7 +671,6 @@
     "rand_util_win.cc",
     "run_loop.cc",
     "run_loop.h",
-    "scoped_clear_errno.h",
     "scoped_generic.h",
     "scoped_native_library.cc",
     "scoped_native_library.h",
@@ -744,8 +678,6 @@
     "sequence_checker.h",
     "sequence_checker_impl.cc",
     "sequence_checker_impl.h",
-    "sequence_token.cc",
-    "sequence_token.h",
     "sequenced_task_runner.cc",
     "sequenced_task_runner.h",
     "sequenced_task_runner_helpers.h",
@@ -793,8 +725,7 @@
     "sync_socket.h",
     "sync_socket_posix.cc",
     "sync_socket_win.cc",
-    "synchronization/atomic_flag.cc",
-    "synchronization/atomic_flag.h",
+    "synchronization/cancellation_flag.cc",
     "synchronization/cancellation_flag.h",
     "synchronization/condition_variable.h",
     "synchronization/condition_variable_posix.cc",
@@ -820,9 +751,6 @@
     "sys_info.h",
     "sys_info_android.cc",
     "sys_info_chromeos.cc",
-    "sys_info_internal.h",
-    "syslog_logging.cc",
-    "syslog_logging.h",
 
     #"sys_info_freebsd.cc",  # Unused in Chromium build.
     "sys_info_ios.mm",
@@ -841,28 +769,20 @@
     "task_runner_util.h",
     "task_scheduler/delayed_task_manager.cc",
     "task_scheduler/delayed_task_manager.h",
-    "task_scheduler/initialization_util.cc",
-    "task_scheduler/initialization_util.h",
-    "task_scheduler/post_task.cc",
-    "task_scheduler/post_task.h",
     "task_scheduler/priority_queue.cc",
     "task_scheduler/priority_queue.h",
     "task_scheduler/scheduler_lock.h",
     "task_scheduler/scheduler_lock_impl.cc",
     "task_scheduler/scheduler_lock_impl.h",
-    "task_scheduler/scheduler_single_thread_task_runner_manager.cc",
-    "task_scheduler/scheduler_single_thread_task_runner_manager.h",
+    "task_scheduler/scheduler_service_thread.cc",
+    "task_scheduler/scheduler_service_thread.h",
     "task_scheduler/scheduler_worker.cc",
     "task_scheduler/scheduler_worker.h",
     "task_scheduler/scheduler_worker_pool.h",
     "task_scheduler/scheduler_worker_pool_impl.cc",
     "task_scheduler/scheduler_worker_pool_impl.h",
-    "task_scheduler/scheduler_worker_pool_params.cc",
-    "task_scheduler/scheduler_worker_pool_params.h",
     "task_scheduler/scheduler_worker_stack.cc",
     "task_scheduler/scheduler_worker_stack.h",
-    "task_scheduler/scoped_set_task_priority_for_current_thread.cc",
-    "task_scheduler/scoped_set_task_priority_for_current_thread.h",
     "task_scheduler/sequence.cc",
     "task_scheduler/sequence.h",
     "task_scheduler/sequence_sort_key.cc",
@@ -875,12 +795,9 @@
     "task_scheduler/task_scheduler_impl.h",
     "task_scheduler/task_tracker.cc",
     "task_scheduler/task_tracker.h",
-    "task_scheduler/task_tracker_posix.cc",
-    "task_scheduler/task_tracker_posix.h",
     "task_scheduler/task_traits.cc",
     "task_scheduler/task_traits.h",
     "template_util.h",
-    "test/malloc_wrapper.h",
     "third_party/dmg_fp/dmg_fp.h",
     "third_party/dmg_fp/dtoa_wrapper.cc",
     "third_party/dmg_fp/g_fmt.cc",
@@ -889,7 +806,6 @@
     "third_party/nspr/prtime.cc",
     "third_party/nspr/prtime.h",
     "third_party/superfasthash/superfasthash.c",
-    "third_party/valgrind/memcheck.h",
     "threading/non_thread_safe.h",
     "threading/non_thread_safe_impl.cc",
     "threading/non_thread_safe_impl.h",
@@ -919,10 +835,13 @@
     "threading/thread_id_name_manager.cc",
     "threading/thread_id_name_manager.h",
     "threading/thread_local.h",
+    "threading/thread_local_android.cc",
+    "threading/thread_local_posix.cc",
     "threading/thread_local_storage.cc",
     "threading/thread_local_storage.h",
     "threading/thread_local_storage_posix.cc",
     "threading/thread_local_storage_win.cc",
+    "threading/thread_local_win.cc",
     "threading/thread_restrictions.cc",
     "threading/thread_restrictions.h",
     "threading/thread_task_runner_handle.cc",
@@ -956,15 +875,9 @@
     "timer/mock_timer.h",
     "timer/timer.cc",
     "timer/timer.h",
-    "trace_event/auto_open_close_event.cc",
-    "trace_event/auto_open_close_event.h",
     "trace_event/blame_context.cc",
     "trace_event/blame_context.h",
-    "trace_event/category_registry.cc",
-    "trace_event/category_registry.h",
     "trace_event/common/trace_event_common.h",
-    "trace_event/event_name_filter.cc",
-    "trace_event/event_name_filter.h",
     "trace_event/heap_profiler.h",
     "trace_event/heap_profiler_allocation_context.cc",
     "trace_event/heap_profiler_allocation_context.h",
@@ -974,8 +887,6 @@
     "trace_event/heap_profiler_allocation_register.h",
     "trace_event/heap_profiler_allocation_register_posix.cc",
     "trace_event/heap_profiler_allocation_register_win.cc",
-    "trace_event/heap_profiler_event_filter.cc",
-    "trace_event/heap_profiler_event_filter.h",
     "trace_event/heap_profiler_heap_dump_writer.cc",
     "trace_event/heap_profiler_heap_dump_writer.h",
     "trace_event/heap_profiler_stack_frame_deduplicator.cc",
@@ -984,8 +895,6 @@
     "trace_event/heap_profiler_type_name_deduplicator.h",
     "trace_event/java_heap_dump_provider_android.cc",
     "trace_event/java_heap_dump_provider_android.h",
-    "trace_event/malloc_dump_provider.cc",
-    "trace_event/malloc_dump_provider.h",
     "trace_event/memory_allocator_dump.cc",
     "trace_event/memory_allocator_dump.h",
     "trace_event/memory_allocator_dump_guid.cc",
@@ -995,14 +904,10 @@
     "trace_event/memory_dump_provider.h",
     "trace_event/memory_dump_request_args.cc",
     "trace_event/memory_dump_request_args.h",
-    "trace_event/memory_dump_scheduler.cc",
-    "trace_event/memory_dump_scheduler.h",
     "trace_event/memory_dump_session_state.cc",
     "trace_event/memory_dump_session_state.h",
     "trace_event/memory_infra_background_whitelist.cc",
     "trace_event/memory_infra_background_whitelist.h",
-    "trace_event/memory_usage_estimator.cc",
-    "trace_event/memory_usage_estimator.h",
     "trace_event/process_memory_dump.cc",
     "trace_event/process_memory_dump.h",
     "trace_event/process_memory_maps.cc",
@@ -1011,7 +916,6 @@
     "trace_event/process_memory_totals.h",
     "trace_event/trace_buffer.cc",
     "trace_event/trace_buffer.h",
-    "trace_event/trace_category.h",
     "trace_event/trace_config.cc",
     "trace_event/trace_config.h",
     "trace_event/trace_event.h",
@@ -1020,8 +924,6 @@
     "trace_event/trace_event_argument.h",
     "trace_event/trace_event_etw_export_win.cc",
     "trace_event/trace_event_etw_export_win.h",
-    "trace_event/trace_event_filter.cc",
-    "trace_event/trace_event_filter.h",
     "trace_event/trace_event_impl.cc",
     "trace_event/trace_event_impl.h",
     "trace_event/trace_event_memory_overhead.cc",
@@ -1033,15 +935,17 @@
     "trace_event/trace_log.cc",
     "trace_event/trace_log.h",
     "trace_event/trace_log_constants.cc",
+    "trace_event/trace_sampling_thread.cc",
+    "trace_event/trace_sampling_thread.h",
     "trace_event/tracing_agent.cc",
     "trace_event/tracing_agent.h",
+    "trace_event/winheap_dump_provider_win.cc",
+    "trace_event/winheap_dump_provider_win.h",
     "tracked_objects.cc",
     "tracked_objects.h",
     "tracking_info.cc",
     "tracking_info.h",
     "tuple.h",
-    "unguessable_token.cc",
-    "unguessable_token.h",
     "value_conversions.cc",
     "value_conversions.h",
     "values.cc",
@@ -1103,7 +1007,6 @@
     "win/wrapped_window_proc.h",
   ]
 
-  all_dependent_configs = []
   defines = []
   data = []
 
@@ -1129,48 +1032,14 @@
   ]
 
   # Needed for <atomic> if using newer C++ library than sysroot
-  if (!use_sysroot && (is_android || (is_linux && !is_chromecast))) {
+  if (!use_sysroot && (is_android || is_linux)) {
     libs = [ "atomic" ]
   }
 
   if (use_experimental_allocator_shim) {
-    # TODO(primiano): support other platforms, currently this works only on
-    # Linux/CrOS/Android. http://crbug.com/550886 .
-    sources += [
-      "allocator/allocator_shim.cc",
-      "allocator/allocator_shim.h",
-      "allocator/allocator_shim_internals.h",
-      "allocator/allocator_shim_override_cpp_symbols.h",
-      "allocator/allocator_shim_override_libc_symbols.h",
-    ]
-    if (is_win) {
-      sources += [
-        "allocator/allocator_shim_default_dispatch_to_winheap.cc",
-        "allocator/allocator_shim_override_ucrt_symbols_win.h",
-        "allocator/winheap_stubs_win.cc",
-        "allocator/winheap_stubs_win.h",
-      ]
-    } else if (is_linux && use_allocator == "tcmalloc") {
-      sources += [
-        "allocator/allocator_shim_default_dispatch_to_tcmalloc.cc",
-        "allocator/allocator_shim_override_glibc_weak_symbols.h",
-      ]
-      deps += [ "//base/allocator:tcmalloc" ]
-    } else if (is_linux && use_allocator == "none") {
-      sources += [ "allocator/allocator_shim_default_dispatch_to_glibc.cc" ]
-    } else if (is_android && use_allocator == "none") {
-      sources += [
-        "allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
-        "allocator/allocator_shim_override_linker_wrapped_symbols.h",
-      ]
-      all_dependent_configs += [ "//base/allocator:wrap_malloc_symbols" ]
-    } else if (is_mac) {
-      sources += [
-        "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc",
-        "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h",
-        "allocator/allocator_shim_override_mac_symbols.h",
-      ]
-    }
+    # The allocator shim is part of the base API. This is to allow clients of
+    # base should to install hooks into the allocator path.
+    public_deps += [ "//base/allocator:unified_allocator_shim" ]
   }
 
   # Allow more direct string conversions on platforms with native utf8
@@ -1181,7 +1050,10 @@
 
   # Android.
   if (is_android) {
-    sources -= [ "debug/stack_trace_posix.cc" ]
+    sources -= [
+      "debug/stack_trace_posix.cc",
+      "power_monitor/power_monitor_device_source_posix.cc",
+    ]
 
     # Android uses some Linux sources, put those back.
     set_sources_assignment_filter([])
@@ -1189,14 +1061,14 @@
       "debug/proc_maps_linux.cc",
       "files/file_path_watcher_linux.cc",
       "posix/unix_domain_socket_linux.cc",
-      "power_monitor/power_monitor_device_source_android.cc",
-      "power_monitor/power_monitor_device_source_android.h",
       "process/internal_linux.cc",
       "process/memory_linux.cc",
       "process/process_handle_linux.cc",
       "process/process_iterator_linux.cc",
       "process/process_metrics_linux.cc",
       "sys_info_linux.cc",
+      "trace_event/malloc_dump_provider.cc",
+      "trace_event/malloc_dump_provider.h",
     ]
     set_sources_assignment_filter(sources_assignment_filter)
 
@@ -1213,7 +1085,7 @@
 
   # Chromeos.
   if (is_chromeos) {
-    sources += [ "power_monitor/power_monitor_device_source_chromeos.cc" ]
+    sources -= [ "power_monitor/power_monitor_device_source_posix.cc" ]
   }
 
   # NaCl.
@@ -1247,10 +1119,7 @@
       "memory/discardable_memory_allocator.h",
       "memory/discardable_shared_memory.cc",
       "memory/discardable_shared_memory.h",
-      "memory/shared_memory_helper.cc",
-      "memory/shared_memory_helper.h",
       "memory/shared_memory_posix.cc",
-      "native_library.cc",
       "native_library_posix.cc",
       "path_service.cc",
       "process/kill.cc",
@@ -1267,8 +1136,6 @@
       "synchronization/read_write_lock_posix.cc",
       "sys_info.cc",
       "sys_info_posix.cc",
-      "task_scheduler/initialization_util.cc",
-      "task_scheduler/initialization_util.h",
       "trace_event/trace_event_system_stats_monitor.cc",
     ]
 
@@ -1280,8 +1147,6 @@
       configs += [ ":nacl_nonsfi_warnings" ]
     } else {
       sources -= [
-        "files/file_descriptor_watcher_posix.cc",
-        "files/file_descriptor_watcher_posix.h",
         "files/file_util.cc",
         "files/file_util.h",
         "files/file_util_posix.cc",
@@ -1294,8 +1159,6 @@
         "process/launch.h",
         "process/launch_posix.cc",
         "rand_util_posix.cc",
-        "task_scheduler/task_tracker_posix.cc",
-        "task_scheduler/task_tracker_posix.h",
       ]
     }
   } else {
@@ -1307,37 +1170,16 @@
       "rand_util_nacl.cc",
       "synchronization/read_write_lock_nacl.cc",
     ]
-
-    if (use_partition_alloc) {
-      # Add stuff that doesn't work in NaCl.
-      sources += [
-        # PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below).
-        "allocator/partition_allocator/address_space_randomization.cc",
-        "allocator/partition_allocator/address_space_randomization.h",
-        "allocator/partition_allocator/oom.h",
-        "allocator/partition_allocator/page_allocator.cc",
-        "allocator/partition_allocator/page_allocator.h",
-        "allocator/partition_allocator/partition_alloc.cc",
-        "allocator/partition_allocator/partition_alloc.h",
-        "allocator/partition_allocator/spin_lock.cc",
-        "allocator/partition_allocator/spin_lock.h",
-      ]
-    }
   }
 
   # Windows.
   if (is_win) {
     sources += [
-      "power_monitor/power_monitor_device_source_win.cc",
       "profiler/win32_stack_frame_unwinder.cc",
       "profiler/win32_stack_frame_unwinder.h",
     ]
 
     sources -= [
-      "file_descriptor_store.cc",
-      "file_descriptor_store.h",
-      "memory/shared_memory_helper.cc",
-      "memory/shared_memory_helper.h",
       "message_loop/message_pump_libevent.cc",
       "strings/string16.cc",
     ]
@@ -1345,7 +1187,6 @@
     deps += [
       "//base/trace_event/etw_manifest:chrome_events_win",
       "//base/win:base_win_features",
-      "//base/win:eventlog_messages",
     ]
 
     if (is_component_build) {
@@ -1360,60 +1201,63 @@
 
       # These runtime files are copied to the output directory by the
       # vs_toolchain script that runs as part of toolchain configuration.
-      data += [
-        "$root_out_dir/msvcp140${vcrt_suffix}.dll",
-        "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
-        "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
+      if (visual_studio_version == "2015") {
+        data += [
+          "$root_out_dir/msvcp140${vcrt_suffix}.dll",
+          "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
+          "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
 
-        # Universal Windows 10 CRT files
-        "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
-        "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
-        "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
-        "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
-        "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
-        "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
-        "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
-        "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
-      ]
+          # Universal Windows 10 CRT files
+          "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
+          "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
+          "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
+          "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
+        ]
+      } else {
+        data += [
+          "$root_out_dir/msvcp120${vcrt_suffix}.dll",
+          "$root_out_dir/msvcr120${vcrt_suffix}.dll",
+        ]
+      }
       if (is_asan) {
-        if (current_cpu == "x64") {
-          data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-x86_64.dll" ]
-        } else {
-          data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
-        }
+        data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
       }
     }
 
@@ -1427,7 +1271,7 @@
       "userenv.lib",
       "winmm.lib",
     ]
-    all_dependent_configs += [ ":base_win_linker_flags" ]
+    all_dependent_configs = [ ":base_win_linker_flags" ]
   } else if (!is_nacl || is_nacl_nonsfi) {
     # Non-Windows.
     deps += [ "//base/third_party/libevent" ]
@@ -1436,10 +1280,9 @@
   # Desktop Mac.
   if (is_mac) {
     sources += [
-      "mac/scoped_typeref.h",
-      "power_monitor/power_monitor_device_source_mac.mm",
+      "trace_event/malloc_dump_provider.cc",
+      "trace_event/malloc_dump_provider.h",
     ]
-
     libs = [
       "ApplicationServices.framework",
       "AppKit.framework",
@@ -1470,6 +1313,11 @@
 
   # Linux.
   if (is_linux) {
+    sources += [
+      "trace_event/malloc_dump_provider.cc",
+      "trace_event/malloc_dump_provider.h",
+    ]
+
     if (is_asan || is_lsan || is_msan || is_tsan) {
       # For llvm-sanitizer.
       data += [ "//third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6" ]
@@ -1484,7 +1332,7 @@
     defines += [ "USE_SYMBOLIZE" ]
 
     configs += linux_configs
-    all_dependent_configs += linux_configs
+    all_dependent_configs = linux_configs
 
     # These dependencies are not required on Android, and in the case
     # of xdg_mime must be excluded due to licensing restrictions.
@@ -1558,8 +1406,6 @@
       "mac/mach_logging.h",
       "mac/objc_property_releaser.h",
       "mac/objc_property_releaser.mm",
-      "mac/objc_release_properties.h",
-      "mac/objc_release_properties.mm",
       "mac/scoped_block.h",
       "mac/scoped_mach_port.cc",
       "mac/scoped_mach_port.h",
@@ -1575,7 +1421,6 @@
       "memory/shared_memory_posix.cc",
       "message_loop/message_pump_mac.h",
       "message_loop/message_pump_mac.mm",
-      "power_monitor/power_monitor_device_source_ios.mm",
       "process/memory_stubs.cc",
       "strings/sys_string_conversions_mac.mm",
       "threading/platform_thread_mac.mm",
@@ -1585,17 +1430,6 @@
     set_sources_assignment_filter(sources_assignment_filter)
   }
 
-  if (is_posix && !is_mac && !is_ios && !is_android && !is_chromeos) {
-    sources += [ "power_monitor/power_monitor_device_source_posix.cc" ]
-  }
-
-  if (is_posix && !is_mac && !is_nacl) {
-    sources += [
-      "memory/shared_memory_tracker.cc",
-      "memory/shared_memory_tracker.h",
-    ]
-  }
-
   if (!use_glib) {
     sources -= [
       "message_loop/message_pump_glib.cc",
@@ -1603,7 +1437,7 @@
     ]
   }
 
-  if (using_sanitizer) {
+  if (is_asan || is_lsan || is_msan || is_tsan) {
     data += [ "//tools/valgrind/asan/" ]
     if (is_win) {
       data +=
@@ -1625,10 +1459,7 @@
 buildflag_header("debugging_flags") {
   header = "debugging_flags.h"
   header_dir = "base/debug"
-  flags = [
-    "ENABLE_PROFILING=$enable_profiling",
-    "ENABLE_MEMORY_TASK_PROFILER=$enable_memory_task_profiler",
-  ]
+  flags = [ "ENABLE_PROFILING=$enable_profiling" ]
 }
 
 # This is the subset of files from base that should not be used with a dynamic
@@ -1638,20 +1469,10 @@
   sources = [
     "base_switches.cc",
     "base_switches.h",
-    "task_scheduler/switches.cc",
-    "task_scheduler/switches.h",
     "win/pe_image.cc",
     "win/pe_image.h",
   ]
 
-  if (is_win) {
-    # Disable sanitizer coverage in win/pe_image.cc. It is called by the sandbox
-    # before sanitizer coverage can initialize. http://crbug.com/484711
-    configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
-    configs +=
-        [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
-  }
-
   if (!is_debug) {
     configs -= [ "//build/config/compiler:default_optimization" ]
     configs += [ "//build/config/compiler:optimize_max" ]
@@ -1672,14 +1493,12 @@
     "i18n/case_conversion.h",
     "i18n/char_iterator.cc",
     "i18n/char_iterator.h",
-    "i18n/character_encoding.cc",
-    "i18n/character_encoding.h",
-    "i18n/encoding_detection.cc",
-    "i18n/encoding_detection.h",
     "i18n/file_util_icu.cc",
     "i18n/file_util_icu.h",
     "i18n/i18n_constants.cc",
     "i18n/i18n_constants.h",
+    "i18n/icu_encoding_detection.cc",
+    "i18n/icu_encoding_detection.h",
     "i18n/icu_string_conversions.cc",
     "i18n/icu_string_conversions.h",
     "i18n/icu_util.cc",
@@ -1706,7 +1525,6 @@
   defines = [ "BASE_I18N_IMPLEMENTATION" ]
   configs += [ "//build/config/compiler:wexit_time_destructors" ]
   public_deps = [
-    "//third_party/ced",
     "//third_party/icu",
   ]
   deps = [
@@ -1732,7 +1550,6 @@
     "message_loop/message_pump_perftest.cc",
 
     # "test/run_all_unittests.cc",
-    "json/json_perftest.cc",
     "threading/thread_perftest.cc",
   ]
   deps = [
@@ -1893,7 +1710,6 @@
 
 test("base_unittests") {
   sources = [
-    "allocator/malloc_zone_functions_mac_unittest.cc",
     "allocator/tcmalloc_unittest.cc",
     "android/application_status_listener_unittest.cc",
     "android/content_uri_utils_unittest.cc",
@@ -1904,14 +1720,12 @@
     "android/path_utils_unittest.cc",
     "android/scoped_java_ref_unittest.cc",
     "android/sys_utils_unittest.cc",
-    "android/unguessable_token_android_unittest.cc",
     "at_exit_unittest.cc",
     "atomicops_unittest.cc",
     "barrier_closure_unittest.cc",
     "base64_unittest.cc",
     "base64url_unittest.cc",
     "big_endian_unittest.cc",
-    "bind_helpers_unittest.cc",
     "bind_unittest.cc",
     "bit_cast_unittest.cc",
     "bits_unittest.cc",
@@ -1922,28 +1736,24 @@
     "cancelable_callback_unittest.cc",
     "command_line_unittest.cc",
     "containers/adapters_unittest.cc",
-    "containers/flat_set_unittest.cc",
     "containers/hash_tables_unittest.cc",
     "containers/linked_list_unittest.cc",
     "containers/mru_cache_unittest.cc",
+    "containers/scoped_ptr_hash_map_unittest.cc",
     "containers/small_map_unittest.cc",
     "containers/stack_container_unittest.cc",
     "cpu_unittest.cc",
-    "debug/activity_analyzer_unittest.cc",
-    "debug/activity_tracker_unittest.cc",
     "debug/crash_logging_unittest.cc",
     "debug/debugger_unittest.cc",
     "debug/leak_tracker_unittest.cc",
     "debug/proc_maps_linux_unittest.cc",
     "debug/stack_trace_unittest.cc",
     "debug/task_annotator_unittest.cc",
-    "debug/thread_heap_usage_tracker_unittest.cc",
     "deferred_sequenced_task_runner_unittest.cc",
     "environment_unittest.cc",
     "feature_list_unittest.cc",
     "file_version_info_win_unittest.cc",
     "files/dir_reader_posix_unittest.cc",
-    "files/file_descriptor_watcher_posix_unittest.cc",
     "files/file_locking_unittest.cc",
     "files/file_path_unittest.cc",
     "files/file_path_watcher_unittest.cc",
@@ -1953,7 +1763,6 @@
     "files/file_util_unittest.cc",
     "files/important_file_writer_unittest.cc",
     "files/memory_mapped_file_unittest.cc",
-    "files/scoped_platform_handle_unittest.cc",
     "files/scoped_temp_dir_unittest.cc",
     "gmock_unittest.cc",
     "guid_unittest.cc",
@@ -1961,7 +1770,6 @@
     "i18n/break_iterator_unittest.cc",
     "i18n/case_conversion_unittest.cc",
     "i18n/char_iterator_unittest.cc",
-    "i18n/character_encoding_unittest.cc",
     "i18n/file_util_icu_unittest.cc",
     "i18n/icu_string_conversions_unittest.cc",
     "i18n/message_formatter_unittest.cc",
@@ -1989,7 +1797,6 @@
     "mac/mac_util_unittest.mm",
     "mac/mach_port_broker_unittest.cc",
     "mac/objc_property_releaser_unittest.mm",
-    "mac/objc_release_properties_unittest.mm",
     "mac/scoped_nsobject_unittest.mm",
     "mac/scoped_objc_class_swizzler_unittest.mm",
     "mac/scoped_sending_event_unittest.mm",
@@ -1997,11 +1804,9 @@
     "memory/aligned_memory_unittest.cc",
     "memory/discardable_shared_memory_unittest.cc",
     "memory/linked_ptr_unittest.cc",
-    "memory/memory_coordinator_client_registry_unittest.cc",
     "memory/memory_pressure_listener_unittest.cc",
     "memory/memory_pressure_monitor_chromeos_unittest.cc",
     "memory/memory_pressure_monitor_mac_unittest.cc",
-    "memory/memory_pressure_monitor_unittest.cc",
     "memory/memory_pressure_monitor_win_unittest.cc",
     "memory/ptr_util_unittest.cc",
     "memory/ref_counted_memory_unittest.cc",
@@ -2016,13 +1821,10 @@
     "message_loop/message_loop_unittest.cc",
     "message_loop/message_pump_glib_unittest.cc",
     "message_loop/message_pump_io_ios_unittest.cc",
-    "message_loop/message_pump_mac_unittest.cc",
     "metrics/bucket_ranges_unittest.cc",
-    "metrics/field_trial_params_unittest.cc",
     "metrics/field_trial_unittest.cc",
     "metrics/histogram_base_unittest.cc",
     "metrics/histogram_delta_serialization_unittest.cc",
-    "metrics/histogram_functions_unittest.cc",
     "metrics/histogram_macros_unittest.cc",
     "metrics/histogram_snapshot_manager_unittest.cc",
     "metrics/histogram_unittest.cc",
@@ -2036,12 +1838,10 @@
     "metrics/statistics_recorder_unittest.cc",
     "native_library_unittest.cc",
     "numerics/safe_numerics_unittest.cc",
-    "numerics/saturated_arithmetic_unittest.cc",
     "observer_list_unittest.cc",
     "optional_unittest.cc",
     "os_compat_android_unittest.cc",
     "path_service_unittest.cc",
-    "pending_task_unittest.cc",
     "pickle_unittest.cc",
     "posix/file_descriptor_shuffle_unittest.cc",
     "posix/unix_domain_socket_linux_unittest.cc",
@@ -2062,8 +1862,6 @@
     "scoped_native_library_unittest.cc",
     "security_unittest.cc",
     "sequence_checker_unittest.cc",
-    "sequence_token_unittest.cc",
-    "sequenced_task_runner_unittest.cc",
     "sha1_unittest.cc",
     "stl_util_unittest.cc",
     "strings/nullable_string16_unittest.cc",
@@ -2083,7 +1881,7 @@
     "strings/utf_string_conversions_unittest.cc",
     "supports_user_data_unittest.cc",
     "sync_socket_unittest.cc",
-    "synchronization/atomic_flag_unittest.cc",
+    "synchronization/cancellation_flag_unittest.cc",
     "synchronization/condition_variable_unittest.cc",
     "synchronization/lock_unittest.cc",
     "synchronization/read_write_lock_unittest.cc",
@@ -2097,33 +1895,27 @@
     "task_scheduler/delayed_task_manager_unittest.cc",
     "task_scheduler/priority_queue_unittest.cc",
     "task_scheduler/scheduler_lock_unittest.cc",
-    "task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc",
+    "task_scheduler/scheduler_service_thread_unittest.cc",
     "task_scheduler/scheduler_worker_pool_impl_unittest.cc",
     "task_scheduler/scheduler_worker_stack_unittest.cc",
     "task_scheduler/scheduler_worker_unittest.cc",
-    "task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc",
     "task_scheduler/sequence_sort_key_unittest.cc",
     "task_scheduler/sequence_unittest.cc",
     "task_scheduler/task_scheduler_impl_unittest.cc",
-    "task_scheduler/task_tracker_posix_unittest.cc",
     "task_scheduler/task_tracker_unittest.cc",
-    "task_scheduler/task_traits_unittest.cc",
-    "task_scheduler/task_unittest.cc",
     "task_scheduler/test_task_factory.cc",
     "task_scheduler/test_task_factory.h",
     "task_scheduler/test_utils.h",
     "template_util_unittest.cc",
     "test/histogram_tester_unittest.cc",
-    "test/mock_callback_unittest.cc",
-    "test/scoped_mock_time_message_loop_task_runner_unittest.cc",
-    "test/scoped_task_scheduler_unittest.cc",
+    "test/icu_test_util.cc",
+    "test/icu_test_util.h",
     "test/test_pending_task_unittest.cc",
     "test/test_reg_util_win_unittest.cc",
     "test/trace_event_analyzer_unittest.cc",
     "test/user_action_tester_unittest.cc",
     "threading/non_thread_safe_unittest.cc",
     "threading/platform_thread_unittest.cc",
-    "threading/post_task_and_reply_impl_unittest.cc",
     "threading/sequenced_task_runner_handle_unittest.cc",
     "threading/sequenced_worker_pool_unittest.cc",
     "threading/simple_thread_unittest.cc",
@@ -2132,7 +1924,6 @@
     "threading/thread_id_name_manager_unittest.cc",
     "threading/thread_local_storage_unittest.cc",
     "threading/thread_local_unittest.cc",
-    "threading/thread_task_runner_handle_unittest.cc",
     "threading/thread_unittest.cc",
     "threading/watchdog_unittest.cc",
     "threading/worker_pool_posix_unittest.cc",
@@ -2145,7 +1936,6 @@
     "timer/timer_unittest.cc",
     "tools_sanity_unittest.cc",
     "trace_event/blame_context_unittest.cc",
-    "trace_event/event_name_filter_unittest.cc",
     "trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
     "trace_event/heap_profiler_allocation_register_unittest.cc",
     "trace_event/heap_profiler_heap_dump_writer_unittest.cc",
@@ -2154,19 +1944,15 @@
     "trace_event/java_heap_dump_provider_android_unittest.cc",
     "trace_event/memory_allocator_dump_unittest.cc",
     "trace_event/memory_dump_manager_unittest.cc",
-    "trace_event/memory_usage_estimator_unittest.cc",
     "trace_event/process_memory_dump_unittest.cc",
-    "trace_event/trace_category_unittest.cc",
     "trace_event/trace_config_unittest.cc",
     "trace_event/trace_event_argument_unittest.cc",
-    "trace_event/trace_event_filter_test_utils.cc",
-    "trace_event/trace_event_filter_test_utils.h",
     "trace_event/trace_event_synthetic_delay_unittest.cc",
     "trace_event/trace_event_system_stats_monitor_unittest.cc",
     "trace_event/trace_event_unittest.cc",
+    "trace_event/winheap_dump_provider_win_unittest.cc",
     "tracked_objects_unittest.cc",
     "tuple_unittest.cc",
-    "unguessable_token_unittest.cc",
     "values_unittest.cc",
     "version_unittest.cc",
     "vlog_unittest.cc",
@@ -2200,9 +1986,7 @@
     ":base",
     ":i18n",
     ":message_loop_tests",
-    "//base/allocator:features",
-    "//base/test:native_library_test_utils",
-    "//base/test:run_all_base_unittests",
+    "//base/test:run_all_unittests",
     "//base/test:test_support",
     "//base/third_party/dynamic_annotations",
     "//testing/gmock",
@@ -2210,10 +1994,6 @@
     "//third_party/icu",
   ]
 
-  data_deps = [
-    "//base/test:test_shared_library",
-  ]
-
   if (is_ios || is_mac) {
     deps += [ ":base_unittests_arc" ]
   }
@@ -2236,15 +2016,10 @@
   }
 
   if (is_android) {
-    sources -= [
-      "process/process_unittest.cc",
-      "process/process_util_unittest.cc",
-    ]
     deps += [
       ":base_java",
       ":base_java_unittest_support",
       "//base/android/jni_generator:jni_generator_tests",
-      "//base/test:test_support_java",
     ]
   }
 
@@ -2267,7 +2042,6 @@
       "mac/bind_objc_block_unittest.mm",
       "mac/foundation_util_unittest.mm",
       "mac/objc_property_releaser_unittest.mm",
-      "mac/objc_release_properties_unittest.mm",
       "mac/scoped_nsobject_unittest.mm",
       "strings/sys_string_conversions_mac_unittest.mm",
     ]
@@ -2276,10 +2050,6 @@
     # TODO(GYP): dep on copy_test_data_ios action.
   }
 
-  if (use_partition_alloc) {
-    sources += [ "allocator/partition_allocator/partition_alloc_unittest.cc" ]
-  }
-
   if (is_mac) {
     libs = [
       "CoreFoundation.framework",
@@ -2294,6 +2064,10 @@
 
     deps += [ "//base/test:malloc_wrapper" ]
 
+    if (use_glib) {
+      configs += [ "//build/config/linux:glib" ]
+    }
+
     if (!is_component_build) {
       # Set rpath to find libmalloc_wrapper.so even in a non-component build.
       configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
@@ -2382,7 +2156,6 @@
       "callback_list_unittest.nc",
       "callback_unittest.nc",
       "memory/weak_ptr_unittest.nc",
-      "metrics/histogram_unittest.nc",
     ]
 
     deps = [
@@ -2394,6 +2167,7 @@
 }
 
 if (is_android) {
+  # GYP: //base.gyp:base_jni_headers
   generate_jni("base_jni_headers") {
     sources = [
       "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java",
@@ -2405,12 +2179,10 @@
       "android/java/src/org/chromium/base/ContentUriUtils.java",
       "android/java/src/org/chromium/base/ContextUtils.java",
       "android/java/src/org/chromium/base/CpuFeatures.java",
-      "android/java/src/org/chromium/base/EarlyTraceEvent.java",
       "android/java/src/org/chromium/base/EventLog.java",
       "android/java/src/org/chromium/base/FieldTrialList.java",
       "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
       "android/java/src/org/chromium/base/JNIUtils.java",
-      "android/java/src/org/chromium/base/JavaExceptionReporter.java",
       "android/java/src/org/chromium/base/JavaHandlerThread.java",
       "android/java/src/org/chromium/base/LocaleUtils.java",
       "android/java/src/org/chromium/base/MemoryPressureListener.java",
@@ -2420,14 +2192,10 @@
       "android/java/src/org/chromium/base/SysUtils.java",
       "android/java/src/org/chromium/base/SystemMessageHandler.java",
       "android/java/src/org/chromium/base/ThreadUtils.java",
-      "android/java/src/org/chromium/base/ThrowUncaughtException.java",
-      "android/java/src/org/chromium/base/TimeUtils.java",
       "android/java/src/org/chromium/base/TraceEvent.java",
-      "android/java/src/org/chromium/base/UnguessableToken.java",
       "android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
       "android/java/src/org/chromium/base/metrics/RecordHistogram.java",
       "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
-      "android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
     ]
 
     public_deps = [
@@ -2437,11 +2205,13 @@
     jni_package = "base"
   }
 
+  # GYP: //base.gyp:android_runtime_jni_headers
   generate_jar_jni("android_runtime_jni_headers") {
     jni_package = "base"
     classes = [ "java/lang/Runtime.class" ]
   }
 
+  # GYP: //base.gyp:base_java
   android_library("base_java") {
     srcjar_deps = [
       ":base_android_java_enums_srcjar",
@@ -2450,7 +2220,6 @@
     ]
 
     deps = [
-      "//third_party/android_tools:android_support_annotations_java",
       "//third_party/android_tools:android_support_multidex_java",
       "//third_party/jsr-305:jsr_305_javalib",
     ]
@@ -2471,24 +2240,22 @@
       "android/java/src/org/chromium/base/ContentUriUtils.java",
       "android/java/src/org/chromium/base/ContextUtils.java",
       "android/java/src/org/chromium/base/CpuFeatures.java",
-      "android/java/src/org/chromium/base/EarlyTraceEvent.java",
       "android/java/src/org/chromium/base/EventLog.java",
       "android/java/src/org/chromium/base/FieldTrialList.java",
       "android/java/src/org/chromium/base/FileUtils.java",
       "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
       "android/java/src/org/chromium/base/JNIUtils.java",
-      "android/java/src/org/chromium/base/JavaExceptionReporter.java",
       "android/java/src/org/chromium/base/JavaHandlerThread.java",
       "android/java/src/org/chromium/base/LocaleUtils.java",
       "android/java/src/org/chromium/base/Log.java",
       "android/java/src/org/chromium/base/MemoryPressureListener.java",
-      "android/java/src/org/chromium/base/NonThreadSafe.java",
       "android/java/src/org/chromium/base/ObserverList.java",
       "android/java/src/org/chromium/base/PackageUtils.java",
       "android/java/src/org/chromium/base/PathService.java",
       "android/java/src/org/chromium/base/PathUtils.java",
       "android/java/src/org/chromium/base/PerfTraceEvent.java",
       "android/java/src/org/chromium/base/PowerMonitor.java",
+      "android/java/src/org/chromium/base/PowerStatusReceiver.java",
       "android/java/src/org/chromium/base/Promise.java",
       "android/java/src/org/chromium/base/ResourceExtractor.java",
       "android/java/src/org/chromium/base/SecureRandomInitializer.java",
@@ -2496,10 +2263,7 @@
       "android/java/src/org/chromium/base/SysUtils.java",
       "android/java/src/org/chromium/base/SystemMessageHandler.java",
       "android/java/src/org/chromium/base/ThreadUtils.java",
-      "android/java/src/org/chromium/base/ThrowUncaughtException.java",
-      "android/java/src/org/chromium/base/TimeUtils.java",
       "android/java/src/org/chromium/base/TraceEvent.java",
-      "android/java/src/org/chromium/base/UnguessableToken.java",
       "android/java/src/org/chromium/base/VisibleForTesting.java",
       "android/java/src/org/chromium/base/annotations/AccessedByNative.java",
       "android/java/src/org/chromium/base/annotations/CalledByNative.java",
@@ -2519,10 +2283,8 @@
       "android/java/src/org/chromium/base/library_loader/ModernLinker.java",
       "android/java/src/org/chromium/base/library_loader/NativeLibraryPreloader.java",
       "android/java/src/org/chromium/base/library_loader/ProcessInitException.java",
-      "android/java/src/org/chromium/base/metrics/CachedMetrics.java",
       "android/java/src/org/chromium/base/metrics/RecordHistogram.java",
       "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
-      "android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
       "android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
     ]
 
@@ -2535,46 +2297,34 @@
     ]
   }
 
+  # GYP: //base.gyp:base_javatests
   android_library("base_javatests") {
-    testonly = true
     deps = [
       ":base_java",
       ":base_java_test_support",
-      "//third_party/android_support_test_runner:runner_java",
-      "//third_party/junit:junit",
     ]
     java_files = [
       "android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
       "android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
       "android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
       "android/javatests/src/org/chromium/base/CommandLineTest.java",
-
-      # TODO(nona): move to Junit once that is built for Android N.
-      "android/javatests/src/org/chromium/base/LocaleUtilsTest.java",
       "android/javatests/src/org/chromium/base/ObserverListTest.java",
       "android/javatests/src/org/chromium/base/metrics/RecordHistogramTest.java",
     ]
   }
 
+  # GYP: //base.gyp:base_java_test_support
   android_library("base_java_test_support") {
-    testonly = true
     deps = [
       ":base_java",
       "//testing/android/reporter:reporter_java",
-      "//third_party/android_support_test_runner:exposed_instrumentation_api_publish_java",
-      "//third_party/android_support_test_runner:runner_java",
-      "//third_party/hamcrest:hamcrest_core_java",
-      "//third_party/junit",
     ]
     java_files = [
       "test/android/javatests/src/org/chromium/base/test/BaseActivityInstrumentationTestCase.java",
       "test/android/javatests/src/org/chromium/base/test/BaseChromiumInstrumentationTestRunner.java",
       "test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
-      "test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java",
-      "test/android/javatests/src/org/chromium/base/test/BaseChromiumAndroidJUnitRunner.java",
       "test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
       "test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
-      "test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java",
       "test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
       "test/android/javatests/src/org/chromium/base/test/util/DisableIf.java",
       "test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java",
@@ -2586,7 +2336,6 @@
       "test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java",
       "test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java",
       "test/android/javatests/src/org/chromium/base/test/util/Manual.java",
-      "test/android/javatests/src/org/chromium/base/test/util/Matchers.java",
       "test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java",
       "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java",
       "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java",
@@ -2608,11 +2357,25 @@
     ]
   }
 
+  # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
+  # in the multidex shadow library. crbug.com/522043
+  # GYP: //base.gyp:base_junit_test_support
+  java_library("base_junit_test_support") {
+    testonly = true
+    java_files = [ "test/android/junit/src/org/chromium/base/test/shadows/ShadowMultiDex.java" ]
+    deps = [
+      "//third_party/android_tools:android_support_multidex_java",
+      "//third_party/robolectric:android-all-4.3_r2-robolectric-0",
+      "//third_party/robolectric:robolectric_java",
+    ]
+    srcjar_deps = [ ":base_build_config_gen" ]
+  }
+
+  # GYP: //base.gyp:base_junit_tests
   junit_binary("base_junit_tests") {
     java_files = [
       "android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
       "android/junit/src/org/chromium/base/LogTest.java",
-      "android/junit/src/org/chromium/base/NonThreadSafeTest.java",
       "android/junit/src/org/chromium/base/PromiseTest.java",
       "test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
       "test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
@@ -2622,10 +2385,14 @@
     deps = [
       ":base_java",
       ":base_java_test_support",
+      ":base_junit_test_support",
     ]
-    srcjar_deps = [ ":base_build_config_gen" ]
   }
 
+  # GYP: //base.gyp:base_java_application_state
+  # GYP: //base.gyp:base_java_library_load_from_apk_status_codes
+  # GYP: //base.gyp:base_java_library_process_type
+  # GYP: //base.gyp:base_java_memory_pressure_level
   java_cpp_enum("base_android_java_enums_srcjar") {
     sources = [
       "android/application_status_listener.h",
@@ -2635,6 +2402,7 @@
     ]
   }
 
+  # GYP: //base/base.gyp:base_build_config_gen
   java_cpp_template("base_build_config_gen") {
     sources = [
       "android/java/templates/BuildConfig.template",
@@ -2642,11 +2410,12 @@
     package_name = "org/chromium/base"
 
     defines = []
-    if (is_java_debug || dcheck_always_on) {
-      defines += [ "_DCHECK_IS_ON" ]
+    if (!is_java_debug) {
+      defines += [ "NDEBUG" ]
     }
   }
 
+  # GYP: //base/base.gyp:base_native_libraries_gen
   java_cpp_template("base_native_libraries_gen") {
     sources = [
       "android/java/templates/NativeLibraries.template",
@@ -2654,23 +2423,12 @@
     package_name = "org/chromium/base/library_loader"
   }
 
+  # GYP: //base.gyp:base_java_unittest_support
   android_library("base_java_unittest_support") {
-    testonly = true
     deps = [
       ":base_java",
     ]
-    java_files = [
-      "test/android/java/src/org/chromium/base/ContentUriTestUtils.java",
-      "test/android/java/src/org/chromium/base/TestSystemMessageHandler.java",
-    ]
+    java_files =
+        [ "test/android/java/src/org/chromium/base/ContentUriTestUtils.java" ]
   }
 }
-
-fuzzer_test("base_json_correctness_fuzzer") {
-  sources = [
-    "json/correctness_fuzzer.cc",
-  ]
-  deps = [
-    ":base",
-  ]
-}
diff --git a/base/DEPS b/base/DEPS
index 4b25f3f..c0e95a0 100644
--- a/base/DEPS
+++ b/base/DEPS
@@ -2,7 +2,6 @@
   "+jni",
   "+third_party/ashmem",
   "+third_party/apple_apsl",
-  "+third_party/ced",
   "+third_party/lss",
   "+third_party/modp_b64",
   "+third_party/tcmalloc",
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index 8cdb061..490b8e8 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -13,10 +13,8 @@
   enable_debugallocation = is_debug
 }
 
-# The Windows-only allocator shim is only enabled for Release static builds, and
-# is mutually exclusive with the generalized shim.
-win_use_allocator_shim = is_win && !is_component_build && !is_debug &&
-                         !use_experimental_allocator_shim && !is_asan
+# Allocator shim is only enabled for Release static builds.
+win_use_allocator_shim = is_win && !is_component_build && !is_debug
 
 # This "allocator" meta-target will forward to the default allocator according
 # to the build settings.
@@ -95,8 +93,6 @@
     sources = [
       "allocator_shim_win.cc",
       "allocator_shim_win.h",
-      "winheap_stubs_win.cc",
-      "winheap_stubs_win.h",
     ]
     configs += [ ":allocator_shim_define" ]
   }
@@ -222,14 +218,6 @@
       ":tcmalloc_flags",
     ]
 
-    # Thumb mode disabled due to bug in clang integrated assembler
-    # TODO(https://llvm.org/bugs/show_bug.cgi?id=31058)
-    configs -= [ "//build/config/compiler:compiler_arm_thumb" ]
-    configs += [ "//build/config/compiler:compiler_arm" ]
-
-    # TODO(crbug.com/633719) Make tcmalloc work with AFDO if possible.
-    configs -= [ "//build/config/compiler:afdo" ]
-
     deps = []
 
     if (enable_profiling) {
@@ -288,22 +276,52 @@
 
 buildflag_header("features") {
   header = "features.h"
-  flags = [
-    "USE_EXPERIMENTAL_ALLOCATOR_SHIM=$use_experimental_allocator_shim",
-    "ENABLE_WIN_ALLOCATOR_SHIM_TESTS=($use_experimental_allocator_shim || $win_use_allocator_shim)",
-  ]
+  flags = [ "USE_EXPERIMENTAL_ALLOCATOR_SHIM=$use_experimental_allocator_shim" ]
 }
 
-# Used to shim malloc symbols on Android. see //base/allocator/README.md.
-config("wrap_malloc_symbols") {
-  ldflags = [
-    "-Wl,-wrap,calloc",
-    "-Wl,-wrap,free",
-    "-Wl,-wrap,malloc",
-    "-Wl,-wrap,memalign",
-    "-Wl,-wrap,posix_memalign",
-    "-Wl,-wrap,pvalloc",
-    "-Wl,-wrap,realloc",
-    "-Wl,-wrap,valloc",
-  ]
+if (use_experimental_allocator_shim) {
+  # Used to shim malloc symbols on Android. see //base/allocator/README.md.
+  config("wrap_malloc_symbols") {
+    ldflags = [
+      "-Wl,-wrap,calloc",
+      "-Wl,-wrap,free",
+      "-Wl,-wrap,malloc",
+      "-Wl,-wrap,memalign",
+      "-Wl,-wrap,posix_memalign",
+      "-Wl,-wrap,pvalloc",
+      "-Wl,-wrap,realloc",
+      "-Wl,-wrap,valloc",
+    ]
+  }
+
+  source_set("unified_allocator_shim") {
+    # TODO(primiano): support other platforms, currently this works only on
+    # Linux/CrOS/Android. http://crbug.com/550886 .
+    configs += [ "//base:base_implementation" ]  # for BASE_EXPORT
+    visibility = [ "//base:base" ]
+    sources = [
+      "allocator_shim.cc",
+      "allocator_shim.h",
+      "allocator_shim_internals.h",
+      "allocator_shim_override_cpp_symbols.h",
+      "allocator_shim_override_libc_symbols.h",
+    ]
+    if (is_linux && use_allocator == "tcmalloc") {
+      sources += [
+        "allocator_shim_default_dispatch_to_tcmalloc.cc",
+        "allocator_shim_override_glibc_weak_symbols.h",
+      ]
+      deps = [
+        ":tcmalloc",
+      ]
+    } else if (is_linux && use_allocator == "none") {
+      sources += [ "allocator_shim_default_dispatch_to_glibc.cc" ]
+    } else if (is_android && use_allocator == "none") {
+      sources += [
+        "allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
+        "allocator_shim_override_linker_wrapped_symbols.h",
+      ]
+      all_dependent_configs = [ ":wrap_malloc_symbols" ]
+    }
+  }
 }
diff --git a/base/allocator/README.md b/base/allocator/README.md
index a211732..164df51 100644
--- a/base/allocator/README.md
+++ b/base/allocator/README.md
@@ -189,8 +189,8 @@
 - [Unified allocator shim doc - Feb 2016][url-allocator-shim]
 - [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
 - [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
-- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)
+- [Memory-Infra: Tools to profile memory usage in Chrome](components/tracing/docs/memory_infra.md)
 
 [url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
-[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
+[url-memory-infra-heap-profiler]: components/tracing/docs/heap_profiler.md
 [url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
diff --git a/base/allocator/allocator.gyp b/base/allocator/allocator.gyp
new file mode 100644
index 0000000..674d4d6
--- /dev/null
+++ b/base/allocator/allocator.gyp
@@ -0,0 +1,450 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'target_defaults': {
+    'variables': {
+      # This code gets run a lot and debugged rarely, so it should be fast
+      # by default. See http://crbug.com/388949.
+      'debug_optimize': '2',
+      'win_debug_Optimization': '0',
+      # Run time checks are incompatible with any level of optimizations.
+      'win_debug_RuntimeChecks': '0',
+    },
+  },
+  'variables': {
+    'tcmalloc_dir': '../../third_party/tcmalloc/chromium',
+    'use_vtable_verify%': 0,
+    # Provide a way to force disable debugallocation in Debug builds
+    # e.g. for profiling (it's more rare to profile Debug builds,
+    # but people sometimes need to do that).
+    'disable_debugallocation%': 0,
+  },
+  'targets': [
+    # The only targets that should depend on allocator are 'base' and
+    # executables that don't depend, directly or indirectly, on base (a few).
+    # All the other targets get a transitive dependency on this target via base.
+    {
+      'target_name': 'allocator',
+      'variables': {
+        'conditions': [
+          ['use_allocator!="none" or (OS=="win" and win_use_allocator_shim==1)', {
+            'allocator_target_type%': 'static_library',
+          }, {
+            'allocator_target_type%': 'none',
+          }],
+        ],
+      },
+      'type': '<(allocator_target_type)',
+      'toolsets': ['host', 'target'],
+      'conditions': [
+        ['OS=="win" and win_use_allocator_shim==1', {
+          'msvs_settings': {
+            # TODO(sgk):  merge this with build/common.gypi settings
+            'VCLibrarianTool': {
+              'AdditionalOptions': ['/ignore:4006,4221'],
+            },
+            'VCLinkerTool': {
+              'AdditionalOptions': ['/ignore:4006'],
+            },
+          },
+          'include_dirs': [
+            '../..',
+          ],
+          'sources': [
+            'allocator_shim_win.cc',
+            'allocator_shim_win.h',
+          ],
+          'configurations': {
+            'Debug_Base': {
+              'msvs_settings': {
+                'VCCLCompilerTool': {
+                  'RuntimeLibrary': '0',
+                },
+              },
+            },
+          },
+        }],  # OS=="win"
+        ['use_allocator=="tcmalloc"', {
+          # Disable the heap checker in tcmalloc.
+          'defines': [
+            'NO_HEAP_CHECK',
+          ],
+          'dependencies': [
+            '../third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+          ],
+          # The order of this include_dirs matters, as tc-malloc has its own
+          # base/ mini-fork. Do not factor these out of this conditions section.
+          'include_dirs': [
+            '.',
+            '<(tcmalloc_dir)/src/base',
+            '<(tcmalloc_dir)/src',
+            '../..',
+          ],
+          'sources': [
+            # Generated for our configuration from tcmalloc's build
+            # and checked in.
+            '<(tcmalloc_dir)/src/config.h',
+            '<(tcmalloc_dir)/src/config_android.h',
+            '<(tcmalloc_dir)/src/config_linux.h',
+            '<(tcmalloc_dir)/src/config_win.h',
+
+            # all tcmalloc native and forked files
+            '<(tcmalloc_dir)/src/addressmap-inl.h',
+            '<(tcmalloc_dir)/src/base/abort.cc',
+            '<(tcmalloc_dir)/src/base/abort.h',
+            '<(tcmalloc_dir)/src/base/arm_instruction_set_select.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-arm-generic.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-arm-v6plus.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-linuxppc.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-macosx.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-windows.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-x86.cc',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-x86.h',
+            '<(tcmalloc_dir)/src/base/atomicops.h',
+            '<(tcmalloc_dir)/src/base/commandlineflags.h',
+            '<(tcmalloc_dir)/src/base/cycleclock.h',
+            # We don't list dynamic_annotations.c since its copy is already
+            # present in the dynamic_annotations target.
+            '<(tcmalloc_dir)/src/base/dynamic_annotations.h',
+            '<(tcmalloc_dir)/src/base/elf_mem_image.cc',
+            '<(tcmalloc_dir)/src/base/elf_mem_image.h',
+            '<(tcmalloc_dir)/src/base/elfcore.h',
+            '<(tcmalloc_dir)/src/base/googleinit.h',
+            '<(tcmalloc_dir)/src/base/linux_syscall_support.h',
+            '<(tcmalloc_dir)/src/base/linuxthreads.cc',
+            '<(tcmalloc_dir)/src/base/linuxthreads.h',
+            '<(tcmalloc_dir)/src/base/logging.cc',
+            '<(tcmalloc_dir)/src/base/logging.h',
+            '<(tcmalloc_dir)/src/base/low_level_alloc.cc',
+            '<(tcmalloc_dir)/src/base/low_level_alloc.h',
+            '<(tcmalloc_dir)/src/base/simple_mutex.h',
+            '<(tcmalloc_dir)/src/base/spinlock.cc',
+            '<(tcmalloc_dir)/src/base/spinlock.h',
+            '<(tcmalloc_dir)/src/base/spinlock_internal.cc',
+            '<(tcmalloc_dir)/src/base/spinlock_internal.h',
+            '<(tcmalloc_dir)/src/base/spinlock_linux-inl.h',
+            '<(tcmalloc_dir)/src/base/spinlock_posix-inl.h',
+            '<(tcmalloc_dir)/src/base/spinlock_win32-inl.h',
+            '<(tcmalloc_dir)/src/base/stl_allocator.h',
+            '<(tcmalloc_dir)/src/base/synchronization_profiling.h',
+            '<(tcmalloc_dir)/src/base/sysinfo.cc',
+            '<(tcmalloc_dir)/src/base/sysinfo.h',
+            '<(tcmalloc_dir)/src/base/thread_annotations.h',
+            '<(tcmalloc_dir)/src/base/thread_lister.c',
+            '<(tcmalloc_dir)/src/base/thread_lister.h',
+            '<(tcmalloc_dir)/src/base/vdso_support.cc',
+            '<(tcmalloc_dir)/src/base/vdso_support.h',
+            '<(tcmalloc_dir)/src/central_freelist.cc',
+            '<(tcmalloc_dir)/src/central_freelist.h',
+            '<(tcmalloc_dir)/src/common.cc',
+            '<(tcmalloc_dir)/src/common.h',
+            '<(tcmalloc_dir)/src/debugallocation.cc',
+            '<(tcmalloc_dir)/src/free_list.cc',
+            '<(tcmalloc_dir)/src/free_list.h',
+            '<(tcmalloc_dir)/src/getpc.h',
+            '<(tcmalloc_dir)/src/gperftools/heap-checker.h',
+            '<(tcmalloc_dir)/src/gperftools/heap-profiler.h',
+            '<(tcmalloc_dir)/src/gperftools/malloc_extension.h',
+            '<(tcmalloc_dir)/src/gperftools/malloc_extension_c.h',
+            '<(tcmalloc_dir)/src/gperftools/malloc_hook.h',
+            '<(tcmalloc_dir)/src/gperftools/malloc_hook_c.h',
+            '<(tcmalloc_dir)/src/gperftools/profiler.h',
+            '<(tcmalloc_dir)/src/gperftools/stacktrace.h',
+            '<(tcmalloc_dir)/src/gperftools/tcmalloc.h',
+            '<(tcmalloc_dir)/src/heap-checker-bcad.cc',
+            '<(tcmalloc_dir)/src/heap-checker.cc',
+            '<(tcmalloc_dir)/src/heap-profile-table.cc',
+            '<(tcmalloc_dir)/src/heap-profile-table.h',
+            '<(tcmalloc_dir)/src/heap-profiler.cc',
+            '<(tcmalloc_dir)/src/internal_logging.cc',
+            '<(tcmalloc_dir)/src/internal_logging.h',
+            '<(tcmalloc_dir)/src/libc_override.h',
+            '<(tcmalloc_dir)/src/libc_override_gcc_and_weak.h',
+            '<(tcmalloc_dir)/src/libc_override_glibc.h',
+            '<(tcmalloc_dir)/src/libc_override_osx.h',
+            '<(tcmalloc_dir)/src/libc_override_redefine.h',
+            '<(tcmalloc_dir)/src/linked_list.h',
+            '<(tcmalloc_dir)/src/malloc_extension.cc',
+            '<(tcmalloc_dir)/src/malloc_hook-inl.h',
+            '<(tcmalloc_dir)/src/malloc_hook.cc',
+            '<(tcmalloc_dir)/src/malloc_hook_mmap_freebsd.h',
+            '<(tcmalloc_dir)/src/malloc_hook_mmap_linux.h',
+            '<(tcmalloc_dir)/src/maybe_threads.cc',
+            '<(tcmalloc_dir)/src/maybe_threads.h',
+            '<(tcmalloc_dir)/src/memfs_malloc.cc',
+            '<(tcmalloc_dir)/src/memory_region_map.cc',
+            '<(tcmalloc_dir)/src/memory_region_map.h',
+            '<(tcmalloc_dir)/src/packed-cache-inl.h',
+            '<(tcmalloc_dir)/src/page_heap.cc',
+            '<(tcmalloc_dir)/src/page_heap.h',
+            '<(tcmalloc_dir)/src/page_heap_allocator.h',
+            '<(tcmalloc_dir)/src/pagemap.h',
+            '<(tcmalloc_dir)/src/profile-handler.cc',
+            '<(tcmalloc_dir)/src/profile-handler.h',
+            '<(tcmalloc_dir)/src/profiledata.cc',
+            '<(tcmalloc_dir)/src/profiledata.h',
+            '<(tcmalloc_dir)/src/profiler.cc',
+            '<(tcmalloc_dir)/src/raw_printer.cc',
+            '<(tcmalloc_dir)/src/raw_printer.h',
+            '<(tcmalloc_dir)/src/sampler.cc',
+            '<(tcmalloc_dir)/src/sampler.h',
+            '<(tcmalloc_dir)/src/span.cc',
+            '<(tcmalloc_dir)/src/span.h',
+            '<(tcmalloc_dir)/src/stack_trace_table.cc',
+            '<(tcmalloc_dir)/src/stack_trace_table.h',
+            '<(tcmalloc_dir)/src/stacktrace.cc',
+            '<(tcmalloc_dir)/src/stacktrace_arm-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_config.h',
+            '<(tcmalloc_dir)/src/stacktrace_generic-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_libunwind-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_powerpc-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_win32-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_with_context.cc',
+            '<(tcmalloc_dir)/src/stacktrace_x86-inl.h',
+            '<(tcmalloc_dir)/src/static_vars.cc',
+            '<(tcmalloc_dir)/src/static_vars.h',
+            '<(tcmalloc_dir)/src/symbolize.cc',
+            '<(tcmalloc_dir)/src/symbolize.h',
+            '<(tcmalloc_dir)/src/system-alloc.cc',
+            '<(tcmalloc_dir)/src/system-alloc.h',
+            '<(tcmalloc_dir)/src/tcmalloc.cc',
+            '<(tcmalloc_dir)/src/tcmalloc_guard.h',
+            '<(tcmalloc_dir)/src/thread_cache.cc',
+            '<(tcmalloc_dir)/src/thread_cache.h',
+
+            'debugallocation_shim.cc',
+          ],
+          # sources! means that these are not compiled directly.
+          'sources!': [
+            # We simply don't use these, but list them above so that IDE
+            # users can view the full available source for reference, etc.
+            '<(tcmalloc_dir)/src/addressmap-inl.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-linuxppc.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-macosx.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-x86-msvc.h',
+            '<(tcmalloc_dir)/src/base/atomicops-internals-x86.h',
+            '<(tcmalloc_dir)/src/base/atomicops.h',
+            '<(tcmalloc_dir)/src/base/commandlineflags.h',
+            '<(tcmalloc_dir)/src/base/cycleclock.h',
+            '<(tcmalloc_dir)/src/base/elf_mem_image.h',
+            '<(tcmalloc_dir)/src/base/elfcore.h',
+            '<(tcmalloc_dir)/src/base/googleinit.h',
+            '<(tcmalloc_dir)/src/base/linux_syscall_support.h',
+            '<(tcmalloc_dir)/src/base/simple_mutex.h',
+            '<(tcmalloc_dir)/src/base/spinlock_linux-inl.h',
+            '<(tcmalloc_dir)/src/base/spinlock_posix-inl.h',
+            '<(tcmalloc_dir)/src/base/spinlock_win32-inl.h',
+            '<(tcmalloc_dir)/src/base/stl_allocator.h',
+            '<(tcmalloc_dir)/src/base/thread_annotations.h',
+            '<(tcmalloc_dir)/src/getpc.h',
+            '<(tcmalloc_dir)/src/gperftools/heap-checker.h',
+            '<(tcmalloc_dir)/src/gperftools/heap-profiler.h',
+            '<(tcmalloc_dir)/src/gperftools/malloc_extension.h',
+            '<(tcmalloc_dir)/src/gperftools/malloc_extension_c.h',
+            '<(tcmalloc_dir)/src/gperftools/malloc_hook.h',
+            '<(tcmalloc_dir)/src/gperftools/malloc_hook_c.h',
+            '<(tcmalloc_dir)/src/gperftools/profiler.h',
+            '<(tcmalloc_dir)/src/gperftools/stacktrace.h',
+            '<(tcmalloc_dir)/src/gperftools/tcmalloc.h',
+            '<(tcmalloc_dir)/src/heap-checker-bcad.cc',
+            '<(tcmalloc_dir)/src/heap-checker.cc',
+            '<(tcmalloc_dir)/src/libc_override.h',
+            '<(tcmalloc_dir)/src/libc_override_gcc_and_weak.h',
+            '<(tcmalloc_dir)/src/libc_override_glibc.h',
+            '<(tcmalloc_dir)/src/libc_override_osx.h',
+            '<(tcmalloc_dir)/src/libc_override_redefine.h',
+            '<(tcmalloc_dir)/src/malloc_hook_mmap_freebsd.h',
+            '<(tcmalloc_dir)/src/malloc_hook_mmap_linux.h',
+            '<(tcmalloc_dir)/src/memfs_malloc.cc',
+            '<(tcmalloc_dir)/src/packed-cache-inl.h',
+            '<(tcmalloc_dir)/src/page_heap_allocator.h',
+            '<(tcmalloc_dir)/src/pagemap.h',
+            '<(tcmalloc_dir)/src/stacktrace_arm-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_config.h',
+            '<(tcmalloc_dir)/src/stacktrace_generic-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_libunwind-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_powerpc-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_win32-inl.h',
+            '<(tcmalloc_dir)/src/stacktrace_with_context.cc',
+            '<(tcmalloc_dir)/src/stacktrace_x86-inl.h',
+            '<(tcmalloc_dir)/src/tcmalloc_guard.h',
+
+            # Included by debugallocation_shim.cc.
+            '<(tcmalloc_dir)/src/debugallocation.cc',
+            '<(tcmalloc_dir)/src/tcmalloc.cc',
+          ],
+          'variables': {
+            'clang_warning_flags': [
+              # tcmalloc initializes some fields in the wrong order.
+              '-Wno-reorder',
+              # tcmalloc contains some unused local template specializations.
+              '-Wno-unused-function',
+              # tcmalloc uses COMPILE_ASSERT without static_assert but with
+              # typedefs.
+              '-Wno-unused-local-typedefs',
+              # for magic2_ in debugallocation.cc (only built in Debug builds)
+              # typedefs.
+              '-Wno-unused-private-field',
+            ],
+          },
+          'conditions': [
+            ['OS=="linux" or OS=="freebsd" or OS=="solaris" or OS=="android"', {
+              'sources!': [
+                '<(tcmalloc_dir)/src/system-alloc.h',
+              ],
+              # We enable all warnings by default, but upstream disables a few.
+              # Keep "-Wno-*" flags in sync with upstream by comparing against:
+              # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
+              'cflags': [
+                '-Wno-sign-compare',
+                '-Wno-unused-result',
+              ],
+              'link_settings': {
+                'ldflags': [
+                  # Don't let linker rip this symbol out, otherwise the heap&cpu
+                  # profilers will not initialize properly on startup.
+                  '-Wl,-uIsHeapProfilerRunning,-uProfilerStart',
+                  # Do the same for heap leak checker.
+                  '-Wl,-u_Z21InitialMallocHook_NewPKvj,-u_Z22InitialMallocHook_MMapPKvS0_jiiix,-u_Z22InitialMallocHook_SbrkPKvi',
+                  '-Wl,-u_Z21InitialMallocHook_NewPKvm,-u_Z22InitialMallocHook_MMapPKvS0_miiil,-u_Z22InitialMallocHook_SbrkPKvl',
+                  '-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv',
+                ],
+              },
+              # Compiling tcmalloc with -fvisibility=default is only necessary when
+              # not using the allocator shim, which provides the correct visibility
+              # annotations for those symbols which need to be exported (see
+              # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+              # //base/allocator/allocator_shim_internals.h for the definition of
+              # SHIM_ALWAYS_EXPORT).
+              'conditions': [
+                ['use_experimental_allocator_shim==0', {
+                  'cflags!': [
+                    '-fvisibility=hidden',
+                  ],
+                }],
+              ],
+            }],
+            ['profiling!=1', {
+              'sources!': [
+                # cpuprofiler
+                '<(tcmalloc_dir)/src/base/thread_lister.c',
+                '<(tcmalloc_dir)/src/base/thread_lister.h',
+                '<(tcmalloc_dir)/src/profile-handler.cc',
+                '<(tcmalloc_dir)/src/profile-handler.h',
+                '<(tcmalloc_dir)/src/profiledata.cc',
+                '<(tcmalloc_dir)/src/profiledata.h',
+                '<(tcmalloc_dir)/src/profiler.cc',
+              ],
+            }],
+            ['use_experimental_allocator_shim==1', {
+              'defines': [
+                'TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC',
+              ],
+            }]
+          ],
+          'configurations': {
+            'Debug_Base': {
+              'conditions': [
+                ['disable_debugallocation==0', {
+                  'defines': [
+                    # Use debugallocation for Debug builds to catch problems
+                    # early and cleanly, http://crbug.com/30715 .
+                    'TCMALLOC_FOR_DEBUGALLOCATION',
+                  ],
+                }],
+              ],
+            },
+          },
+        }],  # use_allocator=="tcmalloc
+        # For CrOS builds with vtable verification. According to the author of
+        # crrev.com/10854031 this is used in conjuction with some other CrOS
+        # build flag, to enable verification of any allocator that uses virtual
+        # function calls.
+        ['use_vtable_verify==1', {
+          'cflags': [
+            '-fvtable-verify=preinit',
+          ],
+        }],
+        ['order_profiling != 0', {
+          'target_conditions' : [
+            ['_toolset=="target"', {
+              'cflags!': [ '-finstrument-functions' ],
+            }],
+          ],
+        }],
+      ],  # conditions of 'allocator' target.
+    },  # 'allocator' target.
+    {
+      # GN: //base/allocator:features
+      # When referenced from a target that might be compiled in the host
+      # toolchain, always refer to 'allocator_features#target'.
+      'target_name': 'allocator_features',
+      'includes': [ '../../build/buildflag_header.gypi' ],
+      'variables': {
+        'buildflag_header_path': 'base/allocator/features.h',
+        'buildflag_flags': [
+          'USE_EXPERIMENTAL_ALLOCATOR_SHIM=<(use_experimental_allocator_shim)',
+        ],
+      },
+    },  # 'allocator_features' target.
+  ],  # targets.
+  'conditions': [
+    ['use_experimental_allocator_shim==1', {
+      'targets': [
+        {
+          # GN: //base/allocator:unified_allocator_shim
+          'target_name': 'unified_allocator_shim',
+          'toolsets': ['host', 'target'],
+          'type': 'static_library',
+          'defines': [ 'BASE_IMPLEMENTATION' ],
+          'sources': [
+            'allocator_shim.cc',
+            'allocator_shim.h',
+            'allocator_shim_internals.h',
+            'allocator_shim_override_cpp_symbols.h',
+            'allocator_shim_override_libc_symbols.h',
+          ],
+          'include_dirs': [
+            '../..',
+          ],
+          'conditions': [
+            ['OS=="linux" and use_allocator=="tcmalloc"', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_tcmalloc.cc',
+                'allocator_shim_override_glibc_weak_symbols.h',
+              ],
+            }],
+            ['use_allocator=="none" and (OS=="linux" or (OS=="android" and _toolset == "host" and host_os == "linux"))', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_glibc.cc',
+              ],
+            }],
+            ['OS=="android" and _toolset == "target"', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc',
+                'allocator_shim_override_linker_wrapped_symbols.h',
+              ],
+              # On Android all references to malloc & friends symbols are
+              # rewritten, at link time, and routed to the shim.
+              # See //base/allocator/README.md.
+              'all_dependent_settings': {
+                'ldflags': [
+                  '-Wl,-wrap,calloc',
+                  '-Wl,-wrap,free',
+                  '-Wl,-wrap,malloc',
+                  '-Wl,-wrap,memalign',
+                  '-Wl,-wrap,posix_memalign',
+                  '-Wl,-wrap,pvalloc',
+                  '-Wl,-wrap,realloc',
+                  '-Wl,-wrap,valloc',
+                ],
+              },
+            }],
+          ]
+        },  # 'unified_allocator_shim' target.
+      ],
+    }]
+  ],
+}
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
index fbdbdfc..09ed45f 100644
--- a/base/allocator/allocator_shim.cc
+++ b/base/allocator/allocator_shim.cc
@@ -5,26 +5,16 @@
 #include "base/allocator/allocator_shim.h"
 
 #include <errno.h>
+#include <unistd.h>
 
 #include <new>
 
 #include "base/atomicops.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/process/process_metrics.h"
 #include "base/threading/platform_thread.h"
 #include "build/build_config.h"
 
-#if !defined(OS_WIN)
-#include <unistd.h>
-#else
-#include "base/allocator/winheap_stubs_win.h"
-#endif
-
-#if defined(OS_MACOSX)
-#include <malloc/malloc.h>
-#endif
-
 // No calls to malloc / new in this file. They would would cause re-entrancy of
 // the shim, which is hard to deal with. Keep this code as simple as possible
 // and don't use any external C++ object here, not even //base ones. Even if
@@ -38,25 +28,30 @@
     &allocator::AllocatorDispatch::default_dispatch);
 
 bool g_call_new_handler_on_malloc_failure = false;
-
-#if !defined(OS_WIN)
 subtle::Atomic32 g_new_handler_lock = 0;
-#endif
 
-inline size_t GetCachedPageSize() {
+// In theory this should be just base::ThreadChecker. But we can't afford
+// the luxury of a LazyInstance<ThreadChecker> here as it would cause a new().
+bool CalledOnValidThread() {
+  using subtle::Atomic32;
+  const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
+  static Atomic32 g_tid = kInvalidTID;
+  Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
+  Atomic32 prev_tid =
+      subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
+  return prev_tid == kInvalidTID || prev_tid == cur_tid;
+}
+
+inline size_t GetPageSize() {
   static size_t pagesize = 0;
   if (!pagesize)
-    pagesize = base::GetPageSize();
+    pagesize = sysconf(_SC_PAGESIZE);
   return pagesize;
 }
 
 // Calls the std::new handler thread-safely. Returns true if a new_handler was
 // set and called, false if no new_handler was set.
-bool CallNewHandler(size_t size) {
-#if defined(OS_WIN)
-  return base::allocator::WinCallNewHandler(size);
-#else
-  ALLOW_UNUSED_PARAM(size);
+bool CallNewHandler() {
   // TODO(primiano): C++11 has introduced ::get_new_handler() which is supposed
   // to be thread safe and would avoid the spinlock boilerplate here. However
   // it doesn't seem to be available yet in the Linux chroot headers yet.
@@ -74,7 +69,6 @@
   // Assume the new_handler will abort if it fails. Exception are disabled and
   // we don't support the case of a new_handler throwing std::bad_balloc.
   return true;
-#endif
 }
 
 inline const allocator::AllocatorDispatch* GetChainHead() {
@@ -101,34 +95,14 @@
 
 void* UncheckedAlloc(size_t size) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->alloc_function(chain_head, size, nullptr);
+  return chain_head->alloc_function(chain_head, size);
 }
 
 void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
-  // Loop in case of (an unlikely) race on setting the list head.
-  size_t kMaxRetries = 7;
-  for (size_t i = 0; i < kMaxRetries; ++i) {
-    const AllocatorDispatch* chain_head = GetChainHead();
-    dispatch->next = chain_head;
+  // Ensure this is always called on the same thread.
+  DCHECK(CalledOnValidThread());
 
-    // This function guarantees to be thread-safe w.r.t. concurrent
-    // insertions. It also has to guarantee that all the threads always
-    // see a consistent chain, hence the MemoryBarrier() below.
-    // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
-    // we don't really want this to be a release-store with a corresponding
-    // acquire-load during malloc().
-    subtle::MemoryBarrier();
-    subtle::AtomicWord old_value =
-        reinterpret_cast<subtle::AtomicWord>(chain_head);
-    // Set the chain head to the new dispatch atomically. If we lose the race,
-    // the comparison will fail, and the new head of chain will be returned.
-    if (subtle::NoBarrier_CompareAndSwap(
-            &g_chain_head, old_value,
-            reinterpret_cast<subtle::AtomicWord>(dispatch)) == old_value) {
-      // Success.
-      return;
-    }
-  }
+  dispatch->next = GetChainHead();
 
   // This function does not guarantee to be thread-safe w.r.t. concurrent
   // insertions, but still has to guarantee that all the threads always
@@ -143,6 +117,7 @@
 }
 
 void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
+  DCHECK(CalledOnValidThread());
   DCHECK_EQ(GetChainHead(), dispatch);
   subtle::NoBarrier_Store(&g_chain_head,
                           reinterpret_cast<subtle::AtomicWord>(dispatch->next));
@@ -152,10 +127,8 @@
 }  // namespace base
 
 // The Shim* functions below are the entry-points into the shim-layer and
-// are supposed to be invoked by the allocator_shim_override_*
+// are supposed to be invoked / aliased by the allocator_shim_override_*
 // headers to route the malloc / new symbols through the shim layer.
-// They are defined as ALWAYS_INLINE in order to remove a level of indirection
-// between the system-defined entry points and the shim implementations.
 extern "C" {
 
 // The general pattern for allocations is:
@@ -170,155 +143,102 @@
 //       just suicide priting a message).
 //     - Assume it did succeed if it returns, in which case reattempt the alloc.
 
-ALWAYS_INLINE void* ShimCppNew(size_t size) {
+void* ShimCppNew(size_t size) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
-    void* context = nullptr;
-#if defined(OS_MACOSX)
-    context = malloc_default_zone();
-#endif
-    ptr = chain_head->alloc_function(chain_head, size, context);
-  } while (!ptr && CallNewHandler(size));
+    ptr = chain_head->alloc_function(chain_head, size);
+  } while (!ptr && CallNewHandler());
   return ptr;
 }
 
-ALWAYS_INLINE void ShimCppDelete(void* address) {
-  void* context = nullptr;
-#if defined(OS_MACOSX)
-  context = malloc_default_zone();
-#endif
+void ShimCppDelete(void* address) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->free_function(chain_head, address, context);
+  return chain_head->free_function(chain_head, address);
 }
 
-ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
+void* ShimMalloc(size_t size) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
-    ptr = chain_head->alloc_function(chain_head, size, context);
-  } while (!ptr && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
+    ptr = chain_head->alloc_function(chain_head, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
   return ptr;
 }
 
-ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
+void* ShimCalloc(size_t n, size_t size) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
-    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
-                                                      context);
-  } while (!ptr && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
+    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
   return ptr;
 }
 
-ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
+void* ShimRealloc(void* address, size_t size) {
   // realloc(size == 0) means free() and might return a nullptr. We should
   // not call the std::new_handler in that case, though.
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
-    ptr = chain_head->realloc_function(chain_head, address, size, context);
+    ptr = chain_head->realloc_function(chain_head, address, size);
   } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
+           CallNewHandler());
   return ptr;
 }
 
-ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
+void* ShimMemalign(size_t alignment, size_t size) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
   void* ptr;
   do {
-    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
-                                             context);
-  } while (!ptr && g_call_new_handler_on_malloc_failure &&
-           CallNewHandler(size));
+    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
   return ptr;
 }
 
-ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
+int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
   // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
   // in tc_malloc.cc.
   if (((alignment % sizeof(void*)) != 0) ||
       ((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
     return EINVAL;
   }
-  void* ptr = ShimMemalign(alignment, size, nullptr);
+  void* ptr = ShimMemalign(alignment, size);
   *res = ptr;
   return ptr ? 0 : ENOMEM;
 }
 
-ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
-  return ShimMemalign(GetCachedPageSize(), size, context);
+void* ShimValloc(size_t size) {
+  return ShimMemalign(GetPageSize(), size);
 }
 
-ALWAYS_INLINE void* ShimPvalloc(size_t size) {
+void* ShimPvalloc(size_t size) {
   // pvalloc(0) should allocate one page, according to its man page.
   if (size == 0) {
-    size = GetCachedPageSize();
+    size = GetPageSize();
   } else {
-    size = (size + GetCachedPageSize() - 1) & ~(GetCachedPageSize() - 1);
+    size = (size + GetPageSize() - 1) & ~(GetPageSize() - 1);
   }
-  // The third argument is nullptr because pvalloc is glibc only and does not
-  // exist on OSX/BSD systems.
-  return ShimMemalign(GetCachedPageSize(), size, nullptr);
+  return ShimMemalign(GetPageSize(), size);
 }
 
-ALWAYS_INLINE void ShimFree(void* address, void* context) {
+void ShimFree(void* address) {
   const allocator::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->free_function(chain_head, address, context);
-}
-
-ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
-  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->get_size_estimate_function(
-      chain_head, const_cast<void*>(address), context);
-}
-
-ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
-                                       void** results,
-                                       unsigned num_requested,
-                                       void* context) {
-  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->batch_malloc_function(chain_head, size, results,
-                                           num_requested, context);
-}
-
-ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
-                                 unsigned num_to_be_freed,
-                                 void* context) {
-  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->batch_free_function(chain_head, to_be_freed,
-                                         num_to_be_freed, context);
-}
-
-ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
-  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
-  return chain_head->free_definite_size_function(chain_head, ptr, size,
-                                                 context);
+  return chain_head->free_function(chain_head, address);
 }
 
 }  // extern "C"
 
-#if !defined(OS_WIN) && !defined(OS_MACOSX)
-// Cpp symbols (new / delete) should always be routed through the shim layer
-// except on Windows and macOS where the malloc intercept is deep enough that it
-// also catches the cpp calls.
+// Cpp symbols (new / delete) should always be routed through the shim layer.
 #include "base/allocator/allocator_shim_override_cpp_symbols.h"
-#endif
 
-#if defined(OS_ANDROID) || defined(ANDROID)
 // Android does not support symbol interposition. The way malloc symbols are
 // intercepted on Android is by using link-time -wrap flags.
-#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
-#elif defined(OS_WIN)
-// On Windows we use plain link-time overriding of the CRT symbols.
-#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
-#elif defined(OS_MACOSX)
-#include "base/allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h"
-#include "base/allocator/allocator_shim_override_mac_symbols.h"
-#else
+#if !defined(OS_ANDROID) && !defined(ANDROID)
+// Ditto for plain malloc() / calloc() / free() etc. symbols.
 #include "base/allocator/allocator_shim_override_libc_symbols.h"
+#else
+#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
 #endif
 
 // In the case of tcmalloc we also want to plumb into the glibc hooks
@@ -328,22 +248,6 @@
 #include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
 #endif
 
-#if defined(OS_MACOSX)
-namespace base {
-namespace allocator {
-void InitializeAllocatorShim() {
-  // Prepares the default dispatch. After the intercepted malloc calls have
-  // traversed the shim this will route them to the default malloc zone.
-  InitializeDefaultDispatchToMacAllocator();
-
-  // This replaces the default malloc zone, causing calls to malloc & friends
-  // from the codebase to be routed to ShimMalloc() above.
-  OverrideMacSymbols();
-}
-}  // namespace allocator
-}  // namespace base
-#endif
-
 // Cross-checks.
 
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/base/allocator/allocator_shim.h b/base/allocator/allocator_shim.h
index 65ac1eb..f1a1e3d 100644
--- a/base/allocator/allocator_shim.h
+++ b/base/allocator/allocator_shim.h
@@ -8,7 +8,6 @@
 #include <stddef.h>
 
 #include "base/base_export.h"
-#include "build/build_config.h"
 
 namespace base {
 namespace allocator {
@@ -46,54 +45,23 @@
 // wihout introducing unnecessary perf hits.
 
 struct AllocatorDispatch {
-  using AllocFn = void*(const AllocatorDispatch* self,
-                        size_t size,
-                        void* context);
+  using AllocFn = void*(const AllocatorDispatch* self, size_t size);
   using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
                                        size_t n,
-                                       size_t size,
-                                       void* context);
+                                       size_t size);
   using AllocAlignedFn = void*(const AllocatorDispatch* self,
                                size_t alignment,
-                               size_t size,
-                               void* context);
+                               size_t size);
   using ReallocFn = void*(const AllocatorDispatch* self,
                           void* address,
-                          size_t size,
-                          void* context);
-  using FreeFn = void(const AllocatorDispatch* self,
-                      void* address,
-                      void* context);
-  // Returns the best available estimate for the actual amount of memory
-  // consumed by the allocation |address|. If possible, this should include
-  // heap overhead or at least a decent estimate of the full cost of the
-  // allocation. If no good estimate is possible, returns zero.
-  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
-                                   void* address,
-                                   void* context);
-  using BatchMallocFn = unsigned(const AllocatorDispatch* self,
-                                 size_t size,
-                                 void** results,
-                                 unsigned num_requested,
-                                 void* context);
-  using BatchFreeFn = void(const AllocatorDispatch* self,
-                           void** to_be_freed,
-                           unsigned num_to_be_freed,
-                           void* context);
-  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self,
-                                  void* ptr,
-                                  size_t size,
-                                  void* context);
+                          size_t size);
+  using FreeFn = void(const AllocatorDispatch* self, void* address);
 
   AllocFn* const alloc_function;
   AllocZeroInitializedFn* const alloc_zero_initialized_function;
   AllocAlignedFn* const alloc_aligned_function;
   ReallocFn* const realloc_function;
   FreeFn* const free_function;
-  GetSizeEstimateFn* const get_size_estimate_function;
-  BatchMallocFn* const batch_malloc_function;
-  BatchFreeFn* const batch_free_function;
-  FreeDefiniteSizeFn* const free_definite_size_function;
 
   const AllocatorDispatch* next;
 
@@ -111,10 +79,10 @@
 // regardless of SetCallNewHandlerOnMallocFailure().
 BASE_EXPORT void* UncheckedAlloc(size_t size);
 
-// Inserts |dispatch| in front of the allocator chain. This method is
+// Inserts |dispatch| in front of the allocator chain. This method is NOT
 // thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
-// The callers have responsibility for inserting a single dispatch no more
-// than once.
+// The callers have the responsibility of linearizing the changes to the chain
+// (or more likely call these always on the same thread).
 BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
 
 // Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
@@ -122,11 +90,6 @@
 // in malloc(), which we really don't want.
 BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
 
-#if defined(OS_MACOSX)
-// On macOS, the allocator shim needs to be turned on during runtime.
-BASE_EXPORT void InitializeAllocatorShim();
-#endif  // defined(OS_MACOSX)
-
 }  // namespace allocator
 }  // namespace base
 
diff --git a/base/allocator/allocator_shim_default_dispatch_to_glibc.cc b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
index 6f386d4..02facba 100644
--- a/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
+++ b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
@@ -4,10 +4,6 @@
 
 #include "base/allocator/allocator_shim.h"
 
-#include <malloc.h>
-
-#include "base/compiler_specific.h"
-
 // This translation unit defines a default dispatch for the allocator shim which
 // routes allocations to libc functions.
 // The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
@@ -24,60 +20,33 @@
 
 using base::allocator::AllocatorDispatch;
 
-void* GlibcMalloc(const AllocatorDispatch*, size_t size, void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void* GlibcMalloc(const AllocatorDispatch*, size_t size) {
   return __libc_malloc(size);
 }
 
-void* GlibcCalloc(const AllocatorDispatch*,
-                  size_t n,
-                  size_t size,
-                  void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void* GlibcCalloc(const AllocatorDispatch*, size_t n, size_t size) {
   return __libc_calloc(n, size);
 }
 
-void* GlibcRealloc(const AllocatorDispatch*,
-                   void* address,
-                   size_t size,
-                   void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void* GlibcRealloc(const AllocatorDispatch*, void* address, size_t size) {
   return __libc_realloc(address, size);
 }
 
-void* GlibcMemalign(const AllocatorDispatch*,
-                    size_t alignment,
-                    size_t size,
-                    void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void* GlibcMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
   return __libc_memalign(alignment, size);
 }
 
-void GlibcFree(const AllocatorDispatch*, void* address, void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void GlibcFree(const AllocatorDispatch*, void* address) {
   __libc_free(address);
 }
 
-size_t GlibcGetSizeEstimate(const AllocatorDispatch*,
-                            void* address,
-                            void* context) {
-  // TODO(siggi, primiano): malloc_usable_size may need redirection in the
-  //     presence of interposing shims that divert allocations.
-  ALLOW_UNUSED_PARAM(context);
-  return malloc_usable_size(address);
-}
-
 }  // namespace
 
 const AllocatorDispatch AllocatorDispatch::default_dispatch = {
-    &GlibcMalloc,          /* alloc_function */
-    &GlibcCalloc,          /* alloc_zero_initialized_function */
-    &GlibcMemalign,        /* alloc_aligned_function */
-    &GlibcRealloc,         /* realloc_function */
-    &GlibcFree,            /* free_function */
-    &GlibcGetSizeEstimate, /* get_size_estimate_function */
-    nullptr,               /* batch_malloc_function */
-    nullptr,               /* batch_free_function */
-    nullptr,               /* free_definite_size_function */
-    nullptr,               /* next */
+    &GlibcMalloc,   /* alloc_function */
+    &GlibcCalloc,   /* alloc_zero_initialized_function */
+    &GlibcMemalign, /* alloc_aligned_function */
+    &GlibcRealloc,  /* realloc_function */
+    &GlibcFree,     /* free_function */
+    nullptr,        /* next */
 };
diff --git a/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
index 3ad13ef..7955cb7 100644
--- a/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
+++ b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -2,15 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include <malloc.h>
-
 #include "base/allocator/allocator_shim.h"
-#include "base/compiler_specific.h"
-#include "build/build_config.h"
-
-#if defined(OS_ANDROID) && __ANDROID_API__ < 17
-#include <dlfcn.h>
-#endif
 
 // This translation unit defines a default dispatch for the allocator shim which
 // routes allocations to the original libc functions when using the link-time
@@ -33,86 +25,33 @@
 
 using base::allocator::AllocatorDispatch;
 
-void* RealMalloc(const AllocatorDispatch*, size_t size, void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void* RealMalloc(const AllocatorDispatch*, size_t size) {
   return __real_malloc(size);
 }
 
-void* RealCalloc(const AllocatorDispatch*,
-                 size_t n,
-                 size_t size,
-                 void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void* RealCalloc(const AllocatorDispatch*, size_t n, size_t size) {
   return __real_calloc(n, size);
 }
 
-void* RealRealloc(const AllocatorDispatch*,
-                  void* address,
-                  size_t size,
-                  void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void* RealRealloc(const AllocatorDispatch*, void* address, size_t size) {
   return __real_realloc(address, size);
 }
 
-void* RealMemalign(const AllocatorDispatch*,
-                   size_t alignment,
-                   size_t size,
-                   void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void* RealMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
   return __real_memalign(alignment, size);
 }
 
-void RealFree(const AllocatorDispatch*, void* address, void* context) {
-  ALLOW_UNUSED_PARAM(context);
+void RealFree(const AllocatorDispatch*, void* address) {
   __real_free(address);
 }
 
-#if defined(OS_ANDROID) && __ANDROID_API__ < 17
-size_t DummyMallocUsableSize(const void*) { return 0; }
-#endif
-
-size_t RealSizeEstimate(const AllocatorDispatch*,
-                        void* address,
-                        void* context) {
-  ALLOW_UNUSED_PARAM(address);
-  ALLOW_UNUSED_PARAM(context);
-#if defined(OS_ANDROID)
-#if __ANDROID_API__ < 17
-  // malloc_usable_size() is available only starting from API 17.
-  // TODO(dskiba): remove once we start building against 17+.
-  using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
-  static MallocUsableSizeFunction usable_size_function = nullptr;
-  if (!usable_size_function) {
-    void* function_ptr = dlsym(RTLD_DEFAULT, "malloc_usable_size");
-    if (function_ptr) {
-      usable_size_function = reinterpret_cast<MallocUsableSizeFunction>(
-          function_ptr);
-    } else {
-      usable_size_function = &DummyMallocUsableSize;
-    }
-  }
-  return usable_size_function(address);
-#else
-  return malloc_usable_size(address);
-#endif
-#endif  // OS_ANDROID
-
-  // TODO(primiano): This should be redirected to malloc_usable_size or
-  //     the like.
-  return 0;
-}
-
 }  // namespace
 
 const AllocatorDispatch AllocatorDispatch::default_dispatch = {
-    &RealMalloc,       /* alloc_function */
-    &RealCalloc,       /* alloc_zero_initialized_function */
-    &RealMemalign,     /* alloc_aligned_function */
-    &RealRealloc,      /* realloc_function */
-    &RealFree,         /* free_function */
-    &RealSizeEstimate, /* get_size_estimate_function */
-    nullptr,           /* batch_malloc_function */
-    nullptr,           /* batch_free_function */
-    nullptr,           /* free_definite_size_function */
-    nullptr,           /* next */
+    &RealMalloc,   /* alloc_function */
+    &RealCalloc,   /* alloc_zero_initialized_function */
+    &RealMemalign, /* alloc_aligned_function */
+    &RealRealloc,  /* realloc_function */
+    &RealFree,     /* free_function */
+    nullptr,       /* next */
 };
diff --git a/base/allocator/allocator_shim_internals.h b/base/allocator/allocator_shim_internals.h
index 82624ee..fc3624c 100644
--- a/base/allocator/allocator_shim_internals.h
+++ b/base/allocator/allocator_shim_internals.h
@@ -20,6 +20,8 @@
 // Shim layer symbols need to be ALWAYS exported, regardless of component build.
 #define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
 
+#define SHIM_ALIAS_SYMBOL(fn) __attribute__((alias(#fn)))
+
 #endif  // __GNUC__
 
 #endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
diff --git a/base/allocator/allocator_shim_override_cpp_symbols.h b/base/allocator/allocator_shim_override_cpp_symbols.h
index 3313687..616716f 100644
--- a/base/allocator/allocator_shim_override_cpp_symbols.h
+++ b/base/allocator/allocator_shim_override_cpp_symbols.h
@@ -7,45 +7,36 @@
 #endif
 #define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
 
-// Preempt the default new/delete C++ symbols so they call the shim entry
-// points. This file is strongly inspired by tcmalloc's
-// libc_override_redefine.h.
+// Alias the default new/delete C++ symbols to the shim entry points.
+// This file is strongly inspired by tcmalloc's libc_override_redefine.h.
 
 #include <new>
 
 #include "base/allocator/allocator_shim_internals.h"
 
-SHIM_ALWAYS_EXPORT void* operator new(size_t size) {
-  return ShimCppNew(size);
-}
+SHIM_ALWAYS_EXPORT void* operator new(size_t size)
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
 
-SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW {
-  ShimCppDelete(p);
-}
+SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
 
-SHIM_ALWAYS_EXPORT void* operator new[](size_t size) {
-  return ShimCppNew(size);
-}
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size)
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
 
-SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW {
-  ShimCppDelete(p);
-}
+SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
 
 SHIM_ALWAYS_EXPORT void* operator new(size_t size,
-                                      const std::nothrow_t&) __THROW {
-  return ShimCppNew(size);
-}
+                                      const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
 
 SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
-                                        const std::nothrow_t&) __THROW {
-  return ShimCppNew(size);
-}
+                                        const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
 
-SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW {
-  ShimCppDelete(p);
-}
+SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
 
 SHIM_ALWAYS_EXPORT void operator delete[](void* p,
-                                          const std::nothrow_t&) __THROW {
-  ShimCppDelete(p);
-}
+                                          const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
diff --git a/base/allocator/allocator_shim_override_libc_symbols.h b/base/allocator/allocator_shim_override_libc_symbols.h
index b77cbb1..37b3b4eb 100644
--- a/base/allocator/allocator_shim_override_libc_symbols.h
+++ b/base/allocator/allocator_shim_override_libc_symbols.h
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Its purpose is to preempt the Libc symbols for malloc/new so they call the
+// Its purpose is to SHIM_ALIAS_SYMBOL the Libc symbols for malloc/new to the
 // shim layer entry points.
 
 #ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
@@ -16,41 +16,32 @@
 
 extern "C" {
 
-SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
-  return ShimMalloc(size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimMalloc);
 
-SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW {
-  ShimFree(ptr, nullptr);
-}
+SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW
+    SHIM_ALIAS_SYMBOL(ShimFree);
 
-SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW {
-  return ShimRealloc(ptr, size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimRealloc);
 
-SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW {
-  return ShimCalloc(n, size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCalloc);
 
-SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW {
-  ShimFree(ptr, nullptr);
-}
+SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW
+    SHIM_ALIAS_SYMBOL(ShimFree);
 
-SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW {
-  return ShimMemalign(align, s, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW
+    SHIM_ALIAS_SYMBOL(ShimMemalign);
 
-SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW {
-  return ShimValloc(size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimValloc);
 
-SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW {
-  return ShimPvalloc(size);
-}
+SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimPvalloc);
 
-SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW {
-  return ShimPosixMemalign(r, a, s);
-}
+SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW
+    SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
 
 // The default dispatch translation unit has to define also the following
 // symbols (unless they are ultimately routed to the system symbols):
diff --git a/base/allocator/allocator_shim_override_linker_wrapped_symbols.h b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
index 6bf73c3..5b85d6e 100644
--- a/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
+++ b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
@@ -17,38 +17,28 @@
 
 extern "C" {
 
-SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t n, size_t size) {
-  return ShimCalloc(n, size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimCalloc);
 
-SHIM_ALWAYS_EXPORT void __wrap_free(void* ptr) {
-  ShimFree(ptr, nullptr);
-}
+SHIM_ALWAYS_EXPORT void __wrap_free(void*)
+    SHIM_ALIAS_SYMBOL(ShimFree);
 
-SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t size) {
-  return ShimMalloc(size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimMalloc);
 
-SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t align, size_t size) {
-  return ShimMemalign(align, size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimMemalign);
 
-SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void** res,
-                                             size_t align,
-                                             size_t size) {
-  return ShimPosixMemalign(res, align, size);
-}
+SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void**, size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
 
-SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t size) {
-  return ShimPvalloc(size);
-}
+SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimPvalloc);
 
-SHIM_ALWAYS_EXPORT void* __wrap_realloc(void* address, size_t size) {
-  return ShimRealloc(address, size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* __wrap_realloc(void*, size_t)
+    SHIM_ALIAS_SYMBOL(ShimRealloc);
 
-SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
-  return ShimValloc(size, nullptr);
-}
+SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimValloc);
 
 }  // extern "C"
diff --git a/base/at_exit.cc b/base/at_exit.cc
index 5dcc83c..cfe4cf9 100644
--- a/base/at_exit.cc
+++ b/base/at_exit.cc
@@ -22,8 +22,6 @@
 // this for thread-safe access, since it will only be modified in testing.
 static AtExitManager* g_top_manager = NULL;
 
-static bool g_disable_managers = false;
-
 AtExitManager::AtExitManager()
     : processing_callbacks_(false), next_manager_(g_top_manager) {
 // If multiple modules instantiate AtExitManagers they'll end up living in this
@@ -41,8 +39,7 @@
   }
   DCHECK_EQ(this, g_top_manager);
 
-  if (!g_disable_managers)
-    ProcessCallbacksNow();
+  ProcessCallbacksNow();
   g_top_manager = next_manager_;
 }
 
@@ -91,11 +88,6 @@
   DCHECK(g_top_manager->stack_.empty());
 }
 
-void AtExitManager::DisableAllAtExitManagers() {
-  AutoLock lock(g_top_manager->lock_);
-  g_disable_managers = true;
-}
-
 AtExitManager::AtExitManager(bool shadow)
     : processing_callbacks_(false), next_manager_(g_top_manager) {
   DCHECK(shadow || !g_top_manager);
diff --git a/base/at_exit.h b/base/at_exit.h
index 6bf3f50..02e18ed 100644
--- a/base/at_exit.h
+++ b/base/at_exit.h
@@ -49,10 +49,6 @@
   // is possible to register new callbacks after calling this function.
   static void ProcessCallbacksNow();
 
-  // Disable all registered at-exit callbacks. This is used only in a single-
-  // process mode.
-  static void DisableAllAtExitManagers();
-
  protected:
   // This constructor will allow this instance of AtExitManager to be created
   // even if one already exists.  This should only be used for testing!
diff --git a/base/atomic_ref_count.h b/base/atomic_ref_count.h
index 93c1f0d..2ab7242 100644
--- a/base/atomic_ref_count.h
+++ b/base/atomic_ref_count.h
@@ -12,7 +12,7 @@
 
 namespace base {
 
-typedef subtle::AtomicWord AtomicRefCount;
+typedef subtle::Atomic32 AtomicRefCount;
 
 // Increment a reference count by "increment", which must exceed 0.
 inline void AtomicRefCountIncN(volatile AtomicRefCount *ptr,
diff --git a/base/base.gyp b/base/base.gyp
new file mode 100644
index 0000000..a534d5c
--- /dev/null
+++ b/base/base.gyp
@@ -0,0 +1,1801 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'chromium_code': 1,
+  },
+  'includes': [
+    '../build/win_precompile.gypi',
+    'base.gypi',
+  ],
+  'targets': [
+    {
+      'target_name': 'base',
+      'type': '<(component)',
+      'toolsets': ['host', 'target'],
+      'variables': {
+        'base_target': 1,
+        'enable_wexit_time_destructors': 1,
+        'optimize': 'max',
+      },
+      'dependencies': [
+        'allocator/allocator.gyp:allocator',
+        'allocator/allocator.gyp:allocator_features#target',
+        'base_debugging_flags#target',
+        'base_win_features#target',
+        'base_static',
+        'base_build_date#target',
+        '../testing/gtest.gyp:gtest_prod',
+        '../third_party/modp_b64/modp_b64.gyp:modp_b64',
+        'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+      ],
+      # TODO(gregoryd): direct_dependent_settings should be shared with the
+      #  64-bit target, but it doesn't work due to a bug in gyp
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '..',
+        ],
+      },
+      'conditions': [
+        ['desktop_linux == 1 or chromeos == 1', {
+          'conditions': [
+            ['chromeos==1', {
+              'sources/': [ ['include', '_chromeos\\.cc$'] ]
+            }],
+          ],
+          'dependencies': [
+            'symbolize',
+            'xdg_mime',
+          ],
+          'defines': [
+            'USE_SYMBOLIZE',
+          ],
+        }, {  # desktop_linux == 0 and chromeos == 0
+            'sources/': [
+              ['exclude', '/xdg_user_dirs/'],
+              ['exclude', '_nss\\.cc$'],
+            ],
+        }],
+        ['use_glib==1', {
+          'dependencies': [
+            '../build/linux/system.gyp:glib',
+          ],
+          'export_dependent_settings': [
+            '../build/linux/system.gyp:glib',
+          ],
+        }],
+        ['OS == "android" and _toolset == "host"', {
+          # Always build base as a static_library for host toolset, even if
+          # we're doing a component build. Specifically, we only care about the
+          # target toolset using components since that's what developers are
+          # focusing on. In theory we should do this more generally for all
+          # targets when building for host, but getting the gyp magic
+          # per-toolset for the "component" variable is hard, and we really only
+          # need base on host.
+          'type': 'static_library',
+          # Base for host support is the minimum required to run the
+          # ssl false start blacklist tool. It requires further changes
+          # to generically support host builds (and tests).
+          # Note: when building for host, gyp has OS == "android",
+          # hence the *_android.cc files are included but the actual code
+          # doesn't have OS_ANDROID / ANDROID defined.
+          'conditions': [
+            ['host_os == "mac"', {
+              'sources/': [
+                ['exclude', '^native_library_linux\\.cc$'],
+                ['exclude', '^process_util_linux\\.cc$'],
+                ['exclude', '^sys_info_linux\\.cc$'],
+                ['exclude', '^sys_string_conversions_linux\\.cc$'],
+                ['exclude', '^worker_pool_linux\\.cc$'],
+              ],
+            }],
+          ],
+        }],
+        ['OS == "android" and _toolset == "target"', {
+          'dependencies': [
+            'base_java',
+            'base_jni_headers',
+            '../build/android/ndk.gyp:cpu_features',
+            '../third_party/ashmem/ashmem.gyp:ashmem',
+          ],
+          'link_settings': {
+            'libraries': [
+              '-llog',
+            ],
+          },
+          'sources!': [
+            'debug/stack_trace_posix.cc',
+          ],
+        }],
+        ['os_bsd==1', {
+          'include_dirs': [
+            '/usr/local/include',
+          ],
+          'link_settings': {
+            'libraries': [
+              '-L/usr/local/lib -lexecinfo',
+            ],
+          },
+        }],
+        ['OS == "linux"', {
+          'link_settings': {
+            'libraries': [
+              # We need rt for clock_gettime().
+              '-lrt',
+              # For 'native_library_linux.cc'
+              '-ldl',
+            ],
+          },
+          'conditions': [
+            ['use_allocator!="tcmalloc"', {
+              'defines': [
+                'NO_TCMALLOC',
+              ],
+              'direct_dependent_settings': {
+                'defines': [
+                  'NO_TCMALLOC',
+                ],
+              },
+            }],
+          ],
+        }],
+        ['use_sysroot==0 and (OS == "android" or OS == "linux")', {
+          'link_settings': {
+            'libraries': [
+              # Needed for <atomic> when building with newer C++ library.
+              '-latomic',
+            ],
+          },
+        }],
+        ['OS == "win"', {
+          # Specify delayload for base.dll.
+          'msvs_settings': {
+            'VCLinkerTool': {
+              'DelayLoadDLLs': [
+                'cfgmgr32.dll',
+                'powrprof.dll',
+                'setupapi.dll',
+              ],
+              'AdditionalDependencies': [
+                'cfgmgr32.lib',
+                'powrprof.lib',
+                'setupapi.lib',
+                'userenv.lib',
+                'winmm.lib',
+              ],
+            },
+          },
+          # Specify delayload for components that link with base.lib.
+          'all_dependent_settings': {
+            'msvs_settings': {
+              'VCLinkerTool': {
+                'DelayLoadDLLs': [
+                  'cfgmgr32.dll',
+                  'powrprof.dll',
+                  'setupapi.dll',
+                ],
+                'AdditionalDependencies': [
+                  'cfgmgr32.lib',
+                  'powrprof.lib',
+                  'setupapi.lib',
+                  'userenv.lib',
+                  'winmm.lib',
+                ],
+              },
+            },
+          },
+          'dependencies': [
+           'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
+          ],
+        }],
+        ['OS == "mac" or (OS == "ios" and _toolset == "host")', {
+          'link_settings': {
+            'libraries': [
+              '$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
+              '$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework',
+              '$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
+              '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
+              '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+              '$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
+              '$(SDKROOT)/System/Library/Frameworks/Security.framework',
+              '$(SDKROOT)/usr/lib/libbsm.dylib',
+            ],
+          },
+        }],
+        ['OS == "ios" and _toolset != "host"', {
+          'link_settings': {
+            'libraries': [
+              '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
+              '$(SDKROOT)/System/Library/Frameworks/CoreGraphics.framework',
+              '$(SDKROOT)/System/Library/Frameworks/CoreText.framework',
+              '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+              '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+            ],
+          },
+        }],
+        ['OS != "win" and (OS != "ios" or _toolset == "host")', {
+            'dependencies': ['third_party/libevent/libevent.gyp:libevent'],
+        },],
+        ['component=="shared_library"', {
+          'conditions': [
+            ['OS=="win"', {
+              'sources!': [
+                'debug/debug_on_start_win.cc',
+              ],
+            }],
+          ],
+        }],
+        ['OS=="ios"', {
+          'sources!': [
+            'sync_socket.h',
+            'sync_socket_posix.cc',
+          ]
+        }],
+        ['use_experimental_allocator_shim==1', {
+          'dependencies': [ 'allocator/allocator.gyp:unified_allocator_shim']
+        }],
+      ],
+      'sources': [
+        'auto_reset.h',
+        'linux_util.cc',
+        'linux_util.h',
+        'message_loop/message_pump_android.cc',
+        'message_loop/message_pump_android.h',
+        'message_loop/message_pump_glib.cc',
+        'message_loop/message_pump_glib.h',
+        'message_loop/message_pump_io_ios.cc',
+        'message_loop/message_pump_io_ios.h',
+        'message_loop/message_pump_libevent.cc',
+        'message_loop/message_pump_libevent.h',
+        'message_loop/message_pump_mac.h',
+        'message_loop/message_pump_mac.mm',
+        'metrics/field_trial.cc',
+        'metrics/field_trial.h',
+        'posix/file_descriptor_shuffle.cc',
+        'posix/file_descriptor_shuffle.h',
+        'sync_socket.h',
+        'sync_socket_posix.cc',
+        'sync_socket_win.cc',
+        'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
+        'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
+      ],
+      'includes': [
+        '../build/android/increase_size_for_speed.gypi',
+      ],
+    },
+    {
+      'target_name': 'base_i18n',
+      'type': '<(component)',
+      'variables': {
+        'enable_wexit_time_destructors': 1,
+        'optimize': 'max',
+        'base_i18n_target': 1,
+      },
+      'dependencies': [
+        'base',
+        'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+        '../third_party/icu/icu.gyp:icui18n',
+        '../third_party/icu/icu.gyp:icuuc',
+      ],
+      'conditions': [
+        ['OS == "win"', {
+          # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+          'msvs_disabled_warnings': [
+            4267,
+          ],
+        }],
+        ['icu_use_data_file_flag==1', {
+          'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
+        }, { # else icu_use_data_file_flag !=1
+          'conditions': [
+            ['OS=="win"', {
+              'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
+            }, {
+              'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
+            }],
+          ],
+        }],
+        ['OS == "ios"', {
+          'toolsets': ['host', 'target'],
+        }],
+      ],
+      'export_dependent_settings': [
+        'base',
+        '../third_party/icu/icu.gyp:icuuc',
+        '../third_party/icu/icu.gyp:icui18n',
+      ],
+      'includes': [
+        '../build/android/increase_size_for_speed.gypi',
+      ],
+    },
+    {
+      'target_name': 'base_message_loop_tests',
+      'type': 'static_library',
+      'dependencies': [
+        'base',
+        '../testing/gtest.gyp:gtest',
+      ],
+      'sources': [
+        'message_loop/message_loop_test.cc',
+        'message_loop/message_loop_test.h',
+      ],
+    },
+    {
+      # This is the subset of files from base that should not be used with a
+      # dynamic library. Note that this library cannot depend on base because
+      # base depends on base_static.
+      'target_name': 'base_static',
+      'type': 'static_library',
+      'variables': {
+        'enable_wexit_time_destructors': 1,
+        'optimize': 'max',
+      },
+      'toolsets': ['host', 'target'],
+      'sources': [
+        'base_switches.cc',
+        'base_switches.h',
+        'win/pe_image.cc',
+        'win/pe_image.h',
+      ],
+      'include_dirs': [
+        '..',
+      ],
+      'includes': [
+        '../build/android/increase_size_for_speed.gypi',
+      ],
+    },
+    # Include this target for a main() function that simply instantiates
+    # and runs a base::TestSuite.
+    {
+      'target_name': 'run_all_unittests',
+      'type': 'static_library',
+      'dependencies': [
+        'test_support_base',
+      ],
+      'sources': [
+        'test/run_all_unittests.cc',
+      ],
+    },
+    {
+      'target_name': 'base_unittests',
+      'type': '<(gtest_target_type)',
+      'sources': [
+        'allocator/tcmalloc_unittest.cc',
+        'android/application_status_listener_unittest.cc',
+        'android/content_uri_utils_unittest.cc',
+        'android/jni_android_unittest.cc',
+        'android/jni_array_unittest.cc',
+        'android/jni_string_unittest.cc',
+        'android/library_loader/library_prefetcher_unittest.cc',
+        'android/path_utils_unittest.cc',
+        'android/scoped_java_ref_unittest.cc',
+        'android/sys_utils_unittest.cc',
+        'at_exit_unittest.cc',
+        'atomicops_unittest.cc',
+        'barrier_closure_unittest.cc',
+        'base64_unittest.cc',
+        'base64url_unittest.cc',
+        'big_endian_unittest.cc',
+        'bind_unittest.cc',
+        'bind_unittest.nc',
+        'bit_cast_unittest.cc',
+        'bits_unittest.cc',
+        'build_time_unittest.cc',
+        'callback_helpers_unittest.cc',
+        'callback_list_unittest.cc',
+        'callback_list_unittest.nc',
+        'callback_unittest.cc',
+        'callback_unittest.nc',
+        'cancelable_callback_unittest.cc',
+        'command_line_unittest.cc',
+        'containers/adapters_unittest.cc',
+        'containers/hash_tables_unittest.cc',
+        'containers/linked_list_unittest.cc',
+        'containers/mru_cache_unittest.cc',
+        'containers/scoped_ptr_hash_map_unittest.cc',
+        'containers/small_map_unittest.cc',
+        'containers/stack_container_unittest.cc',
+        'cpu_unittest.cc',
+        'debug/crash_logging_unittest.cc',
+        'debug/debugger_unittest.cc',
+        'debug/leak_tracker_unittest.cc',
+        'debug/proc_maps_linux_unittest.cc',
+        'debug/stack_trace_unittest.cc',
+        'debug/task_annotator_unittest.cc',
+        'deferred_sequenced_task_runner_unittest.cc',
+        'environment_unittest.cc',
+        'feature_list_unittest.cc',
+        'file_version_info_win_unittest.cc',
+        'files/dir_reader_posix_unittest.cc',
+        'files/file_locking_unittest.cc',
+        'files/file_path_unittest.cc',
+        'files/file_path_watcher_unittest.cc',
+        'files/file_proxy_unittest.cc',
+        'files/file_unittest.cc',
+        'files/file_util_proxy_unittest.cc',
+        'files/file_util_unittest.cc',
+        'files/important_file_writer_unittest.cc',
+        'files/memory_mapped_file_unittest.cc',
+        'files/scoped_temp_dir_unittest.cc',
+        'gmock_unittest.cc',
+        'guid_unittest.cc',
+        'hash_unittest.cc',
+        'i18n/break_iterator_unittest.cc',
+        'i18n/case_conversion_unittest.cc',
+        'i18n/char_iterator_unittest.cc',
+        'i18n/file_util_icu_unittest.cc',
+        'i18n/icu_string_conversions_unittest.cc',
+        'i18n/message_formatter_unittest.cc',
+        'i18n/number_formatting_unittest.cc',
+        'i18n/rtl_unittest.cc',
+        'i18n/streaming_utf8_validator_unittest.cc',
+        'i18n/string_search_unittest.cc',
+        'i18n/time_formatting_unittest.cc',
+        'i18n/timezone_unittest.cc',
+        'id_map_unittest.cc',
+        'ios/crb_protocol_observers_unittest.mm',
+        'ios/device_util_unittest.mm',
+        'ios/weak_nsobject_unittest.mm',
+        'json/json_parser_unittest.cc',
+        'json/json_reader_unittest.cc',
+        'json/json_value_converter_unittest.cc',
+        'json/json_value_serializer_unittest.cc',
+        'json/json_writer_unittest.cc',
+        'json/string_escape_unittest.cc',
+        'lazy_instance_unittest.cc',
+        'logging_unittest.cc',
+        'mac/bind_objc_block_unittest.mm',
+        'mac/call_with_eh_frame_unittest.mm',
+        'mac/dispatch_source_mach_unittest.cc',
+        'mac/foundation_util_unittest.mm',
+        'mac/mac_util_unittest.mm',
+        'mac/mach_port_broker_unittest.cc',
+        'mac/objc_property_releaser_unittest.mm',
+        'mac/scoped_nsobject_unittest.mm',
+        'mac/scoped_objc_class_swizzler_unittest.mm',
+        'mac/scoped_sending_event_unittest.mm',
+        'md5_unittest.cc',
+        'memory/aligned_memory_unittest.cc',
+        'memory/discardable_shared_memory_unittest.cc',
+        'memory/linked_ptr_unittest.cc',
+        'memory/memory_pressure_listener_unittest.cc',
+        'memory/memory_pressure_monitor_chromeos_unittest.cc',
+        'memory/memory_pressure_monitor_mac_unittest.cc',
+        'memory/memory_pressure_monitor_win_unittest.cc',
+        'memory/ptr_util_unittest.cc',
+        'memory/ref_counted_memory_unittest.cc',
+        'memory/ref_counted_unittest.cc',
+        'memory/scoped_vector_unittest.cc',
+        'memory/shared_memory_mac_unittest.cc',
+        'memory/shared_memory_unittest.cc',
+        'memory/shared_memory_win_unittest.cc',
+        'memory/singleton_unittest.cc',
+        'memory/weak_ptr_unittest.cc',
+        'memory/weak_ptr_unittest.nc',
+        'message_loop/message_loop_task_runner_unittest.cc',
+        'message_loop/message_loop_unittest.cc',
+        'message_loop/message_pump_glib_unittest.cc',
+        'message_loop/message_pump_io_ios_unittest.cc',
+        'message_loop/message_pump_libevent_unittest.cc',
+        'metrics/bucket_ranges_unittest.cc',
+        'metrics/field_trial_unittest.cc',
+        'metrics/histogram_base_unittest.cc',
+        'metrics/histogram_delta_serialization_unittest.cc',
+        'metrics/histogram_macros_unittest.cc',
+        'metrics/histogram_snapshot_manager_unittest.cc',
+        'metrics/histogram_unittest.cc',
+        'metrics/metrics_hashes_unittest.cc',
+        'metrics/persistent_histogram_allocator_unittest.cc',
+        'metrics/persistent_memory_allocator_unittest.cc',
+        'metrics/persistent_sample_map_unittest.cc',
+        'metrics/sample_map_unittest.cc',
+        'metrics/sample_vector_unittest.cc',
+        'metrics/sparse_histogram_unittest.cc',
+        'metrics/statistics_recorder_unittest.cc',
+        'native_library_unittest.cc',
+        'numerics/safe_numerics_unittest.cc',
+        'observer_list_unittest.cc',
+        'optional_unittest.cc',
+        'os_compat_android_unittest.cc',
+        'path_service_unittest.cc',
+        'pickle_unittest.cc',
+        'posix/file_descriptor_shuffle_unittest.cc',
+        'posix/unix_domain_socket_linux_unittest.cc',
+        'power_monitor/power_monitor_unittest.cc',
+        'process/memory_unittest.cc',
+        'process/memory_unittest_mac.h',
+        'process/memory_unittest_mac.mm',
+        'process/process_metrics_unittest.cc',
+        'process/process_metrics_unittest_ios.cc',
+        'process/process_unittest.cc',
+        'process/process_util_unittest.cc',
+        'profiler/stack_sampling_profiler_unittest.cc',
+        'profiler/tracked_time_unittest.cc',
+        'rand_util_unittest.cc',
+        'run_loop_unittest.cc',
+        'scoped_clear_errno_unittest.cc',
+        'scoped_generic_unittest.cc',
+        'scoped_native_library_unittest.cc',
+        'security_unittest.cc',
+        'sequence_checker_unittest.cc',
+        'sha1_unittest.cc',
+        'stl_util_unittest.cc',
+        'strings/nullable_string16_unittest.cc',
+        'strings/pattern_unittest.cc',
+        'strings/safe_sprintf_unittest.cc',
+        'strings/string16_unittest.cc',
+        'strings/string_number_conversions_unittest.cc',
+        'strings/string_piece_unittest.cc',
+        'strings/string_split_unittest.cc',
+        'strings/string_tokenizer_unittest.cc',
+        'strings/string_util_unittest.cc',
+        'strings/stringize_macros_unittest.cc',
+        'strings/stringprintf_unittest.cc',
+        'strings/sys_string_conversions_mac_unittest.mm',
+        'strings/sys_string_conversions_unittest.cc',
+        'strings/utf_offset_string_conversions_unittest.cc',
+        'strings/utf_string_conversions_unittest.cc',
+        'supports_user_data_unittest.cc',
+        'sync_socket_unittest.cc',
+        'synchronization/cancellation_flag_unittest.cc',
+        'synchronization/condition_variable_unittest.cc',
+        'synchronization/lock_unittest.cc',
+        'synchronization/read_write_lock_unittest.cc',
+        'synchronization/waitable_event_unittest.cc',
+        'synchronization/waitable_event_watcher_unittest.cc',
+        'sys_byteorder_unittest.cc',
+        'sys_info_unittest.cc',
+        'system_monitor/system_monitor_unittest.cc',
+        'task/cancelable_task_tracker_unittest.cc',
+        'task_runner_util_unittest.cc',
+        'task_scheduler/delayed_task_manager_unittest.cc',
+        'task_scheduler/priority_queue_unittest.cc',
+        'task_scheduler/scheduler_lock_unittest.cc',
+        'task_scheduler/scheduler_service_thread_unittest.cc',
+        'task_scheduler/scheduler_worker_unittest.cc',
+        'task_scheduler/scheduler_worker_pool_impl_unittest.cc',
+        'task_scheduler/scheduler_worker_stack_unittest.cc',
+        'task_scheduler/sequence_sort_key_unittest.cc',
+        'task_scheduler/sequence_unittest.cc',
+        'task_scheduler/task_scheduler_impl_unittest.cc',
+        'task_scheduler/task_tracker_unittest.cc',
+        'task_scheduler/test_task_factory.cc',
+        'task_scheduler/test_task_factory.h',
+        'task_scheduler/test_utils.h',
+        'template_util_unittest.cc',
+        'test/histogram_tester_unittest.cc',
+        'test/test_pending_task_unittest.cc',
+        'test/test_reg_util_win_unittest.cc',
+        'test/trace_event_analyzer_unittest.cc',
+        'test/user_action_tester_unittest.cc',
+        'threading/non_thread_safe_unittest.cc',
+        'threading/platform_thread_unittest.cc',
+        'threading/sequenced_worker_pool_unittest.cc',
+        'threading/sequenced_task_runner_handle_unittest.cc',
+        'threading/simple_thread_unittest.cc',
+        'threading/thread_checker_unittest.cc',
+        'threading/thread_collision_warner_unittest.cc',
+        'threading/thread_id_name_manager_unittest.cc',
+        'threading/thread_local_storage_unittest.cc',
+        'threading/thread_local_unittest.cc',
+        'threading/thread_unittest.cc',
+        'threading/watchdog_unittest.cc',
+        'threading/worker_pool_posix_unittest.cc',
+        'threading/worker_pool_unittest.cc',
+        'time/pr_time_unittest.cc',
+        'time/time_unittest.cc',
+        'time/time_win_unittest.cc',
+        'timer/hi_res_timer_manager_unittest.cc',
+        'timer/mock_timer_unittest.cc',
+        'timer/timer_unittest.cc',
+        'tools_sanity_unittest.cc',
+        'tracked_objects_unittest.cc',
+        'tuple_unittest.cc',
+        'values_unittest.cc',
+        'version_unittest.cc',
+        'vlog_unittest.cc',
+        'win/dllmain.cc',
+        'win/enum_variant_unittest.cc',
+        'win/event_trace_consumer_unittest.cc',
+        'win/event_trace_controller_unittest.cc',
+        'win/event_trace_provider_unittest.cc',
+        'win/i18n_unittest.cc',
+        'win/iunknown_impl_unittest.cc',
+        'win/message_window_unittest.cc',
+        'win/object_watcher_unittest.cc',
+        'win/pe_image_unittest.cc',
+        'win/registry_unittest.cc',
+        'win/scoped_bstr_unittest.cc',
+        'win/scoped_comptr_unittest.cc',
+        'win/scoped_handle_unittest.cc',
+        'win/scoped_process_information_unittest.cc',
+        'win/scoped_variant_unittest.cc',
+        'win/shortcut_unittest.cc',
+        'win/startup_information_unittest.cc',
+        'win/wait_chain_unittest.cc',
+        'win/win_util_unittest.cc',
+        'win/windows_version_unittest.cc',
+        'win/wrapped_window_proc_unittest.cc',
+        '<@(trace_event_test_sources)',
+      ],
+      'dependencies': [
+        'base',
+        'base_i18n',
+        'base_message_loop_tests',
+        'base_static',
+        'run_all_unittests',
+        'test_support_base',
+        'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+        '../testing/gmock.gyp:gmock',
+        '../testing/gtest.gyp:gtest',
+        '../third_party/icu/icu.gyp:icui18n',
+        '../third_party/icu/icu.gyp:icuuc',
+      ],
+      'includes': ['../build/nocompile.gypi'],
+      'variables': {
+         # TODO(ajwong): Is there a way to autodetect this?
+        'module_dir': 'base'
+      },
+      'conditions': [
+        ['cfi_vptr==1 and cfi_cast==1', {
+          'defines': [
+             # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+            'CFI_CAST_CHECK',
+          ],
+        }],
+        ['OS == "ios" or OS == "mac"', {
+          'dependencies': [
+            'base_unittests_arc',
+          ],
+        }],
+        ['OS == "android"', {
+          'dependencies': [
+            'android/jni_generator/jni_generator.gyp:jni_generator_tests',
+            '../testing/android/native_test.gyp:native_test_native_code',
+          ],
+        }],
+        ['OS == "ios" and _toolset != "host"', {
+          'sources/': [
+            # This test needs multiple processes.
+            ['exclude', '^files/file_locking_unittest\\.cc$'],
+            # iOS does not support FilePathWatcher.
+            ['exclude', '^files/file_path_watcher_unittest\\.cc$'],
+            # Only test the iOS-meaningful portion of memory and process_utils.
+            ['exclude', '^memory/discardable_shared_memory_unittest\\.cc$'],
+            ['exclude', '^memory/shared_memory_unittest\\.cc$'],
+            ['exclude', '^process/memory_unittest'],
+            ['exclude', '^process/process_unittest\\.cc$'],
+            ['exclude', '^process/process_util_unittest\\.cc$'],
+            ['include', '^process/process_util_unittest_ios\\.cc$'],
+            # iOS does not use message_pump_libevent.
+            ['exclude', '^message_loop/message_pump_libevent_unittest\\.cc$'],
+          ],
+          'actions': [
+            {
+              'action_name': 'copy_test_data',
+              'variables': {
+                'test_data_files': [
+                  'test/data',
+                ],
+                'test_data_prefix': 'base',
+              },
+              'includes': [ '../build/copy_test_data_ios.gypi' ],
+            },
+          ],
+        }],
+        ['desktop_linux == 1 or chromeos == 1', {
+          'defines': [
+            'USE_SYMBOLIZE',
+          ],
+          'conditions': [
+            [ 'desktop_linux==1', {
+              'sources': [
+                'nix/xdg_util_unittest.cc',
+              ],
+            }],
+          ],
+        }],
+        ['use_glib == 1', {
+          'dependencies': [
+            '../build/linux/system.gyp:glib',
+          ],
+        }, {  # use_glib == 0
+          'sources!': [
+            'message_loop/message_pump_glib_unittest.cc',
+          ]
+        }],
+        ['use_ozone == 1', {
+          'sources!': [
+            'message_loop/message_pump_glib_unittest.cc',
+          ]
+        }],
+        ['OS == "linux"', {
+          'dependencies': [
+            'malloc_wrapper',
+          ],
+        }],
+        [ 'OS == "win" and target_arch == "x64"', {
+          'sources': [
+            'profiler/win32_stack_frame_unwinder_unittest.cc',
+          ],
+          'dependencies': [
+            'base_profiler_test_support_library',
+          ],
+        }],
+        ['OS == "win"', {
+          'dependencies': [
+            'scoped_handle_test_dll'
+          ],
+          'sources!': [
+            'file_descriptor_shuffle_unittest.cc',
+            'files/dir_reader_posix_unittest.cc',
+            'message_loop/message_pump_libevent_unittest.cc',
+            'threading/worker_pool_posix_unittest.cc',
+          ],
+          # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+          'msvs_disabled_warnings': [
+            4267,
+          ],
+          'conditions': [
+            ['icu_use_data_file_flag==0', {
+              # This is needed to trigger the dll copy step on windows.
+              # TODO(mark): This should not be necessary.
+              'dependencies': [
+                '../third_party/icu/icu.gyp:icudata',
+              ],
+            }],
+          ],
+        }, {  # OS != "win"
+          'dependencies': [
+            'third_party/libevent/libevent.gyp:libevent'
+          ],
+        }],
+        ['use_experimental_allocator_shim==1', {
+          'sources': [ 'allocator/allocator_shim_unittest.cc']
+        }],
+      ],  # conditions
+      'target_conditions': [
+        ['OS == "ios" and _toolset != "host"', {
+          'sources/': [
+            # Pull in specific Mac files for iOS (which have been filtered out
+            # by file name rules).
+            ['include', '^mac/bind_objc_block_unittest\\.mm$'],
+            ['include', '^mac/foundation_util_unittest\\.mm$',],
+            ['include', '^mac/objc_property_releaser_unittest\\.mm$'],
+            ['include', '^mac/scoped_nsobject_unittest\\.mm$'],
+            ['include', '^sys_string_conversions_mac_unittest\\.mm$'],
+          ],
+        }],
+        ['OS == "android"', {
+          'sources/': [
+            ['include', '^debug/proc_maps_linux_unittest\\.cc$'],
+          ],
+        }],
+        # Enable more direct string conversions on platforms with native utf8
+        # strings
+        ['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
+          'defines': ['SYSTEM_NATIVE_UTF8'],
+        }],
+        # SyncSocket isn't used on iOS
+        ['OS=="ios"', {
+          'sources!': [
+            'sync_socket_unittest.cc',
+          ],
+        }],
+      ],  # target_conditions
+    },
+    {
+      # GN: //base:base_perftests
+      'target_name': 'base_perftests',
+      'type': '<(gtest_target_type)',
+      'dependencies': [
+        'base',
+        'test_support_base',
+        '../testing/gtest.gyp:gtest',
+      ],
+      'sources': [
+        'message_loop/message_pump_perftest.cc',
+        'test/run_all_unittests.cc',
+        'threading/thread_perftest.cc',
+        '../testing/perf/perf_test.cc'
+      ],
+      'conditions': [
+        ['OS == "android"', {
+          'dependencies': [
+            '../testing/android/native_test.gyp:native_test_native_code',
+          ],
+        }],
+      ],
+    },
+    {
+      # GN: //base:base_i18n_perftests
+      'target_name': 'base_i18n_perftests',
+      'type': '<(gtest_target_type)',
+      'dependencies': [
+        'test_support_base',
+        'test_support_perf',
+        '../testing/gtest.gyp:gtest',
+        'base_i18n',
+        'base',
+      ],
+      'sources': [
+        'i18n/streaming_utf8_validator_perftest.cc',
+      ],
+    },
+    {
+      # GN: //base/test:test_support
+      'target_name': 'test_support_base',
+      'type': 'static_library',
+      'dependencies': [
+        'base',
+        'base_static',
+        'base_i18n',
+        '../testing/gmock.gyp:gmock',
+        '../testing/gtest.gyp:gtest',
+        '../third_party/icu/icu.gyp:icuuc',
+        '../third_party/libxml/libxml.gyp:libxml',
+        'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+      ],
+      'export_dependent_settings': [
+        'base',
+      ],
+      'conditions': [
+        ['os_posix==0', {
+          'sources!': [
+            'test/scoped_locale.cc',
+            'test/scoped_locale.h',
+          ],
+        }],
+        ['os_bsd==1', {
+          'sources!': [
+            'test/test_file_util_linux.cc',
+          ],
+        }],
+        ['OS == "android"', {
+          'dependencies': [
+            'base_unittests_jni_headers',
+            'base_java_unittest_support',
+          ],
+        }],
+        ['OS == "ios"', {
+          'toolsets': ['host', 'target'],
+        }],
+      ],
+      'sources': [
+        'test/gtest_util.cc',
+        'test/gtest_util.h',
+        'test/gtest_xml_unittest_result_printer.cc',
+        'test/gtest_xml_unittest_result_printer.h',
+        'test/gtest_xml_util.cc',
+        'test/gtest_xml_util.h',
+        'test/histogram_tester.cc',
+        'test/histogram_tester.h',
+        'test/icu_test_util.cc',
+        'test/icu_test_util.h',
+        'test/ios/wait_util.h',
+        'test/ios/wait_util.mm',
+        'test/launcher/test_launcher.cc',
+        'test/launcher/test_launcher.h',
+        'test/launcher/test_launcher_tracer.cc',
+        'test/launcher/test_launcher_tracer.h',
+        'test/launcher/test_result.cc',
+        'test/launcher/test_result.h',
+        'test/launcher/test_results_tracker.cc',
+        'test/launcher/test_results_tracker.h',
+        'test/launcher/unit_test_launcher.cc',
+        'test/launcher/unit_test_launcher.h',
+        'test/launcher/unit_test_launcher_ios.cc',
+        'test/mock_chrome_application_mac.h',
+        'test/mock_chrome_application_mac.mm',
+        'test/mock_devices_changed_observer.cc',
+        'test/mock_devices_changed_observer.h',
+        'test/mock_entropy_provider.cc',
+        'test/mock_entropy_provider.h',
+        'test/mock_log.cc',
+        'test/mock_log.h',
+        'test/multiprocess_test.cc',
+        'test/multiprocess_test.h',
+        'test/multiprocess_test_android.cc',
+        'test/null_task_runner.cc',
+        'test/null_task_runner.h',
+        'test/opaque_ref_counted.cc',
+        'test/opaque_ref_counted.h',
+        'test/perf_log.cc',
+        'test/perf_log.h',
+        'test/perf_test_suite.cc',
+        'test/perf_test_suite.h',
+        'test/perf_time_logger.cc',
+        'test/perf_time_logger.h',
+        'test/power_monitor_test_base.cc',
+        'test/power_monitor_test_base.h',
+        'test/scoped_command_line.cc',
+        'test/scoped_command_line.h',
+        'test/scoped_locale.cc',
+        'test/scoped_locale.h',
+        'test/scoped_path_override.cc',
+        'test/scoped_path_override.h',
+        'test/sequenced_task_runner_test_template.cc',
+        'test/sequenced_task_runner_test_template.h',
+        'test/sequenced_worker_pool_owner.cc',
+        'test/sequenced_worker_pool_owner.h',
+        'test/simple_test_clock.cc',
+        'test/simple_test_clock.h',
+        'test/simple_test_tick_clock.cc',
+        'test/simple_test_tick_clock.h',
+        'test/task_runner_test_template.cc',
+        'test/task_runner_test_template.h',
+        'test/test_discardable_memory_allocator.cc',
+        'test/test_discardable_memory_allocator.h',
+        'test/test_file_util.cc',
+        'test/test_file_util.h',
+        'test/test_file_util_android.cc',
+        'test/test_file_util_linux.cc',
+        'test/test_file_util_mac.cc',
+        'test/test_file_util_posix.cc',
+        'test/test_file_util_win.cc',
+        'test/test_io_thread.cc',
+        'test/test_io_thread.h',
+        'test/test_listener_ios.h',
+        'test/test_listener_ios.mm',
+        'test/test_message_loop.cc',
+        'test/test_message_loop.h',
+        'test/test_mock_time_task_runner.cc',
+        'test/test_mock_time_task_runner.h',
+        'test/test_pending_task.cc',
+        'test/test_pending_task.h',
+        'test/test_reg_util_win.cc',
+        'test/test_reg_util_win.h',
+        'test/test_shortcut_win.cc',
+        'test/test_shortcut_win.h',
+        'test/test_simple_task_runner.cc',
+        'test/test_simple_task_runner.h',
+        'test/test_suite.cc',
+        'test/test_suite.h',
+        'test/test_support_android.cc',
+        'test/test_support_android.h',
+        'test/test_support_ios.h',
+        'test/test_support_ios.mm',
+        'test/test_switches.cc',
+        'test/test_switches.h',
+        'test/test_timeouts.cc',
+        'test/test_timeouts.h',
+        'test/test_ui_thread_android.cc',
+        'test/test_ui_thread_android.h',
+        'test/thread_test_helper.cc',
+        'test/thread_test_helper.h',
+        'test/trace_event_analyzer.cc',
+        'test/trace_event_analyzer.h',
+        'test/trace_to_file.cc',
+        'test/trace_to_file.h',
+        'test/user_action_tester.cc',
+        'test/user_action_tester.h',
+        'test/values_test_util.cc',
+        'test/values_test_util.h',
+      ],
+      'target_conditions': [
+        ['OS == "ios"', {
+          'sources/': [
+            # Pull in specific Mac files for iOS (which have been filtered out
+            # by file name rules).
+            ['include', '^test/test_file_util_mac\\.cc$'],
+          ],
+        }],
+        ['OS == "ios" and _toolset == "target"', {
+          'sources!': [
+            # iOS uses its own unit test launcher.
+            'test/launcher/unit_test_launcher.cc',
+          ],
+        }],
+        ['OS == "ios" and _toolset == "host"', {
+          'sources!': [
+            'test/launcher/unit_test_launcher_ios.cc',
+            'test/test_support_ios.h',
+            'test/test_support_ios.mm',
+          ],
+        }],
+      ],  # target_conditions
+    },
+    {
+      'target_name': 'test_support_perf',
+      'type': 'static_library',
+      'dependencies': [
+        'base',
+        'test_support_base',
+        '../testing/gtest.gyp:gtest',
+      ],
+      'sources': [
+        'test/run_all_perftests.cc',
+      ],
+      'direct_dependent_settings': {
+        'defines': [
+          'PERF_TEST',
+        ],
+      },
+    },
+    {
+      'target_name': 'test_launcher_nacl_nonsfi',
+      'conditions': [
+        ['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
+          'type': 'static_library',
+          'sources': [
+            'test/launcher/test_launcher_nacl_nonsfi.cc',
+          ],
+          'dependencies': [
+            'test_support_base',
+          ],
+        }, {
+          'type': 'none',
+        }],
+      ],
+    },
+    {
+      # GN version: //base/debug:debugging_flags
+      # Since this generates a file, it must only be referenced in the target
+      # toolchain or there will be multiple rules that generate the header.
+      # When referenced from a target that might be compiled in the host
+      # toolchain, always refer to 'base_debugging_flags#target'.
+      'target_name': 'base_debugging_flags',
+      'includes': [ '../build/buildflag_header.gypi' ],
+      'variables': {
+        'buildflag_header_path': 'base/debug/debugging_flags.h',
+        'buildflag_flags': [
+          'ENABLE_PROFILING=<(profiling)',
+        ],
+      },
+    },
+    {
+      # GN version: //base/win:base_win_features
+      # Since this generates a file, it must only be referenced in the target
+      # toolchain or there will be multiple rules that generate the header.
+      # When referenced from a target that might be compiled in the host
+      # toolchain, always refer to 'base_win_features#target'.
+      'target_name': 'base_win_features',
+      'conditions': [
+        ['OS=="win"', {
+          'includes': [ '../build/buildflag_header.gypi' ],
+          'variables': {
+            'buildflag_header_path': 'base/win/base_features.h',
+            'buildflag_flags': [
+              'SINGLE_MODULE_MODE_HANDLE_VERIFIER=<(single_module_mode_handle_verifier)',
+            ],
+          },
+        }, {
+          'type': 'none',
+        }],
+      ],
+    },
+    {
+      'type': 'none',
+      'target_name': 'base_build_date',
+      'hard_dependency': 1,
+      'actions': [{
+        'action_name': 'generate_build_date_headers',
+        'inputs': [
+          '<(DEPTH)/build/write_build_date_header.py',
+          '<(DEPTH)/build/util/LASTCHANGE'
+        ],
+        'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h' ],
+        'action': [
+          'python', '<(DEPTH)/build/write_build_date_header.py',
+          '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h',
+          '<(build_type)'
+        ]
+      }],
+      'conditions': [
+        [ 'buildtype == "Official"', {
+          'variables': {
+            'build_type': 'official'
+          }
+        }, {
+          'variables': {
+            'build_type': 'default'
+          }
+        }],
+      ]
+    },
+  ],
+  'conditions': [
+    ['OS=="ios" and "<(GENERATOR)"=="ninja"', {
+      'targets': [
+        {
+          'target_name': 'test_launcher',
+          'toolsets': ['host'],
+          'type': 'executable',
+          'dependencies': [
+            'test_support_base',
+          ],
+          'sources': [
+            'test/launcher/test_launcher_ios.cc',
+          ],
+        },
+      ],
+    }],
+    ['OS!="ios"', {
+      'targets': [
+        {
+          # GN: //base:check_example
+          'target_name': 'check_example',
+          'type': 'executable',
+          'sources': [
+            'check_example.cc',
+          ],
+          'dependencies': [
+            'base',
+          ],
+        },
+        {
+          'target_name': 'build_utf8_validator_tables',
+          'type': 'executable',
+          'toolsets': ['host'],
+          'dependencies': [
+            'base',
+            '../third_party/icu/icu.gyp:icuuc',
+          ],
+          'sources': [
+            'i18n/build_utf8_validator_tables.cc'
+          ],
+        },
+      ],
+    }],
+    ['OS == "win" and target_arch=="ia32"', {
+      'targets': [
+        # The base_win64 target here allows us to use base for Win64 targets
+        # (the normal build is 32 bits).
+        {
+          'target_name': 'base_win64',
+          'type': '<(component)',
+          'variables': {
+            'base_target': 1,
+          },
+          'dependencies': [
+            'base_build_date',
+            'base_debugging_flags#target',
+            'base_static_win64',
+            '../third_party/modp_b64/modp_b64.gyp:modp_b64_win64',
+            'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
+            'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
+          ],
+          # TODO(gregoryd): direct_dependent_settings should be shared with the
+          # 32-bit target, but it doesn't work due to a bug in gyp
+          'direct_dependent_settings': {
+            'include_dirs': [
+              '..',
+            ],
+          },
+          'defines': [
+            'BASE_WIN64',
+            '<@(nacl_win64_defines)',
+          ],
+          'configurations': {
+            'Common_Base': {
+              'msvs_target_platform': 'x64',
+            },
+          },
+          'conditions': [
+            ['component == "shared_library"', {
+              'sources!': [
+                'debug/debug_on_start_win.cc',
+              ],
+            }],
+          ],
+          # Specify delayload for base_win64.dll.
+          'msvs_settings': {
+            'VCLinkerTool': {
+              'DelayLoadDLLs': [
+                'cfgmgr32.dll',
+                'powrprof.dll',
+                'setupapi.dll',
+              ],
+              'AdditionalDependencies': [
+                'cfgmgr32.lib',
+                'powrprof.lib',
+                'setupapi.lib',
+                'userenv.lib',
+                'winmm.lib',
+              ],
+            },
+          },
+          # Specify delayload for components that link with base_win64.lib.
+          'all_dependent_settings': {
+            'msvs_settings': {
+              'VCLinkerTool': {
+                'DelayLoadDLLs': [
+                  'cfgmgr32.dll',
+                  'powrprof.dll',
+                  'setupapi.dll',
+                ],
+                'AdditionalDependencies': [
+                  'cfgmgr32.lib',
+                  'powrprof.lib',
+                  'setupapi.lib',
+                  'userenv.lib',
+                  'winmm.lib',
+                ],
+              },
+            },
+          },
+          # TODO(rvargas): Bug 78117. Remove this.
+          'msvs_disabled_warnings': [
+            4244,
+            4996,
+            4267,
+          ],
+          'sources': [
+            'auto_reset.h',
+            'linux_util.cc',
+            'linux_util.h',
+            'md5.cc',
+            'md5.h',
+            'message_loop/message_pump_libevent.cc',
+            'message_loop/message_pump_libevent.h',
+            'metrics/field_trial.cc',
+            'metrics/field_trial.h',
+            'posix/file_descriptor_shuffle.cc',
+            'posix/file_descriptor_shuffle.h',
+            'sync_socket.h',
+            'sync_socket_posix.cc',
+            'sync_socket_win.cc',
+            'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
+            'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
+          ],
+        },
+        {
+          'target_name': 'base_i18n_nacl_win64',
+          'type': '<(component)',
+          # TODO(gregoryd): direct_dependent_settings should be shared with the
+          # 32-bit target, but it doesn't work due to a bug in gyp
+          'direct_dependent_settings': {
+            'include_dirs': [
+              '..',
+            ],
+          },
+          'defines': [
+            '<@(nacl_win64_defines)',
+            'BASE_I18N_IMPLEMENTATION',
+          ],
+          'include_dirs': [
+            '..',
+          ],
+          'sources': [
+            'i18n/icu_util_nacl_win64.cc',
+          ],
+          'configurations': {
+            'Common_Base': {
+              'msvs_target_platform': 'x64',
+            },
+          },
+        },
+        {
+          # TODO(rvargas): Remove this when gyp finally supports a clean model.
+          # See bug 36232.
+          'target_name': 'base_static_win64',
+          'type': 'static_library',
+          'sources': [
+            'base_switches.cc',
+            'base_switches.h',
+            'win/pe_image.cc',
+            'win/pe_image.h',
+          ],
+          'sources!': [
+            # base64.cc depends on modp_b64.
+            'base64.cc',
+          ],
+          'include_dirs': [
+            '..',
+          ],
+          'configurations': {
+            'Common_Base': {
+              'msvs_target_platform': 'x64',
+            },
+          },
+          'defines': [
+            '<@(nacl_win64_defines)',
+          ],
+          # TODO(rvargas): Bug 78117. Remove this.
+          'msvs_disabled_warnings': [
+            4244,
+          ],
+        },
+      ],
+    }],
+    ['OS == "win" and target_arch=="x64"', {
+      'targets': [
+        {
+          'target_name': 'base_profiler_test_support_library',
+          # Must be a shared library so that it can be unloaded during testing.
+          'type': 'shared_library',
+          'include_dirs': [
+            '..',
+          ],
+          'sources': [
+            'profiler/test_support_library.cc',
+          ],
+        },
+      ]
+    }],
+    ['os_posix==1 and OS!="mac" and OS!="ios"', {
+      'targets': [
+        {
+          'target_name': 'symbolize',
+          'type': 'static_library',
+          'toolsets': ['host', 'target'],
+          'variables': {
+            'chromium_code': 0,
+          },
+          'conditions': [
+            ['OS == "solaris"', {
+              'include_dirs': [
+                '/usr/gnu/include',
+                '/usr/gnu/include/libelf',
+              ],
+            },],
+          ],
+          'cflags': [
+            '-Wno-sign-compare',
+          ],
+          'cflags!': [
+            '-Wextra',
+          ],
+          'defines': [
+            'GLOG_BUILD_CONFIG_INCLUDE="build/build_config.h"',
+          ],
+          'sources': [
+            'third_party/symbolize/config.h',
+            'third_party/symbolize/demangle.cc',
+            'third_party/symbolize/demangle.h',
+            'third_party/symbolize/glog/logging.h',
+            'third_party/symbolize/glog/raw_logging.h',
+            'third_party/symbolize/symbolize.cc',
+            'third_party/symbolize/symbolize.h',
+            'third_party/symbolize/utilities.h',
+          ],
+          'include_dirs': [
+            '..',
+          ],
+          'includes': [
+            '../build/android/increase_size_for_speed.gypi',
+          ],
+        },
+        {
+          'target_name': 'xdg_mime',
+          'type': 'static_library',
+          'toolsets': ['host', 'target'],
+          'variables': {
+            'chromium_code': 0,
+          },
+          'cflags!': [
+            '-Wextra',
+          ],
+          'sources': [
+            'third_party/xdg_mime/xdgmime.c',
+            'third_party/xdg_mime/xdgmime.h',
+            'third_party/xdg_mime/xdgmimealias.c',
+            'third_party/xdg_mime/xdgmimealias.h',
+            'third_party/xdg_mime/xdgmimecache.c',
+            'third_party/xdg_mime/xdgmimecache.h',
+            'third_party/xdg_mime/xdgmimeglob.c',
+            'third_party/xdg_mime/xdgmimeglob.h',
+            'third_party/xdg_mime/xdgmimeicon.c',
+            'third_party/xdg_mime/xdgmimeicon.h',
+            'third_party/xdg_mime/xdgmimeint.c',
+            'third_party/xdg_mime/xdgmimeint.h',
+            'third_party/xdg_mime/xdgmimemagic.c',
+            'third_party/xdg_mime/xdgmimemagic.h',
+            'third_party/xdg_mime/xdgmimeparent.c',
+            'third_party/xdg_mime/xdgmimeparent.h',
+          ],
+          'includes': [
+            '../build/android/increase_size_for_speed.gypi',
+          ],
+        },
+      ],
+    }],
+    ['OS == "linux"', {
+      'targets': [
+        {
+          'target_name': 'malloc_wrapper',
+          'type': 'shared_library',
+          'dependencies': [
+            'base',
+          ],
+          'sources': [
+            'test/malloc_wrapper.cc',
+          ],
+        }
+      ],
+    }],
+    ['OS == "android"', {
+      'targets': [
+        {
+          # GN: //base:base_jni_headers
+          'target_name': 'base_jni_headers',
+          'type': 'none',
+          'sources': [
+            'android/java/src/org/chromium/base/ApkAssets.java',
+            'android/java/src/org/chromium/base/ApplicationStatus.java',
+            'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
+            'android/java/src/org/chromium/base/BuildInfo.java',
+            'android/java/src/org/chromium/base/Callback.java',
+            'android/java/src/org/chromium/base/CommandLine.java',
+            'android/java/src/org/chromium/base/ContentUriUtils.java',
+            'android/java/src/org/chromium/base/ContextUtils.java',
+            'android/java/src/org/chromium/base/CpuFeatures.java',
+            'android/java/src/org/chromium/base/EventLog.java',
+            'android/java/src/org/chromium/base/FieldTrialList.java',
+            'android/java/src/org/chromium/base/ImportantFileWriterAndroid.java',
+            'android/java/src/org/chromium/base/JNIUtils.java',
+            'android/java/src/org/chromium/base/JavaHandlerThread.java',
+            'android/java/src/org/chromium/base/LocaleUtils.java',
+            'android/java/src/org/chromium/base/MemoryPressureListener.java',
+            'android/java/src/org/chromium/base/PathService.java',
+            'android/java/src/org/chromium/base/PathUtils.java',
+            'android/java/src/org/chromium/base/PowerMonitor.java',
+            'android/java/src/org/chromium/base/SysUtils.java',
+            'android/java/src/org/chromium/base/SystemMessageHandler.java',
+            'android/java/src/org/chromium/base/ThreadUtils.java',
+            'android/java/src/org/chromium/base/TraceEvent.java',
+            'android/java/src/org/chromium/base/library_loader/LibraryLoader.java',
+            'android/java/src/org/chromium/base/metrics/RecordHistogram.java',
+            'android/java/src/org/chromium/base/metrics/RecordUserAction.java',
+          ],
+          'variables': {
+            'jni_gen_package': 'base',
+          },
+          'dependencies': [
+            'android_runtime_jni_headers',
+          ],
+          'includes': [ '../build/jni_generator.gypi' ],
+        },
+        {
+          # GN: //base:android_runtime_jni_headers
+          'target_name': 'android_runtime_jni_headers',
+          'type': 'none',
+          'variables': {
+            'jni_gen_package': 'base',
+            'input_java_class': 'java/lang/Runtime.class',
+          },
+          'includes': [ '../build/jar_file_jni_generator.gypi' ],
+        },
+        {
+          # GN: //base:base_unittests_jni_headers
+          'target_name': 'base_unittests_jni_headers',
+          'type': 'none',
+          'sources': [
+            'test/android/java/src/org/chromium/base/ContentUriTestUtils.java',
+            'test/android/java/src/org/chromium/base/TestUiThread.java',
+          ],
+          'variables': {
+            'jni_gen_package': 'base',
+          },
+          'includes': [ '../build/jni_generator.gypi' ],
+        },
+        {
+          # GN: //base:base_native_libraries_gen
+          'target_name': 'base_native_libraries_gen',
+          'type': 'none',
+          'sources': [
+            'android/java/templates/NativeLibraries.template',
+          ],
+          'variables': {
+            'package_name': 'org/chromium/base/library_loader',
+            'template_deps': [],
+          },
+          'includes': [ '../build/android/java_cpp_template.gypi' ],
+        },
+        {
+          # GN: //base:base_build_config_gen
+          'target_name': 'base_build_config_gen',
+          'type': 'none',
+          'sources': [
+            'android/java/templates/BuildConfig.template',
+          ],
+          'variables': {
+            'package_name': 'org/chromium/base',
+            'template_deps': [],
+          },
+          'includes': ['../build/android/java_cpp_template.gypi'],
+        },
+        {
+          # GN: //base:base_android_java_enums_srcjar
+          'target_name': 'base_java_library_process_type',
+          'type': 'none',
+          'variables': {
+            'source_file': 'android/library_loader/library_loader_hooks.h',
+          },
+          'includes': [ '../build/android/java_cpp_enum.gypi' ],
+        },
+        {
+          # GN: //base:base_java
+          'target_name': 'base_java',
+          'type': 'none',
+          'variables': {
+            'java_in_dir': 'android/java',
+            'jar_excluded_classes': [
+              '*/BuildConfig.class',
+              '*/NativeLibraries.class',
+            ],
+          },
+          'dependencies': [
+            'base_java_application_state',
+            'base_java_library_load_from_apk_status_codes',
+            'base_java_library_process_type',
+            'base_java_memory_pressure_level',
+            'base_build_config_gen',
+            'base_native_libraries_gen',
+            '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
+            '../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
+          ],
+          'all_dependent_settings': {
+            'variables': {
+              'generate_build_config': 1,
+            },
+          },
+          'includes': [ '../build/java.gypi' ],
+        },
+        {
+          # GN: //base:base_java_unittest_support
+          'target_name': 'base_java_unittest_support',
+          'type': 'none',
+          'dependencies': [
+            'base_java',
+          ],
+          'variables': {
+            'java_in_dir': '../base/test/android/java',
+          },
+          'includes': [ '../build/java.gypi' ],
+        },
+        {
+          # GN: //base:base_android_java_enums_srcjar
+          'target_name': 'base_java_application_state',
+          'type': 'none',
+          'variables': {
+            'source_file': 'android/application_status_listener.h',
+          },
+          'includes': [ '../build/android/java_cpp_enum.gypi' ],
+        },
+        {
+          # GN: //base:base_android_java_enums_srcjar
+          'target_name': 'base_java_library_load_from_apk_status_codes',
+          'type': 'none',
+          'variables': {
+            'source_file': 'android/library_loader/library_load_from_apk_status_codes.h'
+          },
+          'includes': [ '../build/android/java_cpp_enum.gypi' ],
+        },
+        {
+          # GN: //base:base_android_java_enums_srcjar
+          'target_name': 'base_java_memory_pressure_level',
+          'type': 'none',
+          'variables': {
+            'source_file': 'memory/memory_pressure_listener.h',
+          },
+          'includes': [ '../build/android/java_cpp_enum.gypi' ],
+        },
+        {
+          # GN: //base:base_java_test_support
+          'target_name': 'base_java_test_support',
+          'type': 'none',
+          'dependencies': [
+            'base_java',
+            '../testing/android/on_device_instrumentation.gyp:reporter_java',
+          ],
+          'variables': {
+            'java_in_dir': '../base/test/android/javatests',
+          },
+          'includes': [ '../build/java.gypi' ],
+        },
+        {
+          # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
+          # in the multidex shadow library. crbug.com/522043
+          # GN: //base:base_junit_test_support
+          'target_name': 'base_junit_test_support',
+          'type': 'none',
+          'dependencies': [
+            'base_build_config_gen',
+            '../testing/android/junit/junit_test.gyp:junit_test_support',
+            '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
+          ],
+          'variables': {
+            'src_paths': [
+              '../base/test/android/junit/src/org/chromium/base/test/shadows/ShadowMultiDex.java',
+            ],
+          },
+          'includes': [ '../build/host_jar.gypi' ]
+        },
+        {
+          # GN: //base:base_junit_tests
+          'target_name': 'base_junit_tests',
+          'type': 'none',
+          'dependencies': [
+            'base_java',
+            'base_java_test_support',
+            'base_junit_test_support',
+            '../testing/android/junit/junit_test.gyp:junit_test_support',
+          ],
+          'variables': {
+            'main_class': 'org.chromium.testing.local.JunitTestMain',
+            'src_paths': [
+              '../base/android/junit/',
+              '../base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java',
+            ],
+            'test_type': 'junit',
+            'wrapper_script_name': 'helper/<(_target_name)',
+          },
+          'includes': [
+            '../build/android/test_runner.gypi',
+            '../build/host_jar.gypi',
+          ],
+        },
+        {
+          # GN: //base:base_javatests
+          'target_name': 'base_javatests',
+          'type': 'none',
+          'dependencies': [
+            'base_java',
+            'base_java_test_support',
+          ],
+          'variables': {
+            'java_in_dir': '../base/android/javatests',
+          },
+          'includes': [ '../build/java.gypi' ],
+        },
+        {
+          # GN: //base/android/linker:chromium_android_linker
+          'target_name': 'chromium_android_linker',
+          'type': 'shared_library',
+          'sources': [
+            'android/linker/android_dlext.h',
+            'android/linker/legacy_linker_jni.cc',
+            'android/linker/legacy_linker_jni.h',
+            'android/linker/linker_jni.cc',
+            'android/linker/linker_jni.h',
+            'android/linker/modern_linker_jni.cc',
+            'android/linker/modern_linker_jni.h',
+          ],
+          # The crazy linker is never instrumented.
+          'cflags!': [
+            '-finstrument-functions',
+          ],
+          'dependencies': [
+            # The NDK contains the crazy_linker here:
+            #   '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
+            # However, we use our own fork.  See bug 384700.
+            '../third_party/android_crazy_linker/crazy_linker.gyp:crazy_linker',
+          ],
+        },
+        {
+          # GN: //base:base_perftests_apk
+          'target_name': 'base_perftests_apk',
+          'type': 'none',
+          'dependencies': [
+            'base_perftests',
+          ],
+          'variables': {
+            'test_suite_name': 'base_perftests',
+          },
+          'includes': [ '../build/apk_test.gypi' ],
+        },
+        {
+          # GN: //base:base_unittests_apk
+          'target_name': 'base_unittests_apk',
+          'type': 'none',
+          'dependencies': [
+            'base_java',
+            'base_unittests',
+          ],
+          'variables': {
+            'test_suite_name': 'base_unittests',
+            'isolate_file': 'base_unittests.isolate',
+          },
+          'includes': [ '../build/apk_test.gypi' ],
+        },
+      ],
+      'conditions': [
+        ['test_isolation_mode != "noop"',
+          {
+            'targets': [
+              {
+                'target_name': 'base_unittests_apk_run',
+                'type': 'none',
+                'dependencies': [
+                  'base_unittests_apk',
+                ],
+                'includes': [
+                  '../build/isolate.gypi',
+                ],
+                'sources': [
+                  'base_unittests_apk.isolate',
+                ],
+              },
+            ]
+          }
+        ],
+      ],
+    }],
+    ['OS == "win"', {
+      'targets': [
+        {
+          # Target to manually rebuild pe_image_test.dll which is checked into
+          # base/test/data/pe_image.
+          'target_name': 'pe_image_test',
+          'type': 'shared_library',
+          'sources': [
+            'win/pe_image_test.cc',
+          ],
+          'msvs_settings': {
+            'VCLinkerTool': {
+              'SubSystem': '2',         # Set /SUBSYSTEM:WINDOWS
+              'DelayLoadDLLs': [
+                'cfgmgr32.dll',
+                'shell32.dll',
+              ],
+              'AdditionalDependencies': [
+                'cfgmgr32.lib',
+                'shell32.lib',
+              ],
+            },
+          },
+        },
+        {
+          'target_name': 'scoped_handle_test_dll',
+          'type': 'loadable_module',
+          'dependencies': [
+            'base',
+          ],
+          'sources': [
+            'win/scoped_handle_test_dll.cc',
+          ],
+        },
+      ],
+    }],
+    ['test_isolation_mode != "noop"', {
+      'targets': [
+        {
+          'target_name': 'base_unittests_run',
+          'type': 'none',
+          'dependencies': [
+            'base_unittests',
+          ],
+          'includes': [
+            '../build/isolate.gypi',
+          ],
+          'sources': [
+            'base_unittests.isolate',
+          ],
+        },
+      ],
+    }],
+    ['OS == "ios" or OS == "mac"', {
+      'targets': [
+        {
+          'target_name': 'base_unittests_arc',
+          'type': 'static_library',
+          'dependencies': [
+            'base',
+            '../testing/gtest.gyp:gtest',
+          ],
+          'sources': [
+            'mac/bind_objc_block_unittest_arc.mm',
+            'mac/scoped_nsobject_unittest_arc.mm'
+          ],
+          'xcode_settings': {
+            'CLANG_ENABLE_OBJC_ARC': 'YES',
+          },
+          'target_conditions': [
+            ['OS == "ios" and _toolset != "host"', {
+              'sources/': [
+                ['include', 'mac/bind_objc_block_unittest_arc\\.mm$'],
+                ['include', 'mac/scoped_nsobject_unittest_arc\\.mm$'],
+              ],
+            }]
+          ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/base/base.gypi b/base/base.gypi
new file mode 100644
index 0000000..cb41e79
--- /dev/null
+++ b/base/base.gypi
@@ -0,0 +1,1106 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'includes': [
+    'trace_event/trace_event.gypi',
+  ],
+  'target_defaults': {
+    'variables': {
+      'base_target': 0,
+      'base_i18n_target': 0,
+    },
+    'target_conditions': [
+      # This part is shared between the targets defined below.
+      ['base_target==1', {
+        'sources': [
+          '../build/build_config.h',
+          'allocator/allocator_check.cc',
+          'allocator/allocator_check.h',
+          'allocator/allocator_extension.cc',
+          'allocator/allocator_extension.h',
+          'android/animation_frame_time_histogram.cc',
+          'android/animation_frame_time_histogram.h',
+          'android/apk_assets.cc',
+          'android/apk_assets.h',
+          'android/application_status_listener.cc',
+          'android/application_status_listener.h',
+          'android/base_jni_onload.cc',
+          'android/base_jni_onload.h',
+          'android/base_jni_registrar.cc',
+          'android/base_jni_registrar.h',
+          'android/build_info.cc',
+          'android/build_info.h',
+          'android/callback_android.cc',
+          'android/callback_android.h',
+          'android/command_line_android.cc',
+          'android/command_line_android.h',
+          'android/content_uri_utils.cc',
+          'android/content_uri_utils.h',
+          'android/context_utils.cc',
+          'android/context_utils.h',
+          'android/cpu_features.cc',
+          'android/cxa_demangle_stub.cc',
+          'android/event_log.cc',
+          'android/event_log.h',
+          'android/field_trial_list.cc',
+          'android/field_trial_list.h',
+          'android/fifo_utils.cc',
+          'android/fifo_utils.h',
+          'android/important_file_writer_android.cc',
+          'android/important_file_writer_android.h',
+          'android/java_handler_thread.cc',
+          'android/java_handler_thread.h',
+          'android/java_runtime.cc',
+          'android/java_runtime.h',
+          'android/jni_android.cc',
+          'android/jni_android.h',
+          'android/jni_array.cc',
+          'android/jni_array.h',
+          'android/jni_registrar.cc',
+          'android/jni_registrar.h',
+          'android/jni_string.cc',
+          'android/jni_string.h',
+          'android/jni_utils.cc',
+          'android/jni_utils.h',
+          'android/jni_weak_ref.cc',
+          'android/jni_weak_ref.h',
+          'android/library_loader/library_load_from_apk_status_codes.h',
+          'android/library_loader/library_loader_hooks.cc',
+          'android/library_loader/library_loader_hooks.h',
+          'android/library_loader/library_prefetcher.cc',
+          'android/library_loader/library_prefetcher.h',
+          'android/locale_utils.cc',
+          'android/locale_utils.h',
+          'android/memory_pressure_listener_android.cc',
+          'android/memory_pressure_listener_android.h',
+          'android/path_service_android.cc',
+          'android/path_service_android.h',
+          'android/path_utils.cc',
+          'android/path_utils.h',
+          'android/record_histogram.cc',
+          'android/record_histogram.h',
+          'android/record_user_action.cc',
+          'android/record_user_action.h',
+          'android/scoped_java_ref.cc',
+          'android/scoped_java_ref.h',
+          'android/sys_utils.cc',
+          'android/sys_utils.h',
+          'android/thread_utils.h',
+          'android/trace_event_binding.cc',
+          'android/trace_event_binding.h',
+          'at_exit.cc',
+          'at_exit.h',
+          'atomic_ref_count.h',
+          'atomic_sequence_num.h',
+          'atomicops.h',
+          'atomicops_internals_portable.h',
+          'atomicops_internals_x86_msvc.h',
+          'barrier_closure.cc',
+          'barrier_closure.h',
+          'base64.cc',
+          'base64.h',
+          'base64url.cc',
+          'base64url.h',
+          'base_export.h',
+          'base_paths.cc',
+          'base_paths.h',
+          'base_paths_android.cc',
+          'base_paths_android.h',
+          'base_paths_mac.h',
+          'base_paths_mac.mm',
+          'base_paths_posix.cc',
+          'base_paths_posix.h',
+          'base_paths_win.cc',
+          'base_paths_win.h',
+          'base_switches.h',
+          'big_endian.cc',
+          'big_endian.h',
+          'bind.h',
+          'bind_helpers.cc',
+          'bind_helpers.h',
+          'bind_internal.h',
+          'bit_cast.h',
+          'bits.h',
+          'build_time.cc',
+          'build_time.h',
+          'callback.h',
+          'callback_helpers.cc',
+          'callback_helpers.h',
+          'callback_internal.cc',
+          'callback_internal.h',
+          'callback_list.h',
+          'cancelable_callback.h',
+          'command_line.cc',
+          'command_line.h',
+          'compiler_specific.h',
+          'containers/adapters.h',
+          'containers/hash_tables.h',
+          'containers/linked_list.h',
+          'containers/mru_cache.h',
+          'containers/scoped_ptr_hash_map.h',
+          'containers/small_map.h',
+          'containers/stack_container.h',
+          'cpu.cc',
+          'cpu.h',
+          'critical_closure.h',
+          'critical_closure_internal_ios.mm',
+          'debug/alias.cc',
+          'debug/alias.h',
+          'debug/asan_invalid_access.cc',
+          'debug/asan_invalid_access.h',
+          'debug/close_handle_hook_win.cc',
+          'debug/close_handle_hook_win.h',
+          'debug/crash_logging.cc',
+          'debug/crash_logging.h',
+          'debug/debugger.cc',
+          'debug/debugger.h',
+          'debug/debugger_posix.cc',
+          'debug/debugger_win.cc',
+          'debug/dump_without_crashing.cc',
+          'debug/dump_without_crashing.h',
+          'debug/gdi_debug_util_win.cc',
+          'debug/gdi_debug_util_win.h',
+          # This file depends on files from the 'allocator' target,
+          # but this target does not depend on 'allocator' (see
+          # allocator.gyp for details).
+          'debug/leak_annotations.h',
+          'debug/leak_tracker.h',
+          'debug/proc_maps_linux.cc',
+          'debug/proc_maps_linux.h',
+          'debug/profiler.cc',
+          'debug/profiler.h',
+          'debug/stack_trace.cc',
+          'debug/stack_trace.h',
+          'debug/stack_trace_android.cc',
+          'debug/stack_trace_posix.cc',
+          'debug/stack_trace_win.cc',
+          'debug/task_annotator.cc',
+          'debug/task_annotator.h',
+          'deferred_sequenced_task_runner.cc',
+          'deferred_sequenced_task_runner.h',
+          'environment.cc',
+          'environment.h',
+          'feature_list.cc',
+          'feature_list.h',
+          'file_descriptor_posix.h',
+          'file_version_info.h',
+          'file_version_info_mac.h',
+          'file_version_info_mac.mm',
+          'file_version_info_win.cc',
+          'file_version_info_win.h',
+          'files/dir_reader_fallback.h',
+          'files/dir_reader_linux.h',
+          'files/dir_reader_posix.h',
+          'files/file.cc',
+          'files/file.h',
+          'files/file_enumerator.cc',
+          'files/file_enumerator.h',
+          'files/file_enumerator_posix.cc',
+          'files/file_enumerator_win.cc',
+          'files/file_path.cc',
+          'files/file_path.h',
+          'files/file_path_constants.cc',
+          'files/file_path_watcher.cc',
+          'files/file_path_watcher.h',
+          'files/file_path_watcher_fsevents.cc',
+          'files/file_path_watcher_fsevents.h',
+          'files/file_path_watcher_kqueue.cc',
+          'files/file_path_watcher_kqueue.h',
+          'files/file_path_watcher_linux.cc',
+          'files/file_path_watcher_mac.cc',
+          'files/file_path_watcher_stub.cc',
+          'files/file_path_watcher_win.cc',
+          'files/file_posix.cc',
+          'files/file_proxy.cc',
+          'files/file_proxy.h',
+          'files/file_tracing.cc',
+          'files/file_tracing.h',
+          'files/file_util.cc',
+          'files/file_util.h',
+          'files/file_util_android.cc',
+          'files/file_util_linux.cc',
+          'files/file_util_mac.mm',
+          'files/file_util_posix.cc',
+          'files/file_util_proxy.cc',
+          'files/file_util_proxy.h',
+          'files/file_util_win.cc',
+          'files/file_win.cc',
+          'files/important_file_writer.cc',
+          'files/important_file_writer.h',
+          'files/memory_mapped_file.cc',
+          'files/memory_mapped_file.h',
+          'files/memory_mapped_file_posix.cc',
+          'files/memory_mapped_file_win.cc',
+          'files/scoped_file.cc',
+          'files/scoped_file.h',
+          'files/scoped_temp_dir.cc',
+          'files/scoped_temp_dir.h',
+          'format_macros.h',
+          'gtest_prod_util.h',
+          'guid.cc',
+          'guid.h',
+          'hash.cc',
+          'hash.h',
+          'id_map.h',
+          'ios/block_types.h',
+          'ios/crb_protocol_observers.h',
+          'ios/crb_protocol_observers.mm',
+          'ios/device_util.h',
+          'ios/device_util.mm',
+          'ios/ios_util.h',
+          'ios/ios_util.mm',
+          'ios/ns_error_util.h',
+          'ios/ns_error_util.mm',
+          'ios/scoped_critical_action.h',
+          'ios/scoped_critical_action.mm',
+          'ios/weak_nsobject.h',
+          'ios/weak_nsobject.mm',
+          'json/json_file_value_serializer.cc',
+          'json/json_file_value_serializer.h',
+          'json/json_parser.cc',
+          'json/json_parser.h',
+          'json/json_reader.cc',
+          'json/json_reader.h',
+          'json/json_string_value_serializer.cc',
+          'json/json_string_value_serializer.h',
+          'json/json_value_converter.cc',
+          'json/json_value_converter.h',
+          'json/json_writer.cc',
+          'json/json_writer.h',
+          'json/string_escape.cc',
+          'json/string_escape.h',
+          'lazy_instance.cc',
+          'lazy_instance.h',
+          'location.cc',
+          'location.h',
+          'logging.cc',
+          'logging.h',
+          'logging_win.cc',
+          'logging_win.h',
+          'mac/authorization_util.h',
+          'mac/authorization_util.mm',
+          'mac/bind_objc_block.h',
+          'mac/bundle_locations.h',
+          'mac/bundle_locations.mm',
+          'mac/call_with_eh_frame.cc',
+          'mac/call_with_eh_frame.h',
+          'mac/call_with_eh_frame_asm.S',
+          'mac/close_nocancel.cc',
+          'mac/cocoa_protocols.h',
+          'mac/dispatch_source_mach.cc',
+          'mac/dispatch_source_mach.h',
+          'mac/foundation_util.h',
+          'mac/foundation_util.mm',
+          'mac/launch_services_util.cc',
+          'mac/launch_services_util.h',
+          'mac/launchd.cc',
+          'mac/launchd.h',
+          'mac/mac_logging.h',
+          'mac/mac_logging.mm',
+          'mac/mac_util.h',
+          'mac/mac_util.mm',
+          'mac/mach_logging.cc',
+          'mac/mach_logging.h',
+          'mac/mach_port_broker.h',
+          'mac/mach_port_broker.mm',
+          'mac/mach_port_util.cc',
+          'mac/mach_port_util.h',
+          'mac/objc_property_releaser.h',
+          'mac/objc_property_releaser.mm',
+          'mac/os_crash_dumps.cc',
+          'mac/os_crash_dumps.h',
+          'mac/scoped_aedesc.h',
+          'mac/scoped_authorizationref.h',
+          'mac/scoped_block.h',
+          'mac/scoped_cftyperef.h',
+          'mac/scoped_dispatch_object.h',
+          'mac/scoped_ioobject.h',
+          'mac/scoped_ioplugininterface.h',
+          'mac/scoped_launch_data.h',
+          'mac/scoped_mach_port.cc',
+          'mac/scoped_mach_port.h',
+          'mac/scoped_mach_vm.cc',
+          'mac/scoped_mach_vm.h',
+          'mac/scoped_nsautorelease_pool.h',
+          'mac/scoped_nsautorelease_pool.mm',
+          'mac/scoped_nsobject.h',
+          'mac/scoped_nsobject.mm',
+          'mac/scoped_objc_class_swizzler.h',
+          'mac/scoped_objc_class_swizzler.mm',
+          'mac/scoped_sending_event.h',
+          'mac/scoped_sending_event.mm',
+          'mac/scoped_typeref.h',
+          'mac/sdk_forward_declarations.h',
+          'mac/sdk_forward_declarations.mm',
+          'macros.h',
+          'md5.cc',
+          'md5.h',
+          'memory/aligned_memory.cc',
+          'memory/aligned_memory.h',
+          'memory/discardable_memory.cc',
+          'memory/discardable_memory.h',
+          'memory/discardable_memory_allocator.cc',
+          'memory/discardable_memory_allocator.h',
+          'memory/discardable_shared_memory.cc',
+          'memory/discardable_shared_memory.h',
+          'memory/free_deleter.h',
+          'memory/linked_ptr.h',
+          'memory/manual_constructor.h',
+          'memory/memory_pressure_listener.cc',
+          'memory/memory_pressure_listener.h',
+          'memory/memory_pressure_monitor.cc',
+          'memory/memory_pressure_monitor.h',
+          'memory/memory_pressure_monitor_chromeos.cc',
+          'memory/memory_pressure_monitor_chromeos.h',
+          'memory/memory_pressure_monitor_mac.cc',
+          'memory/memory_pressure_monitor_mac.h',
+          'memory/memory_pressure_monitor_win.cc',
+          'memory/memory_pressure_monitor_win.h',
+          'memory/ptr_util.h',
+          'memory/raw_scoped_refptr_mismatch_checker.h',
+          'memory/ref_counted.cc',
+          'memory/ref_counted.h',
+          'memory/ref_counted_delete_on_message_loop.h',
+          'memory/ref_counted_memory.cc',
+          'memory/ref_counted_memory.h',
+          'memory/scoped_policy.h',
+          'memory/scoped_vector.h',
+          'memory/shared_memory.h',
+          'memory/shared_memory_android.cc',
+          'memory/shared_memory_handle.h',
+          'memory/shared_memory_handle_mac.cc',
+          'memory/shared_memory_handle_win.cc',
+          'memory/shared_memory_mac.cc',
+          'memory/shared_memory_nacl.cc',
+          'memory/shared_memory_posix.cc',
+          'memory/shared_memory_win.cc',
+          'memory/singleton.cc',
+          'memory/singleton.h',
+          'memory/weak_ptr.cc',
+          'memory/weak_ptr.h',
+          'message_loop/incoming_task_queue.cc',
+          'message_loop/incoming_task_queue.h',
+          'message_loop/message_loop.cc',
+          'message_loop/message_loop.h',
+          'message_loop/message_loop_task_runner.cc',
+          'message_loop/message_loop_task_runner.h',
+          'message_loop/message_pump.cc',
+          'message_loop/message_pump.h',
+          'message_loop/message_pump_android.cc',
+          'message_loop/message_pump_android.h',
+          'message_loop/message_pump_default.cc',
+          'message_loop/message_pump_default.h',
+          'message_loop/message_pump_win.cc',
+          'message_loop/message_pump_win.h',
+          'message_loop/timer_slack.h',
+          'metrics/bucket_ranges.cc',
+          'metrics/bucket_ranges.h',
+          'metrics/histogram.cc',
+          'metrics/histogram.h',
+          'metrics/histogram_base.cc',
+          'metrics/histogram_base.h',
+          'metrics/histogram_delta_serialization.cc',
+          'metrics/histogram_delta_serialization.h',
+          'metrics/histogram_flattener.h',
+          'metrics/histogram_macros.h',
+          'metrics/histogram_samples.cc',
+          'metrics/histogram_samples.h',
+          'metrics/histogram_snapshot_manager.cc',
+          'metrics/histogram_snapshot_manager.h',
+          'metrics/metrics_hashes.cc',
+          'metrics/metrics_hashes.h',
+          'metrics/persistent_histogram_allocator.cc',
+          'metrics/persistent_histogram_allocator.h',
+          'metrics/persistent_memory_allocator.cc',
+          'metrics/persistent_memory_allocator.h',
+          'metrics/persistent_sample_map.cc',
+          'metrics/persistent_sample_map.h',
+          'metrics/sample_map.cc',
+          'metrics/sample_map.h',
+          'metrics/sample_vector.cc',
+          'metrics/sample_vector.h',
+          'metrics/sparse_histogram.cc',
+          'metrics/sparse_histogram.h',
+          'metrics/statistics_recorder.cc',
+          'metrics/statistics_recorder.h',
+          'metrics/user_metrics.cc',
+          'metrics/user_metrics.h',
+          'metrics/user_metrics_action.h',
+          'native_library.h',
+          'native_library_ios.mm',
+          'native_library_mac.mm',
+          'native_library_posix.cc',
+          'native_library_win.cc',
+          'nix/mime_util_xdg.cc',
+          'nix/mime_util_xdg.h',
+          'nix/xdg_util.cc',
+          'nix/xdg_util.h',
+          'numerics/safe_conversions.h',
+          'numerics/safe_conversions_impl.h',
+          'numerics/safe_math.h',
+          'numerics/safe_math_impl.h',
+          'observer_list.h',
+          'observer_list_threadsafe.h',
+          'optional.h',
+          'os_compat_android.cc',
+          'os_compat_android.h',
+          'os_compat_nacl.cc',
+          'os_compat_nacl.h',
+          'path_service.cc',
+          'path_service.h',
+          'pending_task.cc',
+          'pending_task.h',
+          'pickle.cc',
+          'pickle.h',
+          'posix/eintr_wrapper.h',
+          'posix/global_descriptors.cc',
+          'posix/global_descriptors.h',
+          'posix/safe_strerror.cc',
+          'posix/safe_strerror.h',
+          'posix/unix_domain_socket_linux.cc',
+          'posix/unix_domain_socket_linux.h',
+          'power_monitor/power_monitor.cc',
+          'power_monitor/power_monitor.h',
+          'power_monitor/power_monitor_device_source.cc',
+          'power_monitor/power_monitor_device_source.h',
+          'power_monitor/power_monitor_device_source_android.cc',
+          'power_monitor/power_monitor_device_source_android.h',
+          'power_monitor/power_monitor_device_source_chromeos.cc',
+          'power_monitor/power_monitor_device_source_ios.mm',
+          'power_monitor/power_monitor_device_source_mac.mm',
+          'power_monitor/power_monitor_device_source_posix.cc',
+          'power_monitor/power_monitor_device_source_win.cc',
+          'power_monitor/power_monitor_source.cc',
+          'power_monitor/power_monitor_source.h',
+          'power_monitor/power_observer.h',
+          'process/internal_linux.cc',
+          'process/internal_linux.h',
+          'process/kill.cc',
+          'process/kill.h',
+          'process/kill_mac.cc',
+          'process/kill_posix.cc',
+          'process/kill_win.cc',
+          'process/launch.cc',
+          'process/launch.h',
+          'process/launch_ios.cc',
+          'process/launch_mac.cc',
+          'process/launch_posix.cc',
+          'process/launch_win.cc',
+          'process/memory.cc',
+          'process/memory.h',
+          'process/memory_linux.cc',
+          'process/memory_mac.mm',
+          'process/memory_win.cc',
+          'process/port_provider_mac.cc',
+          'process/port_provider_mac.h',
+          'process/process.h',
+          'process/process_handle.cc',
+          'process/process_handle_freebsd.cc',
+          'process/process_handle_linux.cc',
+          'process/process_handle_mac.cc',
+          'process/process_handle_openbsd.cc',
+          'process/process_handle_posix.cc',
+          'process/process_handle_win.cc',
+          'process/process_info.h',
+          'process/process_info_linux.cc',
+          'process/process_info_mac.cc',
+          'process/process_info_win.cc',
+          'process/process_iterator.cc',
+          'process/process_iterator.h',
+          'process/process_iterator_freebsd.cc',
+          'process/process_iterator_linux.cc',
+          'process/process_iterator_mac.cc',
+          'process/process_iterator_openbsd.cc',
+          'process/process_iterator_win.cc',
+          'process/process_linux.cc',
+          'process/process_metrics.cc',
+          'process/process_metrics.h',
+          'process/process_metrics_freebsd.cc',
+          'process/process_metrics_ios.cc',
+          'process/process_metrics_linux.cc',
+          'process/process_metrics_mac.cc',
+          'process/process_metrics_nacl.cc',
+          'process/process_metrics_openbsd.cc',
+          'process/process_metrics_posix.cc',
+          'process/process_metrics_win.cc',
+          'process/process_posix.cc',
+          'process/process_win.cc',
+          'profiler/native_stack_sampler.cc',
+          'profiler/native_stack_sampler.h',
+          'profiler/native_stack_sampler_posix.cc',
+          'profiler/native_stack_sampler_win.cc',
+          'profiler/scoped_profile.cc',
+          'profiler/scoped_profile.h',
+          'profiler/scoped_tracker.cc',
+          'profiler/scoped_tracker.h',
+          'profiler/stack_sampling_profiler.cc',
+          'profiler/stack_sampling_profiler.h',
+          'profiler/tracked_time.cc',
+          'profiler/tracked_time.h',
+          'rand_util.cc',
+          'rand_util.h',
+          'rand_util_nacl.cc',
+          'rand_util_posix.cc',
+          'rand_util_win.cc',
+          'run_loop.cc',
+          'run_loop.h',
+          'scoped_generic.h',
+          'scoped_native_library.cc',
+          'scoped_native_library.h',
+          'scoped_observer.h',
+          'sequence_checker.h',
+          'sequence_checker_impl.cc',
+          'sequence_checker_impl.h',
+          'sequenced_task_runner.cc',
+          'sequenced_task_runner.h',
+          'sequenced_task_runner_helpers.h',
+          'sha1.cc',
+          'sha1.h',
+          'single_thread_task_runner.h',
+          'stl_util.h',
+          'strings/latin1_string_conversions.cc',
+          'strings/latin1_string_conversions.h',
+          'strings/nullable_string16.cc',
+          'strings/nullable_string16.h',
+          'strings/pattern.cc',
+          'strings/pattern.h',
+          'strings/safe_sprintf.cc',
+          'strings/safe_sprintf.h',
+          'strings/string16.cc',
+          'strings/string16.h',
+          'strings/string_number_conversions.cc',
+          'strings/string_number_conversions.h',
+          'strings/string_piece.cc',
+          'strings/string_piece.h',
+          'strings/string_split.cc',
+          'strings/string_split.h',
+          'strings/string_tokenizer.h',
+          'strings/string_util.cc',
+          'strings/string_util.h',
+          'strings/string_util_constants.cc',
+          'strings/string_util_posix.h',
+          'strings/string_util_win.h',
+          'strings/stringize_macros.h',
+          'strings/stringprintf.cc',
+          'strings/stringprintf.h',
+          'strings/sys_string_conversions.h',
+          'strings/sys_string_conversions_mac.mm',
+          'strings/sys_string_conversions_posix.cc',
+          'strings/sys_string_conversions_win.cc',
+          'strings/utf_offset_string_conversions.cc',
+          'strings/utf_offset_string_conversions.h',
+          'strings/utf_string_conversion_utils.cc',
+          'strings/utf_string_conversion_utils.h',
+          'strings/utf_string_conversions.cc',
+          'strings/utf_string_conversions.h',
+          'supports_user_data.cc',
+          'supports_user_data.h',
+          'synchronization/cancellation_flag.cc',
+          'synchronization/cancellation_flag.h',
+          'synchronization/condition_variable.h',
+          'synchronization/condition_variable_posix.cc',
+          'synchronization/condition_variable_win.cc',
+          'synchronization/lock.cc',
+          'synchronization/lock.h',
+          'synchronization/lock_impl.h',
+          'synchronization/lock_impl_posix.cc',
+          'synchronization/lock_impl_win.cc',
+          'synchronization/read_write_lock.h',
+          'synchronization/read_write_lock_nacl.cc',
+          'synchronization/read_write_lock_posix.cc',
+          'synchronization/read_write_lock_win.cc',
+          'synchronization/spin_wait.h',
+          'synchronization/waitable_event.h',
+          'synchronization/waitable_event_posix.cc',
+          'synchronization/waitable_event_watcher.h',
+          'synchronization/waitable_event_watcher_posix.cc',
+          'synchronization/waitable_event_watcher_win.cc',
+          'synchronization/waitable_event_win.cc',
+          'sys_byteorder.h',
+          'sys_info.cc',
+          'sys_info.h',
+          'sys_info_android.cc',
+          'sys_info_chromeos.cc',
+          'sys_info_freebsd.cc',
+          'sys_info_internal.h',
+          'sys_info_ios.mm',
+          'sys_info_linux.cc',
+          'sys_info_mac.mm',
+          'sys_info_openbsd.cc',
+          'sys_info_posix.cc',
+          'sys_info_win.cc',
+          'system_monitor/system_monitor.cc',
+          'system_monitor/system_monitor.h',
+          'task/cancelable_task_tracker.cc',
+          'task/cancelable_task_tracker.h',
+          'task_runner.cc',
+          'task_runner.h',
+          'task_runner_util.h',
+          'task_scheduler/delayed_task_manager.cc',
+          'task_scheduler/delayed_task_manager.h',
+          'task_scheduler/priority_queue.cc',
+          'task_scheduler/priority_queue.h',
+          'task_scheduler/scheduler_lock.h',
+          'task_scheduler/scheduler_lock_impl.cc',
+          'task_scheduler/scheduler_lock_impl.h',
+          'task_scheduler/scheduler_service_thread.cc',
+          'task_scheduler/scheduler_service_thread.h',
+          'task_scheduler/scheduler_worker.cc',
+          'task_scheduler/scheduler_worker.h',
+          'task_scheduler/scheduler_worker_pool.h',
+          'task_scheduler/scheduler_worker_pool_impl.cc',
+          'task_scheduler/scheduler_worker_pool_impl.h',
+          'task_scheduler/scheduler_worker_stack.cc',
+          'task_scheduler/scheduler_worker_stack.h',
+          'task_scheduler/sequence.cc',
+          'task_scheduler/sequence.h',
+          'task_scheduler/sequence_sort_key.cc',
+          'task_scheduler/sequence_sort_key.h',
+          'task_scheduler/task.cc',
+          'task_scheduler/task.h',
+          'task_scheduler/task_scheduler.cc',
+          'task_scheduler/task_scheduler.h',
+          'task_scheduler/task_scheduler_impl.cc',
+          'task_scheduler/task_scheduler_impl.h',
+          'task_scheduler/task_tracker.cc',
+          'task_scheduler/task_tracker.h',
+          'task_scheduler/task_traits.cc',
+          'task_scheduler/task_traits.h',
+          'template_util.h',
+          'third_party/dmg_fp/dmg_fp.h',
+          'third_party/dmg_fp/dtoa_wrapper.cc',
+          'third_party/dmg_fp/g_fmt.cc',
+          'third_party/icu/icu_utf.cc',
+          'third_party/icu/icu_utf.h',
+          'third_party/nspr/prtime.cc',
+          'third_party/nspr/prtime.h',
+          'third_party/superfasthash/superfasthash.c',
+          'third_party/xdg_mime/xdgmime.h',
+          'threading/non_thread_safe.h',
+          'threading/non_thread_safe_impl.cc',
+          'threading/non_thread_safe_impl.h',
+          'threading/platform_thread.h',
+          'threading/platform_thread_android.cc',
+          'threading/platform_thread_internal_posix.cc',
+          'threading/platform_thread_internal_posix.h',
+          'threading/platform_thread_linux.cc',
+          'threading/platform_thread_mac.mm',
+          'threading/platform_thread_posix.cc',
+          'threading/platform_thread_win.cc',
+          'threading/post_task_and_reply_impl.cc',
+          'threading/post_task_and_reply_impl.h',
+          'threading/sequenced_task_runner_handle.cc',
+          'threading/sequenced_task_runner_handle.h',
+          'threading/sequenced_worker_pool.cc',
+          'threading/sequenced_worker_pool.h',
+          'threading/simple_thread.cc',
+          'threading/simple_thread.h',
+          'threading/thread.cc',
+          'threading/thread.h',
+          'threading/thread_checker.h',
+          'threading/thread_checker_impl.cc',
+          'threading/thread_checker_impl.h',
+          'threading/thread_collision_warner.cc',
+          'threading/thread_collision_warner.h',
+          'threading/thread_id_name_manager.cc',
+          'threading/thread_id_name_manager.h',
+          'threading/thread_local.h',
+          'threading/thread_local_android.cc',
+          'threading/thread_local_posix.cc',
+          'threading/thread_local_storage.cc',
+          'threading/thread_local_storage.h',
+          'threading/thread_local_storage_posix.cc',
+          'threading/thread_local_storage_win.cc',
+          'threading/thread_local_win.cc',
+          'threading/thread_restrictions.cc',
+          'threading/thread_restrictions.h',
+          'threading/thread_task_runner_handle.cc',
+          'threading/thread_task_runner_handle.h',
+          'threading/watchdog.cc',
+          'threading/watchdog.h',
+          'threading/worker_pool.cc',
+          'threading/worker_pool.h',
+          'threading/worker_pool_posix.cc',
+          'threading/worker_pool_posix.h',
+          'threading/worker_pool_win.cc',
+          'time/clock.cc',
+          'time/clock.h',
+          'time/default_clock.cc',
+          'time/default_clock.h',
+          'time/default_tick_clock.cc',
+          'time/default_tick_clock.h',
+          'time/tick_clock.cc',
+          'time/tick_clock.h',
+          'time/time.cc',
+          'time/time.h',
+          'time/time_mac.cc',
+          'time/time_posix.cc',
+          'time/time_win.cc',
+          'timer/elapsed_timer.cc',
+          'timer/elapsed_timer.h',
+          'timer/hi_res_timer_manager.h',
+          'timer/hi_res_timer_manager_posix.cc',
+          'timer/hi_res_timer_manager_win.cc',
+          'timer/mock_timer.cc',
+          'timer/mock_timer.h',
+          'timer/timer.cc',
+          'timer/timer.h',
+          'tracked_objects.cc',
+          'tracked_objects.h',
+          'tracking_info.cc',
+          'tracking_info.h',
+          'tuple.h',
+          'value_conversions.cc',
+          'value_conversions.h',
+          'values.cc',
+          'values.h',
+          'version.cc',
+          'version.h',
+          'vlog.cc',
+          'vlog.h',
+          'win/enum_variant.cc',
+          'win/enum_variant.h',
+          'win/event_trace_consumer.h',
+          'win/event_trace_controller.cc',
+          'win/event_trace_controller.h',
+          'win/event_trace_provider.cc',
+          'win/event_trace_provider.h',
+          'win/i18n.cc',
+          'win/i18n.h',
+          'win/iat_patch_function.cc',
+          'win/iat_patch_function.h',
+          'win/iunknown_impl.cc',
+          'win/iunknown_impl.h',
+          'win/message_window.cc',
+          'win/message_window.h',
+          'win/object_watcher.cc',
+          'win/object_watcher.h',
+          'win/process_startup_helper.cc',
+          'win/process_startup_helper.h',
+          'win/registry.cc',
+          'win/registry.h',
+          'win/resource_util.cc',
+          'win/resource_util.h',
+          'win/scoped_bstr.cc',
+          'win/scoped_bstr.h',
+          'win/scoped_co_mem.h',
+          'win/scoped_com_initializer.h',
+          'win/scoped_comptr.h',
+          'win/scoped_gdi_object.h',
+          'win/scoped_handle.cc',
+          'win/scoped_handle.h',
+          'win/scoped_hdc.h',
+          'win/scoped_hglobal.h',
+          'win/scoped_process_information.cc',
+          'win/scoped_process_information.h',
+          'win/scoped_propvariant.h',
+          'win/scoped_select_object.h',
+          'win/scoped_variant.cc',
+          'win/scoped_variant.h',
+          'win/shortcut.cc',
+          'win/shortcut.h',
+          'win/startup_information.cc',
+          'win/startup_information.h',
+          'win/wait_chain.cc',
+          'win/wait_chain.h',
+          'win/win_util.cc',
+          'win/win_util.h',
+          'win/windows_version.cc',
+          'win/windows_version.h',
+          'win/wrapped_window_proc.cc',
+          'win/wrapped_window_proc.h',
+          '<@(trace_event_sources)',
+        ],
+        'defines': [
+          'BASE_IMPLEMENTATION',
+        ],
+        'include_dirs': [
+          '..',
+        ],
+        'target_conditions': [
+          ['OS == "mac" or OS == "ios"', {
+            'sources!': [
+              'memory/shared_memory_posix.cc',
+            ],
+          }],
+          ['OS == "ios"', {
+            'sources!': [
+               'memory/discardable_shared_memory.cc',
+               'memory/discardable_shared_memory.h',
+            ],
+          }],
+          ['(<(desktop_linux) == 0 and <(chromeos) == 0) or >(nacl_untrusted_build)==1', {
+              'sources/': [
+                ['exclude', '^nix/'],
+              ],
+          }],
+          ['<(use_glib)==0 or >(nacl_untrusted_build)==1', {
+              'sources!': [
+                'message_loop/message_pump_glib.cc',
+              ],
+          }],
+          ['(OS != "linux" and <(os_bsd) != 1 and OS != "android") or >(nacl_untrusted_build)==1', {
+              'sources!': [
+                # Not automatically excluded by the *linux.cc rules.
+                'linux_util.cc',
+              ],
+            },
+          ],
+          ['>(nacl_untrusted_build)==1', {
+            'sources!': [
+               'base_paths.cc',
+               'cpu.cc',
+               'debug/stack_trace.cc',
+               'debug/stack_trace_posix.cc',
+               'files/file_enumerator_posix.cc',
+               'files/file_path_watcher_fsevents.cc',
+               'files/file_path_watcher_fsevents.h',
+               'files/file_path_watcher_kqueue.cc',
+               'files/file_path_watcher_kqueue.h',
+               'files/file_proxy.cc',
+               'files/file_util.cc',
+               'files/file_util_posix.cc',
+               'files/file_util_proxy.cc',
+               'files/important_file_writer.cc',
+               'files/scoped_temp_dir.cc',
+               'memory/shared_memory_posix.cc',
+               'native_library_posix.cc',
+               'path_service.cc',
+               'posix/unix_domain_socket_linux.cc',
+               'process/kill.cc',
+               'process/kill_posix.cc',
+               'process/launch.cc',
+               'process/launch_posix.cc',
+               'process/process_metrics.cc',
+               'process/process_metrics_posix.cc',
+               'process/process_posix.cc',
+               'rand_util_posix.cc',
+               'scoped_native_library.cc',
+               'synchronization/read_write_lock_posix.cc',
+               'sys_info.cc',
+               'sys_info_posix.cc',
+               'third_party/dynamic_annotations/dynamic_annotations.c',
+            ],
+            'sources/': [
+              ['include', '^threading/platform_thread_linux\\.cc$'],
+            ],
+          }],
+          ['OS == "android" and >(nacl_untrusted_build)==0', {
+            'sources!': [
+              'base_paths_posix.cc',
+              'files/file_path_watcher_fsevents.cc',
+              'files/file_path_watcher_fsevents.h',
+              'files/file_path_watcher_kqueue.cc',
+              'files/file_path_watcher_kqueue.h',
+              'files/file_path_watcher_stub.cc',
+              'power_monitor/power_monitor_device_source_posix.cc',
+            ],
+            'sources/': [
+              ['include', '^debug/proc_maps_linux\\.cc$'],
+              ['include', '^files/file_path_watcher_linux\\.cc$'],
+              ['include', '^process/memory_linux\\.cc$'],
+              ['include', '^process/internal_linux\\.cc$'],
+              ['include', '^process/process_handle_linux\\.cc$'],
+              ['include', '^process/process_iterator\\.cc$'],
+              ['include', '^process/process_iterator_linux\\.cc$'],
+              ['include', '^process/process_metrics_linux\\.cc$'],
+              ['include', '^posix/unix_domain_socket_linux\\.cc$'],
+              ['include', '^strings/sys_string_conversions_posix\\.cc$'],
+              ['include', '^sys_info_linux\\.cc$'],
+              ['include', '^worker_pool_linux\\.cc$'],
+            ],
+          }],
+          ['OS == "android" and _toolset == "host" and host_os == "linux"', {
+            'sources/': [
+              # Pull in specific files for host builds.
+              ['include', '^threading/platform_thread_linux\\.cc$'],
+            ],
+          }],
+          ['<(chromeos) == 1', {
+            'sources!': [
+              'power_monitor/power_monitor_device_source_posix.cc',
+            ],
+          }],
+          ['OS == "ios" and _toolset != "host"', {
+            'sources/': [
+              # Pull in specific Mac files for iOS (which have been filtered out
+              # by file name rules).
+              ['include', '^base_paths_mac\\.'],
+              ['include', '^files/file_util_mac\\.'],
+              ['include', '^file_version_info_mac\\.'],
+              ['include', '^mac/bundle_locations\\.'],
+              ['include', '^mac/call_with_eh_frame\\.'],
+              ['include', '^mac/foundation_util\\.'],
+              ['include', '^mac/mac_logging\\.'],
+              ['include', '^mac/mach_logging\\.'],
+              ['include', '^mac/objc_property_releaser\\.'],
+              ['include', '^mac/scoped_block\\.'],
+              ['include', '^mac/scoped_mach_port\\.'],
+              ['include', '^mac/scoped_mach_vm\\.'],
+              ['include', '^mac/scoped_nsautorelease_pool\\.'],
+              ['include', '^mac/scoped_nsobject\\.'],
+              ['include', '^mac/scoped_objc_class_swizzler\\.'],
+              ['include', '^memory/shared_memory_posix\\.'],
+              ['include', '^message_loop/message_pump_mac\\.'],
+              ['include', '^strings/sys_string_conversions_mac\\.'],
+              ['include', '^threading/platform_thread_mac\\.'],
+              ['include', '^time/time_mac\\.'],
+              ['include', '^worker_pool_mac\\.'],
+              # Exclude all process/ except the minimal implementation
+              # needed on iOS (mostly for unit tests).
+              ['exclude', '^process/.*'],
+              ['include', '^process/.*_ios\.(cc|mm)$'],
+              ['include', '^process/memory_stubs\.cc$'],
+              ['include', '^process/process_handle_posix\.cc$'],
+              ['include', '^process/process_metrics\\.cc$'],
+              # Exclude unsupported features on iOS.
+              ['exclude', '^files/file_path_watcher.*'],
+              ['exclude', '^threading/platform_thread_internal_posix\\.(h|cc)'],
+              ['exclude', '^trace_event/malloc_dump_provider\\.(h|cc)$'],
+            ],
+            'sources': [
+              'process/memory_stubs.cc',
+            ],
+            'sources!': [
+              'message_loop/message_pump_libevent.cc'
+            ],
+          }],
+          ['OS == "ios" and _toolset == "host"', {
+            'sources/': [
+              # Copied filename_rules to switch from iOS to Mac inclusions.
+              ['include', '_(cocoa|mac)(_unittest)?\\.(h|cc|mm?)$'],
+              ['include', '(^|/)(cocoa|mac)/'],
+              ['exclude', '_ios(_unittest)?\\.(h|cc|mm?)$'],
+              ['exclude', '(^|/)ios/'],
+              ['exclude', 'files/file_path_watcher_fsevents.cc'],
+              ['exclude', 'files/file_path_watcher_fsevents.h'],
+              ['include', 'files/file_path_watcher_mac.cc'],
+            ]
+          }],
+          # For now, just test the *BSD platforms enough to exclude them.
+          # Subsequent changes will include them further.
+          ['OS != "freebsd" or >(nacl_untrusted_build)==1', {
+              'sources/': [ ['exclude', '_freebsd\\.cc$'] ],
+            },
+          ],
+          ['OS != "openbsd" or >(nacl_untrusted_build)==1', {
+              'sources/': [ ['exclude', '_openbsd\\.cc$'] ],
+            },
+          ],
+          ['OS == "win" and >(nacl_untrusted_build)==0', {
+            'include_dirs': [
+              '<(DEPTH)/third_party/wtl/include',
+            ],
+            'sources': [
+              'profiler/win32_stack_frame_unwinder.cc',
+              'profiler/win32_stack_frame_unwinder.h',
+            ],
+            'sources!': [
+              'files/file_path_watcher_fsevents.cc',
+              'files/file_path_watcher_fsevents.h',
+              'files/file_path_watcher_kqueue.cc',
+              'files/file_path_watcher_kqueue.h',
+              'files/file_path_watcher_stub.cc',
+              'message_loop/message_pump_libevent.cc',
+              'posix/file_descriptor_shuffle.cc',
+              'strings/string16.cc',
+            ],
+          },],
+          ['<(use_ozone) == 1', {
+            'sources!': [
+              'message_loop/message_pump_glib.cc',
+            ]
+          }],
+          ['OS == "linux" and >(nacl_untrusted_build)==0', {
+            'sources!': [
+              'files/file_path_watcher_fsevents.cc',
+              'files/file_path_watcher_fsevents.h',
+              'files/file_path_watcher_kqueue.cc',
+              'files/file_path_watcher_kqueue.h',
+              'files/file_path_watcher_stub.cc',
+            ],
+          }],
+          ['(OS == "mac" or OS == "ios") and >(nacl_untrusted_build)==0', {
+            'sources/': [
+              ['exclude', '^base_paths_posix\\.cc$'],
+              ['exclude', '^files/file_path_watcher_stub\\.cc$'],
+              ['exclude', '^native_library_posix\\.cc$'],
+              ['exclude', '^strings/sys_string_conversions_posix\\.cc$'],
+              ['exclude', '^threading/platform_thread_internal_posix\\.cc$'],
+            ],
+          }],
+          ['<(os_bsd)==1 and >(nacl_untrusted_build)==0', {
+            'sources': [
+              'process/memory_stubs.cc',
+            ],
+            'sources/': [
+              ['exclude', '^files/file_path_watcher_linux\\.cc$'],
+              ['exclude', '^files/file_path_watcher_stub\\.cc$'],
+              ['exclude', '^files/file_util_linux\\.cc$'],
+              ['exclude', '^process/process_linux\\.cc$'],
+              ['exclude', '^sys_info_linux\\.cc$'],
+            ],
+          }],
+          # Remove all unnecessary files for build_nexe.py to avoid exceeding
+          # command-line-string limitation when building NaCl on Windows.
+          ['OS == "win" and >(nacl_untrusted_build)==1', {
+              'sources/': [ ['exclude', '\\.h$'] ],
+          }],
+          # Enable more direct string conversions on platforms with native utf8
+          # strings
+          ['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
+            'defines': ['SYSTEM_NATIVE_UTF8'],
+          }],
+        ],
+      }],
+      ['base_i18n_target==1', {
+        'defines': [
+          'BASE_I18N_IMPLEMENTATION',
+        ],
+        'sources': [
+          'i18n/base_i18n_export.h',
+          'i18n/base_i18n_switches.cc',
+          'i18n/base_i18n_switches.h',
+          'i18n/bidi_line_iterator.cc',
+          'i18n/bidi_line_iterator.h',
+          'i18n/break_iterator.cc',
+          'i18n/break_iterator.h',
+          'i18n/case_conversion.cc',
+          'i18n/case_conversion.h',
+          'i18n/char_iterator.cc',
+          'i18n/char_iterator.h',
+          'i18n/file_util_icu.cc',
+          'i18n/file_util_icu.h',
+          'i18n/i18n_constants.cc',
+          'i18n/i18n_constants.h',
+          'i18n/icu_encoding_detection.cc',
+          'i18n/icu_encoding_detection.h',
+          'i18n/icu_string_conversions.cc',
+          'i18n/icu_string_conversions.h',
+          'i18n/icu_util.cc',
+          'i18n/icu_util.h',
+          'i18n/message_formatter.cc',
+          'i18n/message_formatter.h',
+          'i18n/number_formatting.cc',
+          'i18n/number_formatting.h',
+          'i18n/rtl.cc',
+          'i18n/rtl.h',
+          'i18n/streaming_utf8_validator.cc',
+          'i18n/streaming_utf8_validator.h',
+          'i18n/string_compare.cc',
+          'i18n/string_compare.h',
+          'i18n/string_search.cc',
+          'i18n/string_search.h',
+          'i18n/time_formatting.cc',
+          'i18n/time_formatting.h',
+          'i18n/timezone.cc',
+          'i18n/timezone.h',
+          'i18n/utf8_validator_tables.cc',
+          'i18n/utf8_validator_tables.h',
+        ],
+      }]
+    ],
+  },
+}
diff --git a/base/base_nacl.gyp b/base/base_nacl.gyp
new file mode 100644
index 0000000..30763d4
--- /dev/null
+++ b/base/base_nacl.gyp
@@ -0,0 +1,158 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'chromium_code': 1,
+  },
+  'includes': [
+    # base.gypi must be included before common_untrusted.gypi.
+    #
+    # TODO(sergeyu): Replace the target_defaults magic in base.gypi with a
+    # sources variables lists. That way order of includes will not matter.
+    'base.gypi',
+    '../build/common_untrusted.gypi',
+  ],
+  'conditions': [
+    ['disable_nacl==0 and disable_nacl_untrusted==0', {
+      'targets': [
+        {
+          'target_name': 'base_nacl',
+          'type': 'none',
+          'variables': {
+            'base_target': 1,
+            'nacl_untrusted_build': 1,
+            'nlib_target': 'libbase_nacl.a',
+            'build_glibc': 0,
+            'build_newlib': 0,
+            'build_irt': 1,
+            'build_pnacl_newlib': 1,
+            'sources': [
+              'base_switches.cc',
+              'base_switches.h',
+              'strings/string16.cc',
+              'sync_socket_nacl.cc',
+              'time/time_posix.cc',
+            ],
+            'compile_flags': [
+              '-fno-strict-aliasing',
+            ],
+          },
+          'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
+            'base.gyp:base_debugging_flags',
+            'base.gyp:base_build_date',
+          ],
+        },
+        {
+          'target_name': 'base_i18n_nacl',
+          'type': 'none',
+          'variables': {
+            'base_i18n_target': 1,
+            'nacl_untrusted_build': 1,
+            'nlib_target': 'libbase_i18n_nacl.a',
+            'build_glibc': 0,
+            'build_newlib': 0,
+            'build_irt': 0,
+            'build_pnacl_newlib': 1,
+            'sources': [
+              'base_switches.cc',
+              'base_switches.h',
+              'strings/string16.cc',
+              'sync_socket_nacl.cc',
+              'time/time_posix.cc',
+            ],
+          },
+          'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
+            'base.gyp:base_build_date',
+            '../third_party/icu/icu_nacl.gyp:icudata_nacl',
+            '../third_party/icu/icu_nacl.gyp:icui18n_nacl',
+            '../third_party/icu/icu_nacl.gyp:icuuc_nacl',
+          ],
+        },
+        {
+          'target_name': 'base_nacl_nonsfi',
+          'type': 'none',
+          'variables': {
+            'base_target': 1,
+            'nacl_untrusted_build': 1,
+            'nlib_target': 'libbase_nacl_nonsfi.a',
+            'build_glibc': 0,
+            'build_newlib': 0,
+            'build_irt': 0,
+            'build_pnacl_newlib': 0,
+            'build_nonsfi_helper': 1,
+
+            'sources': [
+              'base_switches.cc',
+              'base_switches.h',
+
+              # For PathExists and ReadFromFD.
+              'files/file_util.cc',
+              'files/file_util_posix.cc',
+
+              # For MessageLoopForIO based on libevent.
+              'message_loop/message_pump_libevent.cc',
+              'message_loop/message_pump_libevent.h',
+
+              # For UnixDomainSocket::SendMsg and RecvMsg.
+              'posix/unix_domain_socket_linux.cc',
+
+              # For GetKnownDeadTerminationStatus and GetTerminationStatus.
+              'process/kill_posix.cc',
+
+              # For ForkWithFlags.
+              'process/launch.h',
+              'process/launch_posix.cc',
+
+              # Unlike libbase_nacl, for Non-SFI build, we need to use
+              # rand_util_posix for random implementation, instead of
+              # rand_util_nacl.cc, which is based on IRT. rand_util_nacl.cc is
+              # excluded below.
+              'rand_util_posix.cc',
+
+              # For CancelableSyncSocket.
+              'sync_socket_nacl.cc',
+            ],
+          },
+          'sources!': [
+            'rand_util_nacl.cc',
+          ],
+          'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
+            'base.gyp:base_debugging_flags',
+            'base.gyp:base_build_date',
+            'third_party/libevent/libevent_nacl_nonsfi.gyp:event_nacl_nonsfi',
+          ],
+        },
+        {
+          'target_name': 'test_support_base_nacl_nonsfi',
+          'type': 'none',
+          'variables': {
+            'nacl_untrusted_build': 1,
+            'nlib_target': 'libtest_support_base_nacl_nonsfi.a',
+            'build_glibc': 0,
+            'build_newlib': 0,
+            'build_irt': 0,
+            'build_pnacl_newlib': 0,
+            'build_nonsfi_helper': 1,
+
+            'sources': [
+              'test/gtest_util.cc',
+              'test/launcher/unit_test_launcher_nacl_nonsfi.cc',
+              'test/gtest_xml_unittest_result_printer.cc',
+              'test/test_switches.cc',
+            ],
+          },
+          'dependencies': [
+            'base.gyp:base_build_date',
+            'base_nacl_nonsfi',
+            '../testing/gtest_nacl.gyp:gtest_nacl',
+          ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/base/base_switches.cc b/base/base_switches.cc
index e8aa5cb..f5c6eb3 100644
--- a/base/base_switches.cc
+++ b/base/base_switches.cc
@@ -24,12 +24,6 @@
 // derived from trace events are reported.
 const char kEnableHeapProfilingModeNative[] = "native";
 
-// Report per-task heap usage and churn in the task profiler.
-// Does not keep track of individual allocations unlike the default and native
-// mode. Keeps only track of summarized churn stats in the task profiler
-// (chrome://profiler).
-const char kEnableHeapProfilingTaskProfiler[] = "task-profiler";
-
 // Generates full memory crash dump.
 const char kFullMemoryCrashReport[]         = "full-memory-crash-report";
 
diff --git a/base/base_switches.h b/base/base_switches.h
index 04b0773..0585186 100644
--- a/base/base_switches.h
+++ b/base/base_switches.h
@@ -16,7 +16,6 @@
 extern const char kEnableCrashReporter[];
 extern const char kEnableHeapProfiling[];
 extern const char kEnableHeapProfilingModeNative[];
-extern const char kEnableHeapProfilingTaskProfiler[];
 extern const char kEnableLowEndDeviceMode[];
 extern const char kForceFieldTrials[];
 extern const char kFullMemoryCrashReport[];
diff --git a/base/base_unittests.isolate b/base/base_unittests.isolate
new file mode 100644
index 0000000..208501f
--- /dev/null
+++ b/base/base_unittests.isolate
@@ -0,0 +1,56 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'variables': {
+    'command': [
+      '../testing/test_env.py',
+      '<(PRODUCT_DIR)/base_unittests<(EXECUTABLE_SUFFIX)',
+      '--brave-new-test-launcher',
+      '--test-launcher-bot-mode',
+      '--asan=<(asan)',
+      '--msan=<(msan)',
+      '--tsan=<(tsan)',
+    ],
+  },
+  'conditions': [
+    ['OS=="android" or OS=="linux" or OS=="mac" or OS=="win"', {
+      'variables': {
+        'files': [
+          'test/data/',
+        ],
+      },
+    }],
+    ['OS=="linux" or OS=="mac" or OS=="win"', {
+      'variables': {
+        'files': [
+          '../testing/test_env.py',
+        ],
+      },
+    }],
+    ['OS=="linux"', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/lib/libmalloc_wrapper.so',
+        ],
+      },
+    }],
+    ['OS=="mac" and asan==1 and fastbuild==0', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/base_unittests.dSYM/',
+        ],
+      },
+    }],
+    ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/base_unittests.exe.pdb',
+        ],
+      },
+    }],
+  ],
+  'includes': [
+    'base.isolate',
+  ],
+}
diff --git a/base/bind.h b/base/bind.h
index ce71797..9cf65b6 100644
--- a/base/bind.h
+++ b/base/bind.h
@@ -11,7 +11,7 @@
 // Usage documentation
 // -----------------------------------------------------------------------------
 //
-// See //docs/callback.md for documentation.
+// See base/callback.h for documentation.
 //
 //
 // -----------------------------------------------------------------------------
@@ -24,58 +24,18 @@
 
 namespace base {
 
-// Bind as OnceCallback.
 template <typename Functor, typename... Args>
-inline OnceCallback<MakeUnboundRunType<Functor, Args...>>
-BindOnce(Functor&& functor, Args&&... args) {
+inline base::Callback<MakeUnboundRunType<Functor, Args...>> Bind(
+    Functor&& functor,
+    Args&&... args) {
   using BindState = internal::MakeBindStateType<Functor, Args...>;
   using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
   using Invoker = internal::Invoker<BindState, UnboundRunType>;
-  using CallbackType = OnceCallback<UnboundRunType>;
 
-  // Store the invoke func into PolymorphicInvoke before casting it to
-  // InvokeFuncStorage, so that we can ensure its type matches to
-  // PolymorphicInvoke, to which CallbackType will cast back.
-  using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
-  PolymorphicInvoke invoke_func = &Invoker::RunOnce;
-
-  using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
-  return CallbackType(new BindState(
-      reinterpret_cast<InvokeFuncStorage>(invoke_func),
-      std::forward<Functor>(functor),
-      std::forward<Args>(args)...));
-}
-
-// Bind as RepeatingCallback.
-template <typename Functor, typename... Args>
-inline RepeatingCallback<MakeUnboundRunType<Functor, Args...>>
-BindRepeating(Functor&& functor, Args&&... args) {
-  using BindState = internal::MakeBindStateType<Functor, Args...>;
-  using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
-  using Invoker = internal::Invoker<BindState, UnboundRunType>;
-  using CallbackType = RepeatingCallback<UnboundRunType>;
-
-  // Store the invoke func into PolymorphicInvoke before casting it to
-  // InvokeFuncStorage, so that we can ensure its type matches to
-  // PolymorphicInvoke, to which CallbackType will cast back.
-  using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
-  PolymorphicInvoke invoke_func = &Invoker::Run;
-
-  using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
-  return CallbackType(new BindState(
-      reinterpret_cast<InvokeFuncStorage>(invoke_func),
-      std::forward<Functor>(functor),
-      std::forward<Args>(args)...));
-}
-
-// Unannotated Bind.
-// TODO(tzik): Deprecate this and migrate to OnceCallback and
-// RepeatingCallback, once they get ready.
-template <typename Functor, typename... Args>
-inline Callback<MakeUnboundRunType<Functor, Args...>>
-Bind(Functor&& functor, Args&&... args) {
-  return BindRepeating(std::forward<Functor>(functor),
-                       std::forward<Args>(args)...);
+  using CallbackType = Callback<UnboundRunType>;
+  return CallbackType(new BindState(std::forward<Functor>(functor),
+                                    std::forward<Args>(args)...),
+                      &Invoker::Run);
 }
 
 }  // namespace base
diff --git a/base/bind_helpers.h b/base/bind_helpers.h
index 7b3d7d3..93d02e3 100644
--- a/base/bind_helpers.h
+++ b/base/bind_helpers.h
@@ -21,7 +21,7 @@
 // Owned() transfers ownership of an object to the Callback resulting from
 // bind; the object will be deleted when the Callback is deleted.
 //
-// Passed() is for transferring movable-but-not-copyable types (eg. unique_ptr)
+// Passed() is for transferring movable-but-not-copyable types (eg. scoped_ptr)
 // through a Callback. Logically, this signifies a destructive transfer of
 // the state of the argument into the target function.  Invoking
 // Callback::Run() twice on a Callback that was created with a Passed()
@@ -174,14 +174,8 @@
 template <typename T>
 struct IsWeakReceiver;
 
-template <typename>
-struct BindUnwrapTraits;
-
 namespace internal {
 
-template <typename Functor, typename SFINAE = void>
-struct FunctorTraits;
-
 template <typename T>
 class UnretainedWrapper {
  public:
@@ -281,12 +275,35 @@
   mutable T scoper_;
 };
 
+// Unwrap the stored parameters for the wrappers above.
 template <typename T>
-using Unwrapper = BindUnwrapTraits<typename std::decay<T>::type>;
+T&& Unwrap(T&& o) {
+  return std::forward<T>(o);
+}
 
 template <typename T>
-auto Unwrap(T&& o) -> decltype(Unwrapper<T>::Unwrap(std::forward<T>(o))) {
-  return Unwrapper<T>::Unwrap(std::forward<T>(o));
+T* Unwrap(const UnretainedWrapper<T>& unretained) {
+  return unretained.get();
+}
+
+template <typename T>
+const T& Unwrap(const ConstRefWrapper<T>& const_ref) {
+  return const_ref.get();
+}
+
+template <typename T>
+T* Unwrap(const RetainedRefWrapper<T>& o) {
+  return o.get();
+}
+
+template <typename T>
+T* Unwrap(const OwnedWrapper<T>& o) {
+  return o.get();
+}
+
+template <typename T>
+T Unwrap(const PassedWrapper<T>& o) {
+  return o.Take();
 }
 
 // IsWeakMethod is a helper that determine if we are binding a WeakPtr<> to a
@@ -480,92 +497,6 @@
 template <typename T>
 struct IsWeakReceiver<WeakPtr<T>> : std::true_type {};
 
-// An injection point to control how bound objects passed to the target
-// function. BindUnwrapTraits<>::Unwrap() is called for each bound objects right
-// before the target function is invoked.
-template <typename>
-struct BindUnwrapTraits {
-  template <typename T>
-  static T&& Unwrap(T&& o) { return std::forward<T>(o); }
-};
-
-template <typename T>
-struct BindUnwrapTraits<internal::UnretainedWrapper<T>> {
-  static T* Unwrap(const internal::UnretainedWrapper<T>& o) {
-    return o.get();
-  }
-};
-
-template <typename T>
-struct BindUnwrapTraits<internal::ConstRefWrapper<T>> {
-  static const T& Unwrap(const internal::ConstRefWrapper<T>& o) {
-    return o.get();
-  }
-};
-
-template <typename T>
-struct BindUnwrapTraits<internal::RetainedRefWrapper<T>> {
-  static T* Unwrap(const internal::RetainedRefWrapper<T>& o) {
-    return o.get();
-  }
-};
-
-template <typename T>
-struct BindUnwrapTraits<internal::OwnedWrapper<T>> {
-  static T* Unwrap(const internal::OwnedWrapper<T>& o) {
-    return o.get();
-  }
-};
-
-template <typename T>
-struct BindUnwrapTraits<internal::PassedWrapper<T>> {
-  static T Unwrap(const internal::PassedWrapper<T>& o) {
-    return o.Take();
-  }
-};
-
-// CallbackCancellationTraits allows customization of Callback's cancellation
-// semantics. By default, callbacks are not cancellable. A specialization should
-// set is_cancellable = true and implement an IsCancelled() that returns if the
-// callback should be cancelled.
-template <typename Functor, typename BoundArgsTuple, typename SFINAE = void>
-struct CallbackCancellationTraits {
-  static constexpr bool is_cancellable = false;
-};
-
-// Specialization for method bound to weak pointer receiver.
-template <typename Functor, typename... BoundArgs>
-struct CallbackCancellationTraits<
-    Functor,
-    std::tuple<BoundArgs...>,
-    typename std::enable_if<
-        internal::IsWeakMethod<internal::FunctorTraits<Functor>::is_method,
-                               BoundArgs...>::value>::type> {
-  static constexpr bool is_cancellable = true;
-
-  template <typename Receiver, typename... Args>
-  static bool IsCancelled(const Functor&,
-                          const Receiver& receiver,
-                          const Args&...) {
-    return !receiver;
-  }
-};
-
-// Specialization for a nested bind.
-template <typename Signature,
-          typename... BoundArgs,
-          internal::CopyMode copy_mode,
-          internal::RepeatMode repeat_mode>
-struct CallbackCancellationTraits<Callback<Signature, copy_mode, repeat_mode>,
-                                  std::tuple<BoundArgs...>> {
-  static constexpr bool is_cancellable = true;
-
-  template <typename Functor>
-  static bool IsCancelled(const Functor& functor, const BoundArgs&...) {
-    return functor.IsCancelled();
-  }
-};
-
 }  // namespace base
 
 #endif  // BASE_BIND_HELPERS_H_
diff --git a/base/bind_internal.h b/base/bind_internal.h
index 8988bdc..3d6ca09 100644
--- a/base/bind_internal.h
+++ b/base/bind_internal.h
@@ -130,7 +130,7 @@
 // FunctorTraits<>
 //
 // See description at top of file.
-template <typename Functor, typename SFINAE>
+template <typename Functor, typename SFINAE = void>
 struct FunctorTraits;
 
 // For a callable type that is convertible to the corresponding function type.
@@ -244,16 +244,14 @@
   template <typename IgnoreResultType, typename... RunArgs>
   static void Invoke(IgnoreResultType&& ignore_result_helper,
                      RunArgs&&... args) {
-    FunctorTraits<T>::Invoke(
-        std::forward<IgnoreResultType>(ignore_result_helper).functor_,
-        std::forward<RunArgs>(args)...);
+    FunctorTraits<T>::Invoke(ignore_result_helper.functor_,
+                             std::forward<RunArgs>(args)...);
   }
 };
 
 // For Callbacks.
-template <typename R, typename... Args,
-          CopyMode copy_mode, RepeatMode repeat_mode>
-struct FunctorTraits<Callback<R(Args...), copy_mode, repeat_mode>> {
+template <typename R, typename... Args, CopyMode copy_mode>
+struct FunctorTraits<Callback<R(Args...), copy_mode>> {
   using RunType = R(Args...);
   static constexpr bool is_method = false;
   static constexpr bool is_nullable = true;
@@ -316,19 +314,6 @@
 
 template <typename StorageType, typename R, typename... UnboundArgs>
 struct Invoker<StorageType, R(UnboundArgs...)> {
-  static R RunOnce(BindStateBase* base, UnboundArgs&&... unbound_args) {
-    // Local references to make debugger stepping easier. If in a debugger,
-    // you really want to warp ahead and step through the
-    // InvokeHelper<>::MakeItSo() call below.
-    StorageType* storage = static_cast<StorageType*>(base);
-    static constexpr size_t num_bound_args =
-        std::tuple_size<decltype(storage->bound_args_)>::value;
-    return RunImpl(std::move(storage->functor_),
-                   std::move(storage->bound_args_),
-                   MakeIndexSequence<num_bound_args>(),
-                   std::forward<UnboundArgs>(unbound_args)...);
-  }
-
   static R Run(BindStateBase* base, UnboundArgs&&... unbound_args) {
     // Local references to make debugger stepping easier. If in a debugger,
     // you really want to warp ahead and step through the
@@ -387,102 +372,27 @@
   return false;
 }
 
-// Used by ApplyCancellationTraits below.
-template <typename Functor, typename BoundArgsTuple, size_t... indices>
-bool ApplyCancellationTraitsImpl(const Functor& functor,
-                                 const BoundArgsTuple& bound_args,
-                                 IndexSequence<indices...>) {
-  return CallbackCancellationTraits<Functor, BoundArgsTuple>::IsCancelled(
-      functor, base::get<indices>(bound_args)...);
-}
-
-// Relays |base| to corresponding CallbackCancellationTraits<>::Run(). Returns
-// true if the callback |base| represents is canceled.
-template <typename BindStateType>
-bool ApplyCancellationTraits(const BindStateBase* base) {
-  const BindStateType* storage = static_cast<const BindStateType*>(base);
-  static constexpr size_t num_bound_args =
-      std::tuple_size<decltype(storage->bound_args_)>::value;
-  return ApplyCancellationTraitsImpl(storage->functor_, storage->bound_args_,
-                                     MakeIndexSequence<num_bound_args>());
-};
-
-// Template helpers to detect using Bind() on a base::Callback without any
-// additional arguments. In that case, the original base::Callback object should
-// just be directly used.
-template <typename Functor, typename... BoundArgs>
-struct BindingCallbackWithNoArgs {
-  static constexpr bool value = false;
-};
-
-template <typename Signature,
-          typename... BoundArgs,
-          CopyMode copy_mode,
-          RepeatMode repeat_mode>
-struct BindingCallbackWithNoArgs<Callback<Signature, copy_mode, repeat_mode>,
-                                 BoundArgs...> {
-  static constexpr bool value = sizeof...(BoundArgs) == 0;
-};
-
 // BindState<>
 //
 // This stores all the state passed into Bind().
 template <typename Functor, typename... BoundArgs>
 struct BindState final : BindStateBase {
-  using IsCancellable = std::integral_constant<
-      bool,
-      CallbackCancellationTraits<Functor,
-                                 std::tuple<BoundArgs...>>::is_cancellable>;
-
   template <typename ForwardFunctor, typename... ForwardBoundArgs>
-  explicit BindState(BindStateBase::InvokeFuncStorage invoke_func,
-                     ForwardFunctor&& functor,
-                     ForwardBoundArgs&&... bound_args)
-      // IsCancellable is std::false_type if
-      // CallbackCancellationTraits<>::IsCancelled returns always false.
-      // Otherwise, it's std::true_type.
-      : BindState(IsCancellable{},
-                  invoke_func,
-                  std::forward<ForwardFunctor>(functor),
-                  std::forward<ForwardBoundArgs>(bound_args)...) {
-    static_assert(!BindingCallbackWithNoArgs<Functor, BoundArgs...>::value,
-                  "Attempting to bind a base::Callback with no additional "
-                  "arguments: save a heap allocation and use the original "
-                  "base::Callback object");
+  explicit BindState(ForwardFunctor&& functor, ForwardBoundArgs&&... bound_args)
+      : BindStateBase(&Destroy),
+      functor_(std::forward<ForwardFunctor>(functor)),
+        bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+    DCHECK(!IsNull(functor_));
   }
 
   Functor functor_;
   std::tuple<BoundArgs...> bound_args_;
 
  private:
-  template <typename ForwardFunctor, typename... ForwardBoundArgs>
-  explicit BindState(std::true_type,
-                     BindStateBase::InvokeFuncStorage invoke_func,
-                     ForwardFunctor&& functor,
-                     ForwardBoundArgs&&... bound_args)
-      : BindStateBase(invoke_func,
-                      &Destroy,
-                      &ApplyCancellationTraits<BindState>),
-        functor_(std::forward<ForwardFunctor>(functor)),
-        bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
-    DCHECK(!IsNull(functor_));
-  }
-
-  template <typename ForwardFunctor, typename... ForwardBoundArgs>
-  explicit BindState(std::false_type,
-                     BindStateBase::InvokeFuncStorage invoke_func,
-                     ForwardFunctor&& functor,
-                     ForwardBoundArgs&&... bound_args)
-      : BindStateBase(invoke_func, &Destroy),
-        functor_(std::forward<ForwardFunctor>(functor)),
-        bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
-    DCHECK(!IsNull(functor_));
-  }
-
   ~BindState() {}
 
-  static void Destroy(const BindStateBase* self) {
-    delete static_cast<const BindState*>(self);
+  static void Destroy(BindStateBase* self) {
+    delete static_cast<BindState*>(self);
   }
 };
 
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index a9ca9d2..ba5113b 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -13,14 +13,11 @@
 #include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/weak_ptr.h"
-#include "base/test/gtest_util.h"
 #include "build/build_config.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-using ::testing::_;
 using ::testing::Mock;
-using ::testing::ByMove;
 using ::testing::Return;
 using ::testing::StrictMock;
 
@@ -39,9 +36,6 @@
   MOCK_METHOD0(IntMethod0, int());
   MOCK_CONST_METHOD0(IntConstMethod0, int());
 
-  MOCK_METHOD1(VoidMethodWithIntArg, void(int));
-  MOCK_METHOD0(UniquePtrMethod0, std::unique_ptr<int>());
-
  private:
   // Particularly important in this test to ensure no copies are made.
   DISALLOW_COPY_AND_ASSIGN(NoRef);
@@ -351,28 +345,53 @@
 };
 
 StrictMock<NoRef>* BindTest::static_func_mock_ptr;
-StrictMock<NoRef>* g_func_mock_ptr;
 
-void VoidFunc0() {
-  g_func_mock_ptr->VoidMethod0();
+// Sanity check that we can instantiate a callback for each arity.
+TEST_F(BindTest, ArityTest) {
+  Callback<int()> c0 = Bind(&Sum, 32, 16, 8, 4, 2, 1);
+  EXPECT_EQ(63, c0.Run());
+
+  Callback<int(int)> c1 = Bind(&Sum, 32, 16, 8, 4, 2);
+  EXPECT_EQ(75, c1.Run(13));
+
+  Callback<int(int,int)> c2 = Bind(&Sum, 32, 16, 8, 4);
+  EXPECT_EQ(85, c2.Run(13, 12));
+
+  Callback<int(int,int,int)> c3 = Bind(&Sum, 32, 16, 8);
+  EXPECT_EQ(92, c3.Run(13, 12, 11));
+
+  Callback<int(int,int,int,int)> c4 = Bind(&Sum, 32, 16);
+  EXPECT_EQ(94, c4.Run(13, 12, 11, 10));
+
+  Callback<int(int,int,int,int,int)> c5 = Bind(&Sum, 32);
+  EXPECT_EQ(87, c5.Run(13, 12, 11, 10, 9));
+
+  Callback<int(int,int,int,int,int,int)> c6 = Bind(&Sum);
+  EXPECT_EQ(69, c6.Run(13, 12, 11, 10, 9, 14));
 }
 
-int IntFunc0() {
-  return g_func_mock_ptr->IntMethod0();
-}
+// Test the Currying ability of the Callback system.
+TEST_F(BindTest, CurryingTest) {
+  Callback<int(int,int,int,int,int,int)> c6 = Bind(&Sum);
+  EXPECT_EQ(69, c6.Run(13, 12, 11, 10, 9, 14));
 
-TEST_F(BindTest, BasicTest) {
-  Callback<int(int, int, int)> cb = Bind(&Sum, 32, 16, 8);
-  EXPECT_EQ(92, cb.Run(13, 12, 11));
+  Callback<int(int,int,int,int,int)> c5 = Bind(c6, 32);
+  EXPECT_EQ(87, c5.Run(13, 12, 11, 10, 9));
 
-  Callback<int(int, int, int, int, int, int)> c1 = Bind(&Sum);
-  EXPECT_EQ(69, c1.Run(14, 13, 12, 11, 10, 9));
+  Callback<int(int,int,int,int)> c4 = Bind(c5, 16);
+  EXPECT_EQ(94, c4.Run(13, 12, 11, 10));
 
-  Callback<int(int, int, int)> c2 = Bind(c1, 32, 16, 8);
-  EXPECT_EQ(86, c2.Run(11, 10, 9));
+  Callback<int(int,int,int)> c3 = Bind(c4, 8);
+  EXPECT_EQ(92, c3.Run(13, 12, 11));
 
-  Callback<int()> c3 = Bind(c2, 4, 2, 1);
-  EXPECT_EQ(63, c3.Run());
+  Callback<int(int,int)> c2 = Bind(c3, 4);
+  EXPECT_EQ(85, c2.Run(13, 12));
+
+  Callback<int(int)> c1 = Bind(c2, 2);
+  EXPECT_EQ(75, c1.Run(13));
+
+  Callback<int()> c0 = Bind(c1, 1);
+  EXPECT_EQ(63, c0.Run());
 }
 
 // Test that currying the rvalue result of another Bind() works correctly.
@@ -380,8 +399,7 @@
 //   - multiple runs of resulting Callback remain valid.
 TEST_F(BindTest, CurryingRvalueResultOfBind) {
   int n = 0;
-  RepeatingClosure cb = BindRepeating(&TakesACallback,
-                                      BindRepeating(&PtrArgSet, &n));
+  Closure cb = base::Bind(&TakesACallback, base::Bind(&PtrArgSet, &n));
 
   // If we implement Bind() such that the return value has auto_ptr-like
   // semantics, the second call here will fail because ownership of
@@ -395,388 +413,6 @@
   EXPECT_EQ(2, n);
 }
 
-TEST_F(BindTest, RepeatingCallbackBasicTest) {
-  RepeatingCallback<int(int)> c0 = BindRepeating(&Sum, 1, 2, 4, 8, 16);
-
-  // RepeatingCallback can run via a lvalue-reference.
-  EXPECT_EQ(63, c0.Run(32));
-
-  // It is valid to call a RepeatingCallback more than once.
-  EXPECT_EQ(54, c0.Run(23));
-
-  // BindRepeating can handle a RepeatingCallback as the target functor.
-  RepeatingCallback<int()> c1 = BindRepeating(c0, 11);
-
-  // RepeatingCallback can run via a rvalue-reference.
-  EXPECT_EQ(42, std::move(c1).Run());
-
-  // BindRepeating can handle a rvalue-reference of RepeatingCallback.
-  EXPECT_EQ(32, BindRepeating(std::move(c0), 1).Run());
-}
-
-TEST_F(BindTest, OnceCallbackBasicTest) {
-  OnceCallback<int(int)> c0 = BindOnce(&Sum, 1, 2, 4, 8, 16);
-
-  // OnceCallback can run via a rvalue-reference.
-  EXPECT_EQ(63, std::move(c0).Run(32));
-
-  // After running via the rvalue-reference, the value of the OnceCallback
-  // is undefined. The implementation simply clears the instance after the
-  // invocation.
-  EXPECT_TRUE(c0.is_null());
-
-  c0 = BindOnce(&Sum, 2, 3, 5, 7, 11);
-
-  // BindOnce can handle a rvalue-reference of OnceCallback as the target
-  // functor.
-  OnceCallback<int()> c1 = BindOnce(std::move(c0), 13);
-  EXPECT_EQ(41, std::move(c1).Run());
-
-  RepeatingCallback<int(int)> c2 = BindRepeating(&Sum, 2, 3, 5, 7, 11);
-  EXPECT_EQ(41, BindOnce(c2, 13).Run());
-}
-
-// IgnoreResult adapter test.
-//   - Function with return value.
-//   - Method with return value.
-//   - Const Method with return.
-//   - Method with return value bound to WeakPtr<>.
-//   - Const Method with return bound to WeakPtr<>.
-TEST_F(BindTest, IgnoreResultForRepeating) {
-  EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
-  EXPECT_CALL(has_ref_, AddRef()).Times(2);
-  EXPECT_CALL(has_ref_, Release()).Times(2);
-  EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(10));
-  EXPECT_CALL(has_ref_, IntConstMethod0()).WillOnce(Return(11));
-  EXPECT_CALL(no_ref_, IntMethod0()).WillOnce(Return(12));
-  EXPECT_CALL(no_ref_, IntConstMethod0()).WillOnce(Return(13));
-
-  RepeatingClosure normal_func_cb = BindRepeating(IgnoreResult(&IntFunc0));
-  normal_func_cb.Run();
-
-  RepeatingClosure non_void_method_cb =
-      BindRepeating(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
-  non_void_method_cb.Run();
-
-  RepeatingClosure non_void_const_method_cb =
-      BindRepeating(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
-  non_void_const_method_cb.Run();
-
-  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
-  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
-
-  RepeatingClosure non_void_weak_method_cb  =
-      BindRepeating(IgnoreResult(&NoRef::IntMethod0),
-                    weak_factory.GetWeakPtr());
-  non_void_weak_method_cb.Run();
-
-  RepeatingClosure non_void_weak_const_method_cb =
-      BindRepeating(IgnoreResult(&NoRef::IntConstMethod0),
-                    weak_factory.GetWeakPtr());
-  non_void_weak_const_method_cb.Run();
-
-  weak_factory.InvalidateWeakPtrs();
-  non_void_weak_const_method_cb.Run();
-  non_void_weak_method_cb.Run();
-}
-
-TEST_F(BindTest, IgnoreResultForOnce) {
-  EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
-  EXPECT_CALL(has_ref_, AddRef()).Times(2);
-  EXPECT_CALL(has_ref_, Release()).Times(2);
-  EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(10));
-  EXPECT_CALL(has_ref_, IntConstMethod0()).WillOnce(Return(11));
-
-  OnceClosure normal_func_cb = BindOnce(IgnoreResult(&IntFunc0));
-  std::move(normal_func_cb).Run();
-
-  OnceClosure non_void_method_cb =
-      BindOnce(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
-  std::move(non_void_method_cb).Run();
-
-  OnceClosure non_void_const_method_cb =
-      BindOnce(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
-  std::move(non_void_const_method_cb).Run();
-
-  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
-  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
-
-  OnceClosure non_void_weak_method_cb  =
-      BindOnce(IgnoreResult(&NoRef::IntMethod0),
-                  weak_factory.GetWeakPtr());
-  OnceClosure non_void_weak_const_method_cb =
-      BindOnce(IgnoreResult(&NoRef::IntConstMethod0),
-                  weak_factory.GetWeakPtr());
-
-  weak_factory.InvalidateWeakPtrs();
-  std::move(non_void_weak_const_method_cb).Run();
-  std::move(non_void_weak_method_cb).Run();
-}
-
-// Functions that take reference parameters.
-//  - Forced reference parameter type still stores a copy.
-//  - Forced const reference parameter type still stores a copy.
-TEST_F(BindTest, ReferenceArgumentBindingForRepeating) {
-  int n = 1;
-  int& ref_n = n;
-  const int& const_ref_n = n;
-
-  RepeatingCallback<int()> ref_copies_cb = BindRepeating(&Identity, ref_n);
-  EXPECT_EQ(n, ref_copies_cb.Run());
-  n++;
-  EXPECT_EQ(n - 1, ref_copies_cb.Run());
-
-  RepeatingCallback<int()> const_ref_copies_cb =
-      BindRepeating(&Identity, const_ref_n);
-  EXPECT_EQ(n, const_ref_copies_cb.Run());
-  n++;
-  EXPECT_EQ(n - 1, const_ref_copies_cb.Run());
-}
-
-TEST_F(BindTest, ReferenceArgumentBindingForOnce) {
-  int n = 1;
-  int& ref_n = n;
-  const int& const_ref_n = n;
-
-  OnceCallback<int()> ref_copies_cb = BindOnce(&Identity, ref_n);
-  n++;
-  EXPECT_EQ(n - 1, std::move(ref_copies_cb).Run());
-
-  OnceCallback<int()> const_ref_copies_cb =
-      BindOnce(&Identity, const_ref_n);
-  n++;
-  EXPECT_EQ(n - 1, std::move(const_ref_copies_cb).Run());
-}
-
-// Check that we can pass in arrays and have them be stored as a pointer.
-//  - Array of values stores a pointer.
-//  - Array of const values stores a pointer.
-TEST_F(BindTest, ArrayArgumentBindingForRepeating) {
-  int array[4] = {1, 1, 1, 1};
-  const int (*const_array_ptr)[4] = &array;
-
-  RepeatingCallback<int()> array_cb = BindRepeating(&ArrayGet, array, 1);
-  EXPECT_EQ(1, array_cb.Run());
-
-  RepeatingCallback<int()> const_array_cb =
-      BindRepeating(&ArrayGet, *const_array_ptr, 1);
-  EXPECT_EQ(1, const_array_cb.Run());
-
-  array[1] = 3;
-  EXPECT_EQ(3, array_cb.Run());
-  EXPECT_EQ(3, const_array_cb.Run());
-}
-
-TEST_F(BindTest, ArrayArgumentBindingForOnce) {
-  int array[4] = {1, 1, 1, 1};
-  const int (*const_array_ptr)[4] = &array;
-
-  OnceCallback<int()> array_cb = BindOnce(&ArrayGet, array, 1);
-  OnceCallback<int()> const_array_cb =
-      BindOnce(&ArrayGet, *const_array_ptr, 1);
-
-  array[1] = 3;
-  EXPECT_EQ(3, std::move(array_cb).Run());
-  EXPECT_EQ(3, std::move(const_array_cb).Run());
-}
-
-// WeakPtr() support.
-//   - Method bound to WeakPtr<> to non-const object.
-//   - Const method bound to WeakPtr<> to non-const object.
-//   - Const method bound to WeakPtr<> to const object.
-//   - Normal Function with WeakPtr<> as P1 can have return type and is
-//     not canceled.
-TEST_F(BindTest, WeakPtrForRepeating) {
-  EXPECT_CALL(no_ref_, VoidMethod0());
-  EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
-
-  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
-  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
-
-  RepeatingClosure method_cb =
-      BindRepeating(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
-  method_cb.Run();
-
-  RepeatingClosure const_method_cb =
-      BindRepeating(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
-  const_method_cb.Run();
-
-  RepeatingClosure const_method_const_ptr_cb =
-      BindRepeating(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
-  const_method_const_ptr_cb.Run();
-
-  RepeatingCallback<int(int)> normal_func_cb =
-      BindRepeating(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
-  EXPECT_EQ(1, normal_func_cb.Run(1));
-
-  weak_factory.InvalidateWeakPtrs();
-  const_weak_factory.InvalidateWeakPtrs();
-
-  method_cb.Run();
-  const_method_cb.Run();
-  const_method_const_ptr_cb.Run();
-
-  // Still runs even after the pointers are invalidated.
-  EXPECT_EQ(2, normal_func_cb.Run(2));
-}
-
-TEST_F(BindTest, WeakPtrForOnce) {
-  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
-  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
-
-  OnceClosure method_cb =
-      BindOnce(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
-  OnceClosure const_method_cb =
-      BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
-  OnceClosure const_method_const_ptr_cb =
-      BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
-  Callback<int(int)> normal_func_cb =
-      Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
-
-  weak_factory.InvalidateWeakPtrs();
-  const_weak_factory.InvalidateWeakPtrs();
-
-  std::move(method_cb).Run();
-  std::move(const_method_cb).Run();
-  std::move(const_method_const_ptr_cb).Run();
-
-  // Still runs even after the pointers are invalidated.
-  EXPECT_EQ(2, std::move(normal_func_cb).Run(2));
-}
-
-// ConstRef() wrapper support.
-//   - Binding w/o ConstRef takes a copy.
-//   - Binding a ConstRef takes a reference.
-//   - Binding ConstRef to a function ConstRef does not copy on invoke.
-TEST_F(BindTest, ConstRefForRepeating) {
-  int n = 1;
-
-  RepeatingCallback<int()> copy_cb = BindRepeating(&Identity, n);
-  RepeatingCallback<int()> const_ref_cb = BindRepeating(&Identity, ConstRef(n));
-  EXPECT_EQ(n, copy_cb.Run());
-  EXPECT_EQ(n, const_ref_cb.Run());
-  n++;
-  EXPECT_EQ(n - 1, copy_cb.Run());
-  EXPECT_EQ(n, const_ref_cb.Run());
-
-  int copies = 0;
-  int assigns = 0;
-  int move_constructs = 0;
-  int move_assigns = 0;
-  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
-  RepeatingCallback<int()> all_const_ref_cb =
-      BindRepeating(&GetCopies, ConstRef(counter));
-  EXPECT_EQ(0, all_const_ref_cb.Run());
-  EXPECT_EQ(0, copies);
-  EXPECT_EQ(0, assigns);
-  EXPECT_EQ(0, move_constructs);
-  EXPECT_EQ(0, move_assigns);
-}
-
-TEST_F(BindTest, ConstRefForOnce) {
-  int n = 1;
-
-  OnceCallback<int()> copy_cb = BindOnce(&Identity, n);
-  OnceCallback<int()> const_ref_cb = BindOnce(&Identity, ConstRef(n));
-  n++;
-  EXPECT_EQ(n - 1, std::move(copy_cb).Run());
-  EXPECT_EQ(n, std::move(const_ref_cb).Run());
-
-  int copies = 0;
-  int assigns = 0;
-  int move_constructs = 0;
-  int move_assigns = 0;
-  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
-  OnceCallback<int()> all_const_ref_cb =
-      BindOnce(&GetCopies, ConstRef(counter));
-  EXPECT_EQ(0, std::move(all_const_ref_cb).Run());
-  EXPECT_EQ(0, copies);
-  EXPECT_EQ(0, assigns);
-  EXPECT_EQ(0, move_constructs);
-  EXPECT_EQ(0, move_assigns);
-}
-
-// Test Owned() support.
-TEST_F(BindTest, OwnedForRepeating) {
-  int deletes = 0;
-  DeleteCounter* counter = new DeleteCounter(&deletes);
-
-  // If we don't capture, delete happens on Callback destruction/reset.
-  // return the same value.
-  RepeatingCallback<DeleteCounter*()> no_capture_cb =
-      BindRepeating(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
-  ASSERT_EQ(counter, no_capture_cb.Run());
-  ASSERT_EQ(counter, no_capture_cb.Run());
-  EXPECT_EQ(0, deletes);
-  no_capture_cb.Reset();  // This should trigger a delete.
-  EXPECT_EQ(1, deletes);
-
-  deletes = 0;
-  counter = new DeleteCounter(&deletes);
-  RepeatingClosure own_object_cb =
-      BindRepeating(&DeleteCounter::VoidMethod0, Owned(counter));
-  own_object_cb.Run();
-  EXPECT_EQ(0, deletes);
-  own_object_cb.Reset();
-  EXPECT_EQ(1, deletes);
-}
-
-TEST_F(BindTest, OwnedForOnce) {
-  int deletes = 0;
-  DeleteCounter* counter = new DeleteCounter(&deletes);
-
-  // If we don't capture, delete happens on Callback destruction/reset.
-  // return the same value.
-  OnceCallback<DeleteCounter*()> no_capture_cb =
-      BindOnce(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
-  EXPECT_EQ(0, deletes);
-  no_capture_cb.Reset();  // This should trigger a delete.
-  EXPECT_EQ(1, deletes);
-
-  deletes = 0;
-  counter = new DeleteCounter(&deletes);
-  OnceClosure own_object_cb =
-      BindOnce(&DeleteCounter::VoidMethod0, Owned(counter));
-  EXPECT_EQ(0, deletes);
-  own_object_cb.Reset();
-  EXPECT_EQ(1, deletes);
-}
-
-template <typename T>
-class BindVariantsTest : public ::testing::Test {
-};
-
-struct RepeatingTestConfig {
-  template <typename Signature>
-  using CallbackType = RepeatingCallback<Signature>;
-  using ClosureType = RepeatingClosure;
-
-  template <typename F, typename... Args>
-  static CallbackType<MakeUnboundRunType<F, Args...>>
-  Bind(F&& f, Args&&... args) {
-    return BindRepeating(std::forward<F>(f), std::forward<Args>(args)...);
-  }
-};
-
-struct OnceTestConfig {
-  template <typename Signature>
-  using CallbackType = OnceCallback<Signature>;
-  using ClosureType = OnceClosure;
-
-  template <typename F, typename... Args>
-  static CallbackType<MakeUnboundRunType<F, Args...>>
-  Bind(F&& f, Args&&... args) {
-    return BindOnce(std::forward<F>(f), std::forward<Args>(args)...);
-  }
-};
-
-using BindVariantsTestConfig = ::testing::Types<
-  RepeatingTestConfig, OnceTestConfig>;
-TYPED_TEST_CASE(BindVariantsTest, BindVariantsTestConfig);
-
-template <typename TypeParam, typename Signature>
-using CallbackType = typename TypeParam::template CallbackType<Signature>;
-
 // Function type support.
 //   - Normal function.
 //   - Normal function bound with non-refcounted first argument.
@@ -787,49 +423,40 @@
 //   - Derived classes can be used with pointers to non-virtual base functions.
 //   - Derived classes can be used with pointers to virtual base functions (and
 //     preserve virtual dispatch).
-TYPED_TEST(BindVariantsTest, FunctionTypeSupport) {
-  using ClosureType = typename TypeParam::ClosureType;
+TEST_F(BindTest, FunctionTypeSupport) {
+  EXPECT_CALL(static_func_mock_, VoidMethod0());
+  EXPECT_CALL(has_ref_, AddRef()).Times(4);
+  EXPECT_CALL(has_ref_, Release()).Times(4);
+  EXPECT_CALL(has_ref_, VoidMethod0()).Times(2);
+  EXPECT_CALL(has_ref_, VoidConstMethod0()).Times(2);
 
-  StrictMock<HasRef> has_ref;
-  StrictMock<NoRef> no_ref;
-  StrictMock<NoRef> static_func_mock;
-  const HasRef* const_has_ref_ptr = &has_ref;
-  g_func_mock_ptr = &static_func_mock;
+  Closure normal_cb = Bind(&VoidFunc0);
+  Callback<NoRef*()> normal_non_refcounted_cb =
+      Bind(&PolymorphicIdentity<NoRef*>, &no_ref_);
+  normal_cb.Run();
+  EXPECT_EQ(&no_ref_, normal_non_refcounted_cb.Run());
 
-  EXPECT_CALL(static_func_mock, VoidMethod0());
-  EXPECT_CALL(has_ref, AddRef()).Times(4);
-  EXPECT_CALL(has_ref, Release()).Times(4);
-  EXPECT_CALL(has_ref, VoidMethod0()).Times(2);
-  EXPECT_CALL(has_ref, VoidConstMethod0()).Times(2);
-
-  ClosureType normal_cb = TypeParam::Bind(&VoidFunc0);
-  CallbackType<TypeParam, NoRef*()> normal_non_refcounted_cb =
-      TypeParam::Bind(&PolymorphicIdentity<NoRef*>, &no_ref);
-  std::move(normal_cb).Run();
-  EXPECT_EQ(&no_ref, std::move(normal_non_refcounted_cb).Run());
-
-  ClosureType method_cb = TypeParam::Bind(&HasRef::VoidMethod0, &has_ref);
-  ClosureType method_refptr_cb = TypeParam::Bind(&HasRef::VoidMethod0,
-                                                 make_scoped_refptr(&has_ref));
-  ClosureType const_method_nonconst_obj_cb =
-      TypeParam::Bind(&HasRef::VoidConstMethod0, &has_ref);
-  ClosureType const_method_const_obj_cb =
-      TypeParam::Bind(&HasRef::VoidConstMethod0, const_has_ref_ptr);
-  std::move(method_cb).Run();
-  std::move(method_refptr_cb).Run();
-  std::move(const_method_nonconst_obj_cb).Run();
-  std::move(const_method_const_obj_cb).Run();
+  Closure method_cb = Bind(&HasRef::VoidMethod0, &has_ref_);
+  Closure method_refptr_cb = Bind(&HasRef::VoidMethod0,
+                                  make_scoped_refptr(&has_ref_));
+  Closure const_method_nonconst_obj_cb = Bind(&HasRef::VoidConstMethod0,
+                                              &has_ref_);
+  Closure const_method_const_obj_cb = Bind(&HasRef::VoidConstMethod0,
+                                           const_has_ref_ptr_);
+  method_cb.Run();
+  method_refptr_cb.Run();
+  const_method_nonconst_obj_cb.Run();
+  const_method_const_obj_cb.Run();
 
   Child child;
   child.value = 0;
-  ClosureType virtual_set_cb = TypeParam::Bind(&Parent::VirtualSet, &child);
-  std::move(virtual_set_cb).Run();
+  Closure virtual_set_cb = Bind(&Parent::VirtualSet, &child);
+  virtual_set_cb.Run();
   EXPECT_EQ(kChildValue, child.value);
 
   child.value = 0;
-  ClosureType non_virtual_set_cb =
-      TypeParam::Bind(&Parent::NonVirtualSet, &child);
-  std::move(non_virtual_set_cb).Run();
+  Closure non_virtual_set_cb = Bind(&Parent::NonVirtualSet, &child);
+  non_virtual_set_cb.Run();
   EXPECT_EQ(kParentValue, child.value);
 }
 
@@ -837,37 +464,67 @@
 //   - Function with return value.
 //   - Method with return value.
 //   - Const method with return value.
-//   - Move-only return value.
-TYPED_TEST(BindVariantsTest, ReturnValues) {
-  StrictMock<NoRef> static_func_mock;
-  StrictMock<HasRef> has_ref;
-  g_func_mock_ptr = &static_func_mock;
-  const HasRef* const_has_ref_ptr = &has_ref;
-
-  EXPECT_CALL(static_func_mock, IntMethod0()).WillOnce(Return(1337));
-  EXPECT_CALL(has_ref, AddRef()).Times(4);
-  EXPECT_CALL(has_ref, Release()).Times(4);
-  EXPECT_CALL(has_ref, IntMethod0()).WillOnce(Return(31337));
-  EXPECT_CALL(has_ref, IntConstMethod0())
+TEST_F(BindTest, ReturnValues) {
+  EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
+  EXPECT_CALL(has_ref_, AddRef()).Times(3);
+  EXPECT_CALL(has_ref_, Release()).Times(3);
+  EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(31337));
+  EXPECT_CALL(has_ref_, IntConstMethod0())
       .WillOnce(Return(41337))
       .WillOnce(Return(51337));
-  EXPECT_CALL(has_ref, UniquePtrMethod0())
-      .WillOnce(Return(ByMove(MakeUnique<int>(42))));
 
-  CallbackType<TypeParam, int()> normal_cb = TypeParam::Bind(&IntFunc0);
-  CallbackType<TypeParam, int()> method_cb =
-      TypeParam::Bind(&HasRef::IntMethod0, &has_ref);
-  CallbackType<TypeParam, int()> const_method_nonconst_obj_cb =
-      TypeParam::Bind(&HasRef::IntConstMethod0, &has_ref);
-  CallbackType<TypeParam, int()> const_method_const_obj_cb =
-      TypeParam::Bind(&HasRef::IntConstMethod0, const_has_ref_ptr);
-  CallbackType<TypeParam, std::unique_ptr<int>()> move_only_rv_cb =
-      TypeParam::Bind(&HasRef::UniquePtrMethod0, &has_ref);
-  EXPECT_EQ(1337, std::move(normal_cb).Run());
-  EXPECT_EQ(31337, std::move(method_cb).Run());
-  EXPECT_EQ(41337, std::move(const_method_nonconst_obj_cb).Run());
-  EXPECT_EQ(51337, std::move(const_method_const_obj_cb).Run());
-  EXPECT_EQ(42, *std::move(move_only_rv_cb).Run());
+  Callback<int()> normal_cb = Bind(&IntFunc0);
+  Callback<int()> method_cb = Bind(&HasRef::IntMethod0, &has_ref_);
+  Callback<int()> const_method_nonconst_obj_cb =
+      Bind(&HasRef::IntConstMethod0, &has_ref_);
+  Callback<int()> const_method_const_obj_cb =
+      Bind(&HasRef::IntConstMethod0, const_has_ref_ptr_);
+  EXPECT_EQ(1337, normal_cb.Run());
+  EXPECT_EQ(31337, method_cb.Run());
+  EXPECT_EQ(41337, const_method_nonconst_obj_cb.Run());
+  EXPECT_EQ(51337, const_method_const_obj_cb.Run());
+}
+
+// IgnoreResult adapter test.
+//   - Function with return value.
+//   - Method with return value.
+//   - Const Method with return.
+//   - Method with return value bound to WeakPtr<>.
+//   - Const Method with return bound to WeakPtr<>.
+TEST_F(BindTest, IgnoreResult) {
+  EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
+  EXPECT_CALL(has_ref_, AddRef()).Times(2);
+  EXPECT_CALL(has_ref_, Release()).Times(2);
+  EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(10));
+  EXPECT_CALL(has_ref_, IntConstMethod0()).WillOnce(Return(11));
+  EXPECT_CALL(no_ref_, IntMethod0()).WillOnce(Return(12));
+  EXPECT_CALL(no_ref_, IntConstMethod0()).WillOnce(Return(13));
+
+  Closure normal_func_cb = Bind(IgnoreResult(&IntFunc0));
+  normal_func_cb.Run();
+
+  Closure non_void_method_cb =
+      Bind(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
+  non_void_method_cb.Run();
+
+  Closure non_void_const_method_cb =
+      Bind(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
+  non_void_const_method_cb.Run();
+
+  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+  Closure non_void_weak_method_cb  =
+      Bind(IgnoreResult(&NoRef::IntMethod0), weak_factory.GetWeakPtr());
+  non_void_weak_method_cb.Run();
+
+  Closure non_void_weak_const_method_cb =
+      Bind(IgnoreResult(&NoRef::IntConstMethod0), weak_factory.GetWeakPtr());
+  non_void_weak_const_method_cb.Run();
+
+  weak_factory.InvalidateWeakPtrs();
+  non_void_weak_const_method_cb.Run();
+  non_void_weak_method_cb.Run();
 }
 
 // Argument binding tests.
@@ -881,33 +538,51 @@
 //   - Argument gets type converted.
 //   - Pointer argument gets converted.
 //   - Const Reference forces conversion.
-TYPED_TEST(BindVariantsTest, ArgumentBinding) {
+TEST_F(BindTest, ArgumentBinding) {
   int n = 2;
 
-  EXPECT_EQ(n, TypeParam::Bind(&Identity, n).Run());
-  EXPECT_EQ(&n, TypeParam::Bind(&PolymorphicIdentity<int*>, &n).Run());
-  EXPECT_EQ(3, TypeParam::Bind(&Identity, 3).Run());
-  EXPECT_STREQ("hi", TypeParam::Bind(&CStringIdentity, "hi").Run());
-  EXPECT_EQ(4, TypeParam::Bind(&PolymorphicIdentity<int>, 4).Run());
+  Callback<int()> bind_primitive_cb = Bind(&Identity, n);
+  EXPECT_EQ(n, bind_primitive_cb.Run());
+
+  Callback<int*()> bind_primitive_pointer_cb =
+      Bind(&PolymorphicIdentity<int*>, &n);
+  EXPECT_EQ(&n, bind_primitive_pointer_cb.Run());
+
+  Callback<int()> bind_int_literal_cb = Bind(&Identity, 3);
+  EXPECT_EQ(3, bind_int_literal_cb.Run());
+
+  Callback<const char*()> bind_string_literal_cb =
+      Bind(&CStringIdentity, "hi");
+  EXPECT_STREQ("hi", bind_string_literal_cb.Run());
+
+  Callback<int()> bind_template_function_cb =
+      Bind(&PolymorphicIdentity<int>, 4);
+  EXPECT_EQ(4, bind_template_function_cb.Run());
 
   NoRefParent p;
   p.value = 5;
-  EXPECT_EQ(5, TypeParam::Bind(&UnwrapNoRefParent, p).Run());
+  Callback<int()> bind_object_cb = Bind(&UnwrapNoRefParent, p);
+  EXPECT_EQ(5, bind_object_cb.Run());
 
   IncompleteType* incomplete_ptr = reinterpret_cast<IncompleteType*>(123);
-  EXPECT_EQ(incomplete_ptr,
-            TypeParam::Bind(&PolymorphicIdentity<IncompleteType*>,
-                            incomplete_ptr).Run());
+  Callback<IncompleteType*()> bind_incomplete_ptr_cb =
+      Bind(&PolymorphicIdentity<IncompleteType*>, incomplete_ptr);
+  EXPECT_EQ(incomplete_ptr, bind_incomplete_ptr_cb.Run());
 
   NoRefChild c;
   c.value = 6;
-  EXPECT_EQ(6, TypeParam::Bind(&UnwrapNoRefParent, c).Run());
+  Callback<int()> bind_promotes_cb = Bind(&UnwrapNoRefParent, c);
+  EXPECT_EQ(6, bind_promotes_cb.Run());
 
   c.value = 7;
-  EXPECT_EQ(7, TypeParam::Bind(&UnwrapNoRefParentPtr, &c).Run());
+  Callback<int()> bind_pointer_promotes_cb =
+      Bind(&UnwrapNoRefParentPtr, &c);
+  EXPECT_EQ(7, bind_pointer_promotes_cb.Run());
 
   c.value = 8;
-  EXPECT_EQ(8, TypeParam::Bind(&UnwrapNoRefParentConstRef, c).Run());
+  Callback<int()> bind_const_reference_promotes_cb =
+      Bind(&UnwrapNoRefParentConstRef, c);
+  EXPECT_EQ(8, bind_const_reference_promotes_cb.Run());
 }
 
 // Unbound argument type support tests.
@@ -918,67 +593,198 @@
 //   - Unbound unsized array.
 //   - Unbound sized array.
 //   - Unbound array-of-arrays.
-TYPED_TEST(BindVariantsTest, UnboundArgumentTypeSupport) {
-  CallbackType<TypeParam, void(int)> unbound_value_cb =
-      TypeParam::Bind(&VoidPolymorphic<int>::Run);
-  CallbackType<TypeParam, void(int*)> unbound_pointer_cb =
-      TypeParam::Bind(&VoidPolymorphic<int*>::Run);
-  CallbackType<TypeParam, void(int&)> unbound_ref_cb =
-      TypeParam::Bind(&VoidPolymorphic<int&>::Run);
-  CallbackType<TypeParam, void(const int&)> unbound_const_ref_cb =
-      TypeParam::Bind(&VoidPolymorphic<const int&>::Run);
-  CallbackType<TypeParam, void(int[])> unbound_unsized_array_cb =
-      TypeParam::Bind(&VoidPolymorphic<int[]>::Run);
-  CallbackType<TypeParam, void(int[2])> unbound_sized_array_cb =
-      TypeParam::Bind(&VoidPolymorphic<int[2]>::Run);
-  CallbackType<TypeParam, void(int[][2])> unbound_array_of_arrays_cb =
-      TypeParam::Bind(&VoidPolymorphic<int[][2]>::Run);
-  CallbackType<TypeParam, void(int&)> unbound_ref_with_bound_arg =
-      TypeParam::Bind(&VoidPolymorphic<int, int&>::Run, 1);
+TEST_F(BindTest, UnboundArgumentTypeSupport) {
+  Callback<void(int)> unbound_value_cb = Bind(&VoidPolymorphic<int>::Run);
+  Callback<void(int*)> unbound_pointer_cb = Bind(&VoidPolymorphic<int*>::Run);
+  Callback<void(int&)> unbound_ref_cb = Bind(&VoidPolymorphic<int&>::Run);
+  Callback<void(const int&)> unbound_const_ref_cb =
+      Bind(&VoidPolymorphic<const int&>::Run);
+  Callback<void(int[])> unbound_unsized_array_cb =
+      Bind(&VoidPolymorphic<int[]>::Run);
+  Callback<void(int[2])> unbound_sized_array_cb =
+      Bind(&VoidPolymorphic<int[2]>::Run);
+  Callback<void(int[][2])> unbound_array_of_arrays_cb =
+      Bind(&VoidPolymorphic<int[][2]>::Run);
+
+  Callback<void(int&)> unbound_ref_with_bound_arg =
+      Bind(&VoidPolymorphic<int, int&>::Run, 1);
 }
 
 // Function with unbound reference parameter.
 //   - Original parameter is modified by callback.
-TYPED_TEST(BindVariantsTest, UnboundReferenceSupport) {
+TEST_F(BindTest, UnboundReferenceSupport) {
   int n = 0;
-  CallbackType<TypeParam, void(int&)> unbound_ref_cb =
-      TypeParam::Bind(&RefArgSet);
-  std::move(unbound_ref_cb).Run(n);
+  Callback<void(int&)> unbound_ref_cb = Bind(&RefArgSet);
+  unbound_ref_cb.Run(n);
   EXPECT_EQ(2, n);
 }
 
+// Functions that take reference parameters.
+//  - Forced reference parameter type still stores a copy.
+//  - Forced const reference parameter type still stores a copy.
+TEST_F(BindTest, ReferenceArgumentBinding) {
+  int n = 1;
+  int& ref_n = n;
+  const int& const_ref_n = n;
+
+  Callback<int()> ref_copies_cb = Bind(&Identity, ref_n);
+  EXPECT_EQ(n, ref_copies_cb.Run());
+  n++;
+  EXPECT_EQ(n - 1, ref_copies_cb.Run());
+
+  Callback<int()> const_ref_copies_cb = Bind(&Identity, const_ref_n);
+  EXPECT_EQ(n, const_ref_copies_cb.Run());
+  n++;
+  EXPECT_EQ(n - 1, const_ref_copies_cb.Run());
+}
+
+// Check that we can pass in arrays and have them be stored as a pointer.
+//  - Array of values stores a pointer.
+//  - Array of const values stores a pointer.
+TEST_F(BindTest, ArrayArgumentBinding) {
+  int array[4] = {1, 1, 1, 1};
+  const int (*const_array_ptr)[4] = &array;
+
+  Callback<int()> array_cb = Bind(&ArrayGet, array, 1);
+  EXPECT_EQ(1, array_cb.Run());
+
+  Callback<int()> const_array_cb = Bind(&ArrayGet, *const_array_ptr, 1);
+  EXPECT_EQ(1, const_array_cb.Run());
+
+  array[1] = 3;
+  EXPECT_EQ(3, array_cb.Run());
+  EXPECT_EQ(3, const_array_cb.Run());
+}
+
 // Unretained() wrapper support.
 //   - Method bound to Unretained() non-const object.
 //   - Const method bound to Unretained() non-const object.
 //   - Const method bound to Unretained() const object.
-TYPED_TEST(BindVariantsTest, Unretained) {
-  StrictMock<NoRef> no_ref;
-  const NoRef* const_no_ref_ptr = &no_ref;
+TEST_F(BindTest, Unretained) {
+  EXPECT_CALL(no_ref_, VoidMethod0());
+  EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
 
-  EXPECT_CALL(no_ref, VoidMethod0());
-  EXPECT_CALL(no_ref, VoidConstMethod0()).Times(2);
+  Callback<void()> method_cb =
+      Bind(&NoRef::VoidMethod0, Unretained(&no_ref_));
+  method_cb.Run();
 
-  TypeParam::Bind(&NoRef::VoidMethod0, Unretained(&no_ref)).Run();
-  TypeParam::Bind(&NoRef::VoidConstMethod0, Unretained(&no_ref)).Run();
-  TypeParam::Bind(&NoRef::VoidConstMethod0, Unretained(const_no_ref_ptr)).Run();
+  Callback<void()> const_method_cb =
+      Bind(&NoRef::VoidConstMethod0, Unretained(&no_ref_));
+  const_method_cb.Run();
+
+  Callback<void()> const_method_const_ptr_cb =
+      Bind(&NoRef::VoidConstMethod0, Unretained(const_no_ref_ptr_));
+  const_method_const_ptr_cb.Run();
 }
 
-TYPED_TEST(BindVariantsTest, ScopedRefptr) {
-  StrictMock<HasRef> has_ref;
-  EXPECT_CALL(has_ref, AddRef()).Times(1);
-  EXPECT_CALL(has_ref, Release()).Times(1);
+// WeakPtr() support.
+//   - Method bound to WeakPtr<> to non-const object.
+//   - Const method bound to WeakPtr<> to non-const object.
+//   - Const method bound to WeakPtr<> to const object.
+//   - Normal Function with WeakPtr<> as P1 can have return type and is
+//     not canceled.
+TEST_F(BindTest, WeakPtr) {
+  EXPECT_CALL(no_ref_, VoidMethod0());
+  EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
 
-  const scoped_refptr<HasRef> refptr(&has_ref);
-  CallbackType<TypeParam, int()> scoped_refptr_const_ref_cb =
-      TypeParam::Bind(&FunctionWithScopedRefptrFirstParam,
-                      base::ConstRef(refptr), 1);
-  EXPECT_EQ(1, std::move(scoped_refptr_const_ref_cb).Run());
+  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+  WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+  Closure method_cb =
+      Bind(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
+  method_cb.Run();
+
+  Closure const_method_cb =
+      Bind(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+  const_method_cb.Run();
+
+  Closure const_method_const_ptr_cb =
+      Bind(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+  const_method_const_ptr_cb.Run();
+
+  Callback<int(int)> normal_func_cb =
+      Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+  EXPECT_EQ(1, normal_func_cb.Run(1));
+
+  weak_factory.InvalidateWeakPtrs();
+  const_weak_factory.InvalidateWeakPtrs();
+
+  method_cb.Run();
+  const_method_cb.Run();
+  const_method_const_ptr_cb.Run();
+
+  // Still runs even after the pointers are invalidated.
+  EXPECT_EQ(2, normal_func_cb.Run(2));
 }
 
-TYPED_TEST(BindVariantsTest, UniquePtrReceiver) {
+// ConstRef() wrapper support.
+//   - Binding w/o ConstRef takes a copy.
+//   - Binding a ConstRef takes a reference.
+//   - Binding ConstRef to a function ConstRef does not copy on invoke.
+TEST_F(BindTest, ConstRef) {
+  int n = 1;
+
+  Callback<int()> copy_cb = Bind(&Identity, n);
+  Callback<int()> const_ref_cb = Bind(&Identity, ConstRef(n));
+  EXPECT_EQ(n, copy_cb.Run());
+  EXPECT_EQ(n, const_ref_cb.Run());
+  n++;
+  EXPECT_EQ(n - 1, copy_cb.Run());
+  EXPECT_EQ(n, const_ref_cb.Run());
+
+  int copies = 0;
+  int assigns = 0;
+  int move_constructs = 0;
+  int move_assigns = 0;
+  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+  Callback<int()> all_const_ref_cb =
+      Bind(&GetCopies, ConstRef(counter));
+  EXPECT_EQ(0, all_const_ref_cb.Run());
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(0, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+}
+
+TEST_F(BindTest, ScopedRefptr) {
+  EXPECT_CALL(has_ref_, AddRef()).Times(1);
+  EXPECT_CALL(has_ref_, Release()).Times(1);
+
+  const scoped_refptr<HasRef> refptr(&has_ref_);
+  Callback<int()> scoped_refptr_const_ref_cb =
+      Bind(&FunctionWithScopedRefptrFirstParam, base::ConstRef(refptr), 1);
+  EXPECT_EQ(1, scoped_refptr_const_ref_cb.Run());
+}
+
+// Test Owned() support.
+TEST_F(BindTest, Owned) {
+  int deletes = 0;
+  DeleteCounter* counter = new DeleteCounter(&deletes);
+
+  // If we don't capture, delete happens on Callback destruction/reset.
+  // return the same value.
+  Callback<DeleteCounter*()> no_capture_cb =
+      Bind(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
+  ASSERT_EQ(counter, no_capture_cb.Run());
+  ASSERT_EQ(counter, no_capture_cb.Run());
+  EXPECT_EQ(0, deletes);
+  no_capture_cb.Reset();  // This should trigger a delete.
+  EXPECT_EQ(1, deletes);
+
+  deletes = 0;
+  counter = new DeleteCounter(&deletes);
+  base::Closure own_object_cb =
+      Bind(&DeleteCounter::VoidMethod0, Owned(counter));
+  own_object_cb.Run();
+  EXPECT_EQ(0, deletes);
+  own_object_cb.Reset();
+  EXPECT_EQ(1, deletes);
+}
+
+TEST_F(BindTest, UniquePtrReceiver) {
   std::unique_ptr<StrictMock<NoRef>> no_ref(new StrictMock<NoRef>);
   EXPECT_CALL(*no_ref, VoidMethod0()).Times(1);
-  TypeParam::Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
+  Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
 }
 
 // Tests for Passed() wrapper support:
@@ -997,6 +803,7 @@
 
 using MoveOnlyTypesToTest =
     ::testing::Types<std::unique_ptr<DeleteCounter>,
+                     std::unique_ptr<DeleteCounter>,
                      std::unique_ptr<DeleteCounter, CustomDeleter>>;
 TYPED_TEST_CASE(BindMoveOnlyTypeTest, MoveOnlyTypesToTest);
 
@@ -1247,7 +1054,7 @@
   EXPECT_TRUE(internal::IsConvertibleToRunType<decltype(f)>::value);
 
   int i = 0;
-  auto g = [i]() { (void)i; };
+  auto g = [i]() {};
   EXPECT_FALSE(internal::IsConvertibleToRunType<decltype(g)>::value);
 
   auto h = [](int, double) { return 'k'; };
@@ -1267,126 +1074,6 @@
   EXPECT_EQ(42, x);
 }
 
-TEST_F(BindTest, Cancellation) {
-  EXPECT_CALL(no_ref_, VoidMethodWithIntArg(_)).Times(2);
-
-  WeakPtrFactory<NoRef> weak_factory(&no_ref_);
-  RepeatingCallback<void(int)> cb =
-      BindRepeating(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
-  RepeatingClosure cb2 = BindRepeating(cb, 8);
-  OnceClosure cb3 = BindOnce(cb, 8);
-
-  OnceCallback<void(int)> cb4 =
-      BindOnce(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
-  EXPECT_FALSE(cb4.IsCancelled());
-
-  OnceClosure cb5 = BindOnce(std::move(cb4), 8);
-
-  EXPECT_FALSE(cb.IsCancelled());
-  EXPECT_FALSE(cb2.IsCancelled());
-  EXPECT_FALSE(cb3.IsCancelled());
-  EXPECT_FALSE(cb5.IsCancelled());
-
-  cb.Run(6);
-  cb2.Run();
-
-  weak_factory.InvalidateWeakPtrs();
-
-  EXPECT_TRUE(cb.IsCancelled());
-  EXPECT_TRUE(cb2.IsCancelled());
-  EXPECT_TRUE(cb3.IsCancelled());
-  EXPECT_TRUE(cb5.IsCancelled());
-
-  cb.Run(6);
-  cb2.Run();
-  std::move(cb3).Run();
-  std::move(cb5).Run();
-}
-
-TEST_F(BindTest, OnceCallback) {
-  // Check if Callback variants have declarations of conversions as expected.
-  // Copy constructor and assignment of RepeatingCallback.
-  static_assert(std::is_constructible<
-      RepeatingClosure, const RepeatingClosure&>::value,
-      "RepeatingClosure should be copyable.");
-  static_assert(is_assignable<
-      RepeatingClosure, const RepeatingClosure&>::value,
-      "RepeatingClosure should be copy-assignable.");
-
-  // Move constructor and assignment of RepeatingCallback.
-  static_assert(std::is_constructible<
-      RepeatingClosure, RepeatingClosure&&>::value,
-      "RepeatingClosure should be movable.");
-  static_assert(is_assignable<
-      RepeatingClosure, RepeatingClosure&&>::value,
-      "RepeatingClosure should be move-assignable");
-
-  // Conversions from OnceCallback to RepeatingCallback.
-  static_assert(!std::is_constructible<
-      RepeatingClosure, const OnceClosure&>::value,
-      "OnceClosure should not be convertible to RepeatingClosure.");
-  static_assert(!is_assignable<
-      RepeatingClosure, const OnceClosure&>::value,
-      "OnceClosure should not be convertible to RepeatingClosure.");
-
-  // Destructive conversions from OnceCallback to RepeatingCallback.
-  static_assert(!std::is_constructible<
-      RepeatingClosure, OnceClosure&&>::value,
-      "OnceClosure should not be convertible to RepeatingClosure.");
-  static_assert(!is_assignable<
-      RepeatingClosure, OnceClosure&&>::value,
-      "OnceClosure should not be convertible to RepeatingClosure.");
-
-  // Copy constructor and assignment of OnceCallback.
-  static_assert(!std::is_constructible<
-      OnceClosure, const OnceClosure&>::value,
-      "OnceClosure should not be copyable.");
-  static_assert(!is_assignable<
-      OnceClosure, const OnceClosure&>::value,
-      "OnceClosure should not be copy-assignable");
-
-  // Move constructor and assignment of OnceCallback.
-  static_assert(std::is_constructible<
-      OnceClosure, OnceClosure&&>::value,
-      "OnceClosure should be movable.");
-  static_assert(is_assignable<
-      OnceClosure, OnceClosure&&>::value,
-      "OnceClosure should be move-assignable.");
-
-  // Conversions from RepeatingCallback to OnceCallback.
-  static_assert(std::is_constructible<
-      OnceClosure, const RepeatingClosure&>::value,
-      "RepeatingClosure should be convertible to OnceClosure.");
-  static_assert(is_assignable<
-      OnceClosure, const RepeatingClosure&>::value,
-      "RepeatingClosure should be convertible to OnceClosure.");
-
-  // Destructive conversions from RepeatingCallback to OnceCallback.
-  static_assert(std::is_constructible<
-      OnceClosure, RepeatingClosure&&>::value,
-      "RepeatingClosure should be convertible to OnceClosure.");
-  static_assert(is_assignable<
-      OnceClosure, RepeatingClosure&&>::value,
-      "RepeatingClosure should be covretible to OnceClosure.");
-
-  OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
-  std::move(cb).Run();
-
-  // RepeatingCallback should be convertible to OnceCallback.
-  OnceClosure cb2 = BindRepeating(&VoidPolymorphic<>::Run);
-  std::move(cb2).Run();
-
-  RepeatingClosure cb3 = BindRepeating(&VoidPolymorphic<>::Run);
-  cb = cb3;
-  std::move(cb).Run();
-
-  cb = std::move(cb2);
-
-  OnceCallback<void(int)> cb4 = BindOnce(
-      &VoidPolymorphic<std::unique_ptr<int>, int>::Run, MakeUnique<int>(0));
-  BindOnce(std::move(cb4), 1).Run();
-}
-
 // Callback construction and assignment tests.
 //   - Construction from an InvokerStorageHolder should not cause ref/deref.
 //   - Assignment from other callback should only cause one ref
@@ -1414,12 +1101,17 @@
 }
 #endif
 
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
 // Test null callbacks cause a DCHECK.
 TEST(BindDeathTest, NullCallback) {
   base::Callback<void(int)> null_cb;
   ASSERT_TRUE(null_cb.is_null());
-  EXPECT_DCHECK_DEATH(base::Bind(null_cb, 42));
+  EXPECT_DEATH(base::Bind(null_cb, 42), "");
 }
 
+#endif  // (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) &&
+        //     GTEST_HAS_DEATH_TEST
+
 }  // namespace
 }  // namespace base
diff --git a/base/bit_cast.h b/base/bit_cast.h
index 90dd925..c9514bc 100644
--- a/base/bit_cast.h
+++ b/base/bit_cast.h
@@ -9,7 +9,6 @@
 #include <type_traits>
 
 #include "base/compiler_specific.h"
-#include "base/template_util.h"
 #include "build/build_config.h"
 
 // bit_cast<Dest,Source> is a template function that implements the equivalent
@@ -64,10 +63,34 @@
 inline Dest bit_cast(const Source& source) {
   static_assert(sizeof(Dest) == sizeof(Source),
                 "bit_cast requires source and destination to be the same size");
-  static_assert(base::is_trivially_copyable<Dest>::value,
-                "bit_cast requires the destination type to be copyable");
-  static_assert(base::is_trivially_copyable<Source>::value,
-                "bit_cast requires the source type to be copyable");
+
+#if (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) || \
+     (defined(__clang__) && defined(_LIBCPP_VERSION)))
+  // GCC 5.1 contains the first libstdc++ with is_trivially_copyable.
+  // Assume libc++ Just Works: is_trivially_copyable added on May 13th 2011.
+  // However, with libc++ when GCC is the compiler the trait is buggy, see
+  // crbug.com/607158, so fall back to the less strict variant for non-clang.
+  static_assert(std::is_trivially_copyable<Dest>::value,
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(std::is_trivially_copyable<Source>::value,
+                "non-trivially-copyable bit_cast is undefined");
+#elif HAS_FEATURE(is_trivially_copyable)
+  // The compiler supports an equivalent intrinsic.
+  static_assert(__is_trivially_copyable(Dest),
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(__is_trivially_copyable(Source),
+                "non-trivially-copyable bit_cast is undefined");
+#elif COMPILER_GCC
+  // Fallback to compiler intrinsic on GCC and clang (which pretends to be
+  // GCC). This isn't quite the same as is_trivially_copyable but it'll do for
+  // our purpose.
+  static_assert(__has_trivial_copy(Dest),
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(__has_trivial_copy(Source),
+                "non-trivially-copyable bit_cast is undefined");
+#else
+  // Do nothing, let the bots handle it.
+#endif
 
   Dest dest;
   memcpy(&dest, &source, sizeof(dest));
diff --git a/base/bits.h b/base/bits.h
index d101cb7..a3a59d1 100644
--- a/base/bits.h
+++ b/base/bits.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -10,13 +10,8 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include "base/compiler_specific.h"
 #include "base/logging.h"
 
-#if defined(COMPILER_MSVC)
-#include <intrin.h>
-#endif
-
 namespace base {
 namespace bits {
 
@@ -54,58 +49,6 @@
   return (size + alignment - 1) & ~(alignment - 1);
 }
 
-// These functions count the number of leading zeros in a binary value, starting
-// with the most significant bit. C does not have an operator to do this, but
-// fortunately the various compilers have built-ins that map to fast underlying
-// processor instructions.
-#if defined(COMPILER_MSVC)
-
-ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
-  unsigned long index;
-  return LIKELY(_BitScanReverse(&index, x)) ? (31 - index) : 32;
-}
-
-#if defined(ARCH_CPU_64_BITS)
-
-// MSVC only supplies _BitScanForward64 when building for a 64-bit target.
-ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
-  unsigned long index;
-  return LIKELY(_BitScanReverse64(&index, x)) ? (63 - index) : 64;
-}
-
-#endif
-
-#elif defined(COMPILER_GCC)
-
-// This is very annoying. __builtin_clz has undefined behaviour for an input of
-// 0, even though there's clearly a return value that makes sense, and even
-// though some processor clz instructions have defined behaviour for 0. We could
-// drop to raw __asm__ to do better, but we'll avoid doing that unless we see
-// proof that we need to.
-ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
-  return LIKELY(x) ? __builtin_clz(x) : 32;
-}
-
-ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
-  return LIKELY(x) ? __builtin_clzll(x) : 64;
-}
-
-#endif
-
-#if defined(ARCH_CPU_64_BITS)
-
-ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
-  return CountLeadingZeroBits64(x);
-}
-
-#else
-
-ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
-  return CountLeadingZeroBits32(x);
-}
-
-#endif
-
 }  // namespace bits
 }  // namespace base
 
diff --git a/base/bits_unittest.cc b/base/bits_unittest.cc
index 270b8ef..4f5b6ea 100644
--- a/base/bits_unittest.cc
+++ b/base/bits_unittest.cc
@@ -61,25 +61,5 @@
   EXPECT_EQ(kSizeTMax / 2 + 1, Align(1, kSizeTMax / 2 + 1));
 }
 
-TEST(BitsTest, CLZWorks) {
-  EXPECT_EQ(32u, CountLeadingZeroBits32(0u));
-  EXPECT_EQ(31u, CountLeadingZeroBits32(1u));
-  EXPECT_EQ(1u, CountLeadingZeroBits32(1u << 30));
-  EXPECT_EQ(0u, CountLeadingZeroBits32(1u << 31));
-
-#if defined(ARCH_CPU_64_BITS)
-  EXPECT_EQ(64u, CountLeadingZeroBitsSizeT(0ull));
-  EXPECT_EQ(63u, CountLeadingZeroBitsSizeT(1ull));
-  EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(1ull << 31));
-  EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(1ull << 62));
-  EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(1ull << 63));
-#else
-  EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(0u));
-  EXPECT_EQ(31u, CountLeadingZeroBitsSizeT(1u));
-  EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(1u << 30));
-  EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(1u << 31));
-#endif
-}
-
 }  // namespace bits
 }  // namespace base
diff --git a/base/callback.h b/base/callback.h
index 40bd520..e087c73 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -12,130 +12,382 @@
 // Closure should #include "base/callback_forward.h" instead of this file.
 
 // -----------------------------------------------------------------------------
-// Usage documentation
+// Introduction
 // -----------------------------------------------------------------------------
 //
-// See //docs/callback.md for documentation.
+// The templated Callback class is a generalized function object. Together
+// with the Bind() function in bind.h, they provide a type-safe method for
+// performing partial application of functions.
+//
+// Partial application (or "currying") is the process of binding a subset of
+// a function's arguments to produce another function that takes fewer
+// arguments. This can be used to pass around a unit of delayed execution,
+// much like lexical closures are used in other languages. For example, it
+// is used in Chromium code to schedule tasks on different MessageLoops.
+//
+// A callback with no unbound input parameters (base::Callback<void()>)
+// is called a base::Closure. Note that this is NOT the same as what other
+// languages refer to as a closure -- it does not retain a reference to its
+// enclosing environment.
+//
+// MEMORY MANAGEMENT AND PASSING
+//
+// The Callback objects themselves should be passed by const-reference, and
+// stored by copy. They internally store their state via a refcounted class
+// and thus do not need to be deleted.
+//
+// The reason to pass via a const-reference is to avoid unnecessary
+// AddRef/Release pairs to the internal state.
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for basic stuff
+// -----------------------------------------------------------------------------
+//
+// BINDING A BARE FUNCTION
+//
+//   int Return5() { return 5; }
+//   base::Callback<int()> func_cb = base::Bind(&Return5);
+//   LOG(INFO) << func_cb.Run();  // Prints 5.
+//
+// BINDING A CLASS METHOD
+//
+//   The first argument to bind is the member function to call, the second is
+//   the object on which to call it.
+//
+//   class Ref : public base::RefCountedThreadSafe<Ref> {
+//    public:
+//     int Foo() { return 3; }
+//     void PrintBye() { LOG(INFO) << "bye."; }
+//   };
+//   scoped_refptr<Ref> ref = new Ref();
+//   base::Callback<void()> ref_cb = base::Bind(&Ref::Foo, ref);
+//   LOG(INFO) << ref_cb.Run();  // Prints out 3.
+//
+//   By default the object must support RefCounted or you will get a compiler
+//   error. If you're passing between threads, be sure it's
+//   RefCountedThreadSafe! See "Advanced binding of member functions" below if
+//   you don't want to use reference counting.
+//
+// RUNNING A CALLBACK
+//
+//   Callbacks can be run with their "Run" method, which has the same
+//   signature as the template argument to the callback.
+//
+//   void DoSomething(const base::Callback<void(int, std::string)>& callback) {
+//     callback.Run(5, "hello");
+//   }
+//
+//   Callbacks can be run more than once (they don't get deleted or marked when
+//   run). However, this precludes using base::Passed (see below).
+//
+//   void DoSomething(const base::Callback<double(double)>& callback) {
+//     double myresult = callback.Run(3.14159);
+//     myresult += callback.Run(2.71828);
+//   }
+//
+// PASSING UNBOUND INPUT PARAMETERS
+//
+//   Unbound parameters are specified at the time a callback is Run(). They are
+//   specified in the Callback template type:
+//
+//   void MyFunc(int i, const std::string& str) {}
+//   base::Callback<void(int, const std::string&)> cb = base::Bind(&MyFunc);
+//   cb.Run(23, "hello, world");
+//
+// PASSING BOUND INPUT PARAMETERS
+//
+//   Bound parameters are specified when you create thee callback as arguments
+//   to Bind(). They will be passed to the function and the Run()ner of the
+//   callback doesn't see those values or even know that the function it's
+//   calling.
+//
+//   void MyFunc(int i, const std::string& str) {}
+//   base::Callback<void()> cb = base::Bind(&MyFunc, 23, "hello world");
+//   cb.Run();
+//
+//   A callback with no unbound input parameters (base::Callback<void()>)
+//   is called a base::Closure. So we could have also written:
+//
+//   base::Closure cb = base::Bind(&MyFunc, 23, "hello world");
+//
+//   When calling member functions, bound parameters just go after the object
+//   pointer.
+//
+//   base::Closure cb = base::Bind(&MyClass::MyFunc, this, 23, "hello world");
+//
+// PARTIAL BINDING OF PARAMETERS
+//
+//   You can specify some parameters when you create the callback, and specify
+//   the rest when you execute the callback.
+//
+//   void MyFunc(int i, const std::string& str) {}
+//   base::Callback<void(const std::string&)> cb = base::Bind(&MyFunc, 23);
+//   cb.Run("hello world");
+//
+//   When calling a function bound parameters are first, followed by unbound
+//   parameters.
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for advanced binding
+// -----------------------------------------------------------------------------
+//
+// BINDING A CLASS METHOD WITH WEAK POINTERS
+//
+//   base::Bind(&MyClass::Foo, GetWeakPtr());
+//
+//   The callback will not be run if the object has already been destroyed.
+//   DANGER: weak pointers are not threadsafe, so don't use this
+//   when passing between threads!
+//
+// BINDING A CLASS METHOD WITH MANUAL LIFETIME MANAGEMENT
+//
+//   base::Bind(&MyClass::Foo, base::Unretained(this));
+//
+//   This disables all lifetime management on the object. You're responsible
+//   for making sure the object is alive at the time of the call. You break it,
+//   you own it!
+//
+// BINDING A CLASS METHOD AND HAVING THE CALLBACK OWN THE CLASS
+//
+//   MyClass* myclass = new MyClass;
+//   base::Bind(&MyClass::Foo, base::Owned(myclass));
+//
+//   The object will be deleted when the callback is destroyed, even if it's
+//   not run (like if you post a task during shutdown). Potentially useful for
+//   "fire and forget" cases.
+//
+// IGNORING RETURN VALUES
+//
+//   Sometimes you want to call a function that returns a value in a callback
+//   that doesn't expect a return value.
+//
+//   int DoSomething(int arg) { cout << arg << endl; }
+//   base::Callback<void(int)> cb =
+//       base::Bind(base::IgnoreResult(&DoSomething));
+//
+//
+// -----------------------------------------------------------------------------
+// Quick reference for binding parameters to Bind()
+// -----------------------------------------------------------------------------
+//
+// Bound parameters are specified as arguments to Bind() and are passed to the
+// function. A callback with no parameters or no unbound parameters is called a
+// Closure (base::Callback<void()> and base::Closure are the same thing).
+//
+// PASSING PARAMETERS OWNED BY THE CALLBACK
+//
+//   void Foo(int* arg) { cout << *arg << endl; }
+//   int* pn = new int(1);
+//   base::Closure foo_callback = base::Bind(&foo, base::Owned(pn));
+//
+//   The parameter will be deleted when the callback is destroyed, even if it's
+//   not run (like if you post a task during shutdown).
+//
+// PASSING PARAMETERS AS A scoped_ptr
+//
+//   void TakesOwnership(std::unique_ptr<Foo> arg) {}
+//   std::unique_ptr<Foo> f(new Foo);
+//   // f becomes null during the following call.
+//   base::Closure cb = base::Bind(&TakesOwnership, base::Passed(&f));
+//
+//   Ownership of the parameter will be with the callback until the it is run,
+//   when ownership is passed to the callback function. This means the callback
+//   can only be run once. If the callback is never run, it will delete the
+//   object when it's destroyed.
+//
+// PASSING PARAMETERS AS A scoped_refptr
+//
+//   void TakesOneRef(scoped_refptr<Foo> arg) {}
+//   scoped_refptr<Foo> f(new Foo)
+//   base::Closure cb = base::Bind(&TakesOneRef, f);
+//
+//   This should "just work." The closure will take a reference as long as it
+//   is alive, and another reference will be taken for the called function.
+//
+// PASSING PARAMETERS BY REFERENCE
+//
+//   Const references are *copied* unless ConstRef is used. Example:
+//
+//   void foo(const int& arg) { printf("%d %p\n", arg, &arg); }
+//   int n = 1;
+//   base::Closure has_copy = base::Bind(&foo, n);
+//   base::Closure has_ref = base::Bind(&foo, base::ConstRef(n));
+//   n = 2;
+//   foo(n);                        // Prints "2 0xaaaaaaaaaaaa"
+//   has_copy.Run();                // Prints "1 0xbbbbbbbbbbbb"
+//   has_ref.Run();                 // Prints "2 0xaaaaaaaaaaaa"
+//
+//   Normally parameters are copied in the closure. DANGER: ConstRef stores a
+//   const reference instead, referencing the original parameter. This means
+//   that you must ensure the object outlives the callback!
+//
+//
+// -----------------------------------------------------------------------------
+// Implementation notes
+// -----------------------------------------------------------------------------
+//
+// WHERE IS THIS DESIGN FROM:
+//
+// The design Callback and Bind is heavily influenced by C++'s
+// tr1::function/tr1::bind, and by the "Google Callback" system used inside
+// Google.
+//
+//
+// HOW THE IMPLEMENTATION WORKS:
+//
+// There are three main components to the system:
+//   1) The Callback classes.
+//   2) The Bind() functions.
+//   3) The arguments wrappers (e.g., Unretained() and ConstRef()).
+//
+// The Callback classes represent a generic function pointer. Internally,
+// it stores a refcounted piece of state that represents the target function
+// and all its bound parameters.  Each Callback specialization has a templated
+// constructor that takes an BindState<>*.  In the context of the constructor,
+// the static type of this BindState<> pointer uniquely identifies the
+// function it is representing, all its bound parameters, and a Run() method
+// that is capable of invoking the target.
+//
+// Callback's constructor takes the BindState<>* that has the full static type
+// and erases the target function type as well as the types of the bound
+// parameters.  It does this by storing a pointer to the specific Run()
+// function, and upcasting the state of BindState<>* to a
+// BindStateBase*. This is safe as long as this BindStateBase pointer
+// is only used with the stored Run() pointer.
+//
+// To BindState<> objects are created inside the Bind() functions.
+// These functions, along with a set of internal templates, are responsible for
+//
+//  - Unwrapping the function signature into return type, and parameters
+//  - Determining the number of parameters that are bound
+//  - Creating the BindState storing the bound parameters
+//  - Performing compile-time asserts to avoid error-prone behavior
+//  - Returning an Callback<> with an arity matching the number of unbound
+//    parameters and that knows the correct refcounting semantics for the
+//    target object if we are binding a method.
+//
+// The Bind functions do the above using type-inference, and template
+// specializations.
+//
+// By default Bind() will store copies of all bound parameters, and attempt
+// to refcount a target object if the function being bound is a class method.
+// These copies are created even if the function takes parameters as const
+// references. (Binding to non-const references is forbidden, see bind.h.)
+//
+// To change this behavior, we introduce a set of argument wrappers
+// (e.g., Unretained(), and ConstRef()).  These are simple container templates
+// that are passed by value, and wrap a pointer to argument.  See the
+// file-level comment in base/bind_helpers.h for more info.
+//
+// These types are passed to the Unwrap() functions, and the MaybeRefcount()
+// functions respectively to modify the behavior of Bind().  The Unwrap()
+// and MaybeRefcount() functions change behavior by doing partial
+// specialization based on whether or not a parameter is a wrapper type.
+//
+// ConstRef() is similar to tr1::cref.  Unretained() is specific to Chromium.
+//
+//
+// WHY NOT TR1 FUNCTION/BIND?
+//
+// Direct use of tr1::function and tr1::bind was considered, but ultimately
+// rejected because of the number of copy constructors invocations involved
+// in the binding of arguments during construction, and the forwarding of
+// arguments during invocation.  These copies will no longer be an issue in
+// C++0x because C++0x will support rvalue reference allowing for the compiler
+// to avoid these copies.  However, waiting for C++0x is not an option.
+//
+// Measured with valgrind on gcc version 4.4.3 (Ubuntu 4.4.3-4ubuntu5), the
+// tr1::bind call itself will invoke a non-trivial copy constructor three times
+// for each bound parameter.  Also, each when passing a tr1::function, each
+// bound argument will be copied again.
+//
+// In addition to the copies taken at binding and invocation, copying a
+// tr1::function causes a copy to be made of all the bound parameters and
+// state.
+//
+// Furthermore, in Chromium, it is desirable for the Callback to take a
+// reference on a target object when representing a class method call.  This
+// is not supported by tr1.
+//
+// Lastly, tr1::function and tr1::bind has a more general and flexible API.
+// This includes things like argument reordering by use of
+// tr1::bind::placeholder, support for non-const reference parameters, and some
+// limited amount of subtyping of the tr1::function object (e.g.,
+// tr1::function<int(int)> is convertible to tr1::function<void(int)>).
+//
+// These are not features that are required in Chromium. Some of them, such as
+// allowing for reference parameters, and subtyping of functions, may actually
+// become a source of errors. Removing support for these features actually
+// allows for a simpler implementation, and a terser Currying API.
+//
+//
+// WHY NOT GOOGLE CALLBACKS?
+//
+// The Google callback system also does not support refcounting.  Furthermore,
+// its implementation has a number of strange edge cases with respect to type
+// conversion of its arguments.  In particular, the argument's constness must
+// at times match exactly the function signature, or the type-inference might
+// break.  Given the above, writing a custom solution was easier.
+//
+//
+// MISSING FUNCTIONALITY
+//  - Invoking the return of Bind.  Bind(&foo).Run() does not work;
+//  - Binding arrays to functions that take a non-const pointer.
+//    Example:
+//      void Foo(const char* ptr);
+//      void Bar(char* ptr);
+//      Bind(&Foo, "test");
+//      Bind(&Bar, "test");  // This fails because ptr is not const.
+//
+// If you are thinking of forward declaring Callback in your own header file,
+// please include "base/callback_forward.h" instead.
 
 namespace base {
 
-namespace internal {
-
-template <typename CallbackType>
-struct IsOnceCallback : std::false_type {};
-
-template <typename Signature>
-struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
-
-// RunMixin provides different variants of `Run()` function to `Callback<>`
-// based on the type of callback.
-template <typename CallbackType>
-class RunMixin;
-
-// Specialization for OnceCallback.
-template <typename R, typename... Args>
-class RunMixin<OnceCallback<R(Args...)>> {
+template <typename R, typename... Args, internal::CopyMode copy_mode>
+class Callback<R(Args...), copy_mode>
+    : public internal::CallbackBase<copy_mode> {
  private:
-  using CallbackType = OnceCallback<R(Args...)>;
+  using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
 
  public:
-  using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
-
-  R Run(Args... /* args */) const & {
-    // Note: even though this static_assert will trivially always fail, it
-    // cannot be simply replaced with static_assert(false, ...) because:
-    // - Per [dcl.dcl]/p4, a program is ill-formed if the constant-expression
-    //   argument does not evaluate to true.
-    // - Per [temp.res]/p8, if no valid specialization can be generated for a
-    //   template definition, and that template is not instantiated, the
-    //   template definition is ill-formed, no diagnostic required.
-    // These two clauses, taken together, would allow a conforming C++ compiler
-    // to immediately reject static_assert(false, ...), even inside an
-    // uninstantiated template.
-    static_assert(!IsOnceCallback<CallbackType>::value,
-                  "OnceCallback::Run() may only be invoked on a non-const "
-                  "rvalue, i.e. std::move(callback).Run().");
-  }
-
-  R Run(Args... args) && {
-    // Move the callback instance into a local variable before the invocation,
-    // that ensures the internal state is cleared after the invocation.
-    // It's not safe to touch |this| after the invocation, since running the
-    // bound function may destroy |this|.
-    CallbackType cb = static_cast<CallbackType&&>(*this);
-    PolymorphicInvoke f =
-        reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
-    return f(cb.bind_state_.get(), std::forward<Args>(args)...);
-  }
-};
-
-// Specialization for RepeatingCallback.
-template <typename R, typename... Args>
-class RunMixin<RepeatingCallback<R(Args...)>> {
- private:
-  using CallbackType = RepeatingCallback<R(Args...)>;
-
- public:
-  using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
-
-  R Run(Args... args) const {
-    const CallbackType& cb = static_cast<const CallbackType&>(*this);
-    PolymorphicInvoke f =
-        reinterpret_cast<PolymorphicInvoke>(cb.polymorphic_invoke());
-    return f(cb.bind_state_.get(), std::forward<Args>(args)...);
-  }
-};
-
-template <typename From, typename To>
-struct IsCallbackConvertible : std::false_type {};
-
-template <typename Signature>
-struct IsCallbackConvertible<RepeatingCallback<Signature>,
-                             OnceCallback<Signature>> : std::true_type {};
-
-}  // namespace internal
-
-template <typename R,
-          typename... Args,
-          internal::CopyMode copy_mode,
-          internal::RepeatMode repeat_mode>
-class Callback<R(Args...), copy_mode, repeat_mode>
-    : public internal::CallbackBase<copy_mode>,
-      public internal::RunMixin<Callback<R(Args...), copy_mode, repeat_mode>> {
- public:
-  static_assert(repeat_mode != internal::RepeatMode::Once ||
-                copy_mode == internal::CopyMode::MoveOnly,
-                "OnceCallback must be MoveOnly.");
-
-  using RunType = R(Args...);
+  // MSVC 2013 doesn't support Type Alias of function types.
+  // Revisit this after we update it to newer version.
+  typedef R RunType(Args...);
 
   Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
 
-  explicit Callback(internal::BindStateBase* bind_state)
+  Callback(internal::BindStateBase* bind_state,
+           PolymorphicInvoke invoke_func)
       : internal::CallbackBase<copy_mode>(bind_state) {
-  }
-
-  template <typename OtherCallback,
-            typename = typename std::enable_if<
-                internal::IsCallbackConvertible<OtherCallback, Callback>::value
-            >::type>
-  Callback(OtherCallback other)
-      : internal::CallbackBase<copy_mode>(std::move(other)) {}
-
-  template <typename OtherCallback,
-            typename = typename std::enable_if<
-                internal::IsCallbackConvertible<OtherCallback, Callback>::value
-            >::type>
-  Callback& operator=(OtherCallback other) {
-    static_cast<internal::CallbackBase<copy_mode>&>(*this) = std::move(other);
-    return *this;
+    using InvokeFuncStorage =
+        typename internal::CallbackBase<copy_mode>::InvokeFuncStorage;
+    this->polymorphic_invoke_ =
+        reinterpret_cast<InvokeFuncStorage>(invoke_func);
   }
 
   bool Equals(const Callback& other) const {
     return this->EqualsInternal(other);
   }
 
-  friend class internal::RunMixin<Callback>;
+  // Run() makes an extra copy compared to directly calling the bound function
+  // if an argument is passed-by-value and is copyable-but-not-movable:
+  // i.e. below copies CopyableNonMovableType twice.
+  //   void F(CopyableNonMovableType) {}
+  //   Bind(&F).Run(CopyableNonMovableType());
+  //
+  // We can not fully apply Perfect Forwarding idiom to the callchain from
+  // Callback::Run() to the target function. Perfect Forwarding requires
+  // knowing how the caller will pass the arguments. However, the signature of
+  // InvokerType::Run() needs to be fixed in the callback constructor, so Run()
+  // cannot template its arguments based on how it's called.
+  R Run(Args... args) const {
+    PolymorphicInvoke f =
+        reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke_);
+    return f(this->bind_state_.get(), std::forward<Args>(args)...);
+  }
 };
 
 }  // namespace base
diff --git a/base/callback_forward.h b/base/callback_forward.h
index 13eed0e..8b9b89c 100644
--- a/base/callback_forward.h
+++ b/base/callback_forward.h
@@ -12,37 +12,19 @@
 // MoveOnly indicates the Callback is not copyable but movable, and Copyable
 // indicates it is copyable and movable.
 enum class CopyMode {
-  MoveOnly,
-  Copyable,
-};
-
-enum class RepeatMode {
-  Once,
-  Repeating,
+  MoveOnly, Copyable,
 };
 
 }  // namespace internal
 
 template <typename Signature,
-          internal::CopyMode copy_mode = internal::CopyMode::Copyable,
-          internal::RepeatMode repeat_mode = internal::RepeatMode::Repeating>
+          internal::CopyMode copy_mode = internal::CopyMode::Copyable>
 class Callback;
 
 // Syntactic sugar to make Callback<void()> easier to declare since it
 // will be used in a lot of APIs with delayed execution.
 using Closure = Callback<void()>;
 
-template <typename Signature>
-using OnceCallback = Callback<Signature,
-                              internal::CopyMode::MoveOnly,
-                              internal::RepeatMode::Once>;
-template <typename Signature>
-using RepeatingCallback = Callback<Signature,
-                                   internal::CopyMode::Copyable,
-                                   internal::RepeatMode::Repeating>;
-using OnceClosure = OnceCallback<void()>;
-using RepeatingClosure = RepeatingCallback<void()>;
-
 }  // namespace base
 
 #endif  // BASE_CALLBACK_FORWARD_H_
diff --git a/base/callback_helpers.h b/base/callback_helpers.h
index ec3d6cb..782371f 100644
--- a/base/callback_helpers.h
+++ b/base/callback_helpers.h
@@ -20,13 +20,10 @@
 
 namespace base {
 
-template <typename Signature,
-          internal::CopyMode copy_mode,
-          internal::RepeatMode repeat_mode>
-base::Callback<Signature, copy_mode, repeat_mode> ResetAndReturn(
-    base::Callback<Signature, copy_mode, repeat_mode>* cb) {
-  base::Callback<Signature, copy_mode, repeat_mode> ret(std::move(*cb));
-  DCHECK(!*cb);
+template <typename Sig>
+base::Callback<Sig> ResetAndReturn(base::Callback<Sig>* cb) {
+  base::Callback<Sig> ret(*cb);
+  cb->Reset();
   return ret;
 }
 
diff --git a/base/callback_helpers_unittest.cc b/base/callback_helpers_unittest.cc
index 6c48d7c..8283996 100644
--- a/base/callback_helpers_unittest.cc
+++ b/base/callback_helpers_unittest.cc
@@ -14,24 +14,6 @@
   (*value)++;
 }
 
-TEST(CallbackHelpersTest, TestResetAndReturn) {
-  int run_count = 0;
-
-  base::Closure cb = base::Bind(&Increment, &run_count);
-  EXPECT_EQ(0, run_count);
-  base::ResetAndReturn(&cb).Run();
-  EXPECT_EQ(1, run_count);
-  EXPECT_FALSE(cb);
-
-  run_count = 0;
-
-  base::OnceClosure cb2 = base::BindOnce(&Increment, &run_count);
-  EXPECT_EQ(0, run_count);
-  base::ResetAndReturn(&cb2).Run();
-  EXPECT_EQ(1, run_count);
-  EXPECT_FALSE(cb2);
-}
-
 TEST(CallbackHelpersTest, TestScopedClosureRunnerExitScope) {
   int run_count = 0;
   {
diff --git a/base/callback_internal.cc b/base/callback_internal.cc
index 4330e9c..4c8ccae 100644
--- a/base/callback_internal.cc
+++ b/base/callback_internal.cc
@@ -9,75 +9,40 @@
 namespace base {
 namespace internal {
 
-namespace {
-
-bool ReturnFalse(const BindStateBase*) {
-  return false;
-}
-
-}  // namespace
-
-BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
-                             void (*destructor)(const BindStateBase*))
-    : BindStateBase(polymorphic_invoke, destructor, &ReturnFalse) {
-}
-
-BindStateBase::BindStateBase(InvokeFuncStorage polymorphic_invoke,
-                             void (*destructor)(const BindStateBase*),
-                             bool (*is_cancelled)(const BindStateBase*))
-    : polymorphic_invoke_(polymorphic_invoke),
-      ref_count_(0),
-      destructor_(destructor),
-      is_cancelled_(is_cancelled) {}
-
-void BindStateBase::AddRef() const {
+void BindStateBase::AddRef() {
   AtomicRefCountInc(&ref_count_);
 }
 
-void BindStateBase::Release() const {
+void BindStateBase::Release() {
   if (!AtomicRefCountDec(&ref_count_))
     destructor_(this);
 }
 
-CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c) = default;
-
-CallbackBase<CopyMode::MoveOnly>&
-CallbackBase<CopyMode::MoveOnly>::operator=(CallbackBase&& c) = default;
-
-CallbackBase<CopyMode::MoveOnly>::CallbackBase(
-    const CallbackBase<CopyMode::Copyable>& c)
-    : bind_state_(c.bind_state_) {}
-
-CallbackBase<CopyMode::MoveOnly>& CallbackBase<CopyMode::MoveOnly>::operator=(
-    const CallbackBase<CopyMode::Copyable>& c) {
-  bind_state_ = c.bind_state_;
-  return *this;
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c)
+    : bind_state_(std::move(c.bind_state_)),
+      polymorphic_invoke_(c.polymorphic_invoke_) {
+  c.polymorphic_invoke_ = nullptr;
 }
 
-CallbackBase<CopyMode::MoveOnly>::CallbackBase(
-    CallbackBase<CopyMode::Copyable>&& c)
-    : bind_state_(std::move(c.bind_state_)) {}
-
-CallbackBase<CopyMode::MoveOnly>& CallbackBase<CopyMode::MoveOnly>::operator=(
-    CallbackBase<CopyMode::Copyable>&& c) {
+CallbackBase<CopyMode::MoveOnly>&
+CallbackBase<CopyMode::MoveOnly>::operator=(CallbackBase&& c) {
   bind_state_ = std::move(c.bind_state_);
+  polymorphic_invoke_ = c.polymorphic_invoke_;
+  c.polymorphic_invoke_ = nullptr;
   return *this;
 }
 
 void CallbackBase<CopyMode::MoveOnly>::Reset() {
+  polymorphic_invoke_ = nullptr;
   // NULL the bind_state_ last, since it may be holding the last ref to whatever
   // object owns us, and we may be deleted after that.
   bind_state_ = nullptr;
 }
 
-bool CallbackBase<CopyMode::MoveOnly>::IsCancelled() const {
-  DCHECK(bind_state_);
-  return bind_state_->IsCancelled();
-}
-
 bool CallbackBase<CopyMode::MoveOnly>::EqualsInternal(
     const CallbackBase& other) const {
-  return bind_state_ == other.bind_state_;
+  return bind_state_.get() == other.bind_state_.get() &&
+         polymorphic_invoke_ == other.polymorphic_invoke_;
 }
 
 CallbackBase<CopyMode::MoveOnly>::CallbackBase(
@@ -92,18 +57,24 @@
     const CallbackBase& c)
     : CallbackBase<CopyMode::MoveOnly>(nullptr) {
   bind_state_ = c.bind_state_;
+  polymorphic_invoke_ = c.polymorphic_invoke_;
 }
 
-CallbackBase<CopyMode::Copyable>::CallbackBase(CallbackBase&& c) = default;
+CallbackBase<CopyMode::Copyable>::CallbackBase(CallbackBase&& c)
+    : CallbackBase<CopyMode::MoveOnly>(std::move(c)) {}
 
 CallbackBase<CopyMode::Copyable>&
 CallbackBase<CopyMode::Copyable>::operator=(const CallbackBase& c) {
   bind_state_ = c.bind_state_;
+  polymorphic_invoke_ = c.polymorphic_invoke_;
   return *this;
 }
 
 CallbackBase<CopyMode::Copyable>&
-CallbackBase<CopyMode::Copyable>::operator=(CallbackBase&& c) = default;
+CallbackBase<CopyMode::Copyable>::operator=(CallbackBase&& c) {
+  *static_cast<CallbackBase<CopyMode::MoveOnly>*>(this) = std::move(c);
+  return *this;
+}
 
 template class CallbackBase<CopyMode::MoveOnly>;
 template class CallbackBase<CopyMode::Copyable>;
diff --git a/base/callback_internal.h b/base/callback_internal.h
index d6dcfeb..0fe0b2d 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -30,16 +30,10 @@
 // Creating a vtable for every BindState template instantiation results in a lot
 // of bloat. Its only task is to call the destructor which can be done with a
 // function pointer.
-class BASE_EXPORT BindStateBase {
- public:
-  using InvokeFuncStorage = void(*)();
-
+class BindStateBase {
  protected:
-  BindStateBase(InvokeFuncStorage polymorphic_invoke,
-                void (*destructor)(const BindStateBase*));
-  BindStateBase(InvokeFuncStorage polymorphic_invoke,
-                void (*destructor)(const BindStateBase*),
-                bool (*is_cancelled)(const BindStateBase*));
+  explicit BindStateBase(void (*destructor)(BindStateBase*))
+      : ref_count_(0), destructor_(destructor) {}
   ~BindStateBase() = default;
 
  private:
@@ -47,24 +41,13 @@
   template <CopyMode copy_mode>
   friend class CallbackBase;
 
-  bool IsCancelled() const {
-    return is_cancelled_(this);
-  }
+  void AddRef();
+  void Release();
 
-  void AddRef() const;
-  void Release() const;
-
-  // In C++, it is safe to cast function pointers to function pointers of
-  // another type. It is not okay to use void*. We create a InvokeFuncStorage
-  // that that can store our function pointer, and then cast it back to
-  // the original type on usage.
-  InvokeFuncStorage polymorphic_invoke_;
-
-  mutable AtomicRefCount ref_count_;
+  AtomicRefCount ref_count_;
 
   // Pointer to a function that will properly destroy |this|.
-  void (*destructor_)(const BindStateBase*);
-  bool (*is_cancelled_)(const BindStateBase*);
+  void (*destructor_)(BindStateBase*);
 
   DISALLOW_COPY_AND_ASSIGN(BindStateBase);
 };
@@ -79,43 +62,36 @@
   CallbackBase(CallbackBase&& c);
   CallbackBase& operator=(CallbackBase&& c);
 
-  explicit CallbackBase(const CallbackBase<CopyMode::Copyable>& c);
-  CallbackBase& operator=(const CallbackBase<CopyMode::Copyable>& c);
-
-  explicit CallbackBase(CallbackBase<CopyMode::Copyable>&& c);
-  CallbackBase& operator=(CallbackBase<CopyMode::Copyable>&& c);
-
   // Returns true if Callback is null (doesn't refer to anything).
   bool is_null() const { return bind_state_.get() == NULL; }
   explicit operator bool() const { return !is_null(); }
 
-  // Returns true if the callback invocation will be nop due to an cancellation.
-  // It's invalid to call this on uninitialized callback.
-  bool IsCancelled() const;
-
   // Returns the Callback into an uninitialized state.
   void Reset();
 
  protected:
-  using InvokeFuncStorage = BindStateBase::InvokeFuncStorage;
+  // In C++, it is safe to cast function pointers to function pointers of
+  // another type. It is not okay to use void*. We create a InvokeFuncStorage
+  // that that can store our function pointer, and then cast it back to
+  // the original type on usage.
+  using InvokeFuncStorage = void(*)();
 
   // Returns true if this callback equals |other|. |other| may be null.
   bool EqualsInternal(const CallbackBase& other) const;
 
   // Allow initializing of |bind_state_| via the constructor to avoid default
-  // initialization of the scoped_refptr.
+  // initialization of the scoped_refptr.  We do not also initialize
+  // |polymorphic_invoke_| here because doing a normal assignment in the
+  // derived Callback templates makes for much nicer compiler errors.
   explicit CallbackBase(BindStateBase* bind_state);
 
-  InvokeFuncStorage polymorphic_invoke() const {
-    return bind_state_->polymorphic_invoke_;
-  }
-
   // Force the destructor to be instantiated inside this translation unit so
   // that our subclasses will not get inlined versions.  Avoids more template
   // bloat.
   ~CallbackBase();
 
   scoped_refptr<BindStateBase> bind_state_;
+  InvokeFuncStorage polymorphic_invoke_ = nullptr;
 };
 
 // CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
index a417369..ce453a1 100644
--- a/base/callback_unittest.cc
+++ b/base/callback_unittest.cc
@@ -14,7 +14,7 @@
 
 namespace base {
 
-void NopInvokeFunc() {}
+void NopInvokeFunc(internal::BindStateBase*) {}
 
 // White-box testpoints to inject into a Callback<> object for checking
 // comparators and emptiness APIs.  Use a BindState that is specialized
@@ -22,26 +22,20 @@
 // chance of colliding with another instantiation and breaking the
 // one-definition-rule.
 struct FakeBindState1 : internal::BindStateBase {
-  FakeBindState1() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
+  FakeBindState1() : BindStateBase(&Destroy) {}
  private:
   ~FakeBindState1() {}
-  static void Destroy(const internal::BindStateBase* self) {
-    delete static_cast<const FakeBindState1*>(self);
-  }
-  static bool IsCancelled(const internal::BindStateBase*) {
-    return false;
+  static void Destroy(internal::BindStateBase* self) {
+    delete static_cast<FakeBindState1*>(self);
   }
 };
 
 struct FakeBindState2 : internal::BindStateBase {
-  FakeBindState2() : BindStateBase(&NopInvokeFunc, &Destroy, &IsCancelled) {}
+  FakeBindState2() : BindStateBase(&Destroy) {}
  private:
   ~FakeBindState2() {}
-  static void Destroy(const internal::BindStateBase* self) {
-    delete static_cast<const FakeBindState2*>(self);
-  }
-  static bool IsCancelled(const internal::BindStateBase*) {
-    return false;
+  static void Destroy(internal::BindStateBase* self) {
+    delete static_cast<FakeBindState2*>(self);
   }
 };
 
@@ -50,8 +44,8 @@
 class CallbackTest : public ::testing::Test {
  public:
   CallbackTest()
-      : callback_a_(new FakeBindState1()),
-        callback_b_(new FakeBindState2()) {
+      : callback_a_(new FakeBindState1(), &NopInvokeFunc),
+        callback_b_(new FakeBindState2(), &NopInvokeFunc) {
   }
 
   ~CallbackTest() override {}
@@ -94,7 +88,7 @@
   EXPECT_FALSE(callback_b_.Equals(callback_a_));
 
   // We should compare based on instance, not type.
-  Callback<void()> callback_c(new FakeBindState1());
+  Callback<void()> callback_c(new FakeBindState1(), &NopInvokeFunc);
   Callback<void()> callback_a2 = callback_a_;
   EXPECT_TRUE(callback_a_.Equals(callback_a2));
   EXPECT_FALSE(callback_a_.Equals(callback_c));
@@ -115,17 +109,6 @@
   EXPECT_TRUE(callback_a_.Equals(null_callback_));
 }
 
-TEST_F(CallbackTest, Move) {
-  // Moving should reset the callback.
-  ASSERT_FALSE(callback_a_.is_null());
-  ASSERT_FALSE(callback_a_.Equals(null_callback_));
-
-  auto tmp = std::move(callback_a_);
-
-  EXPECT_TRUE(callback_a_.is_null());
-  EXPECT_TRUE(callback_a_.Equals(null_callback_));
-}
-
 struct TestForReentrancy {
   TestForReentrancy()
       : cb_already_run(false),
diff --git a/base/cancelable_callback.h b/base/cancelable_callback.h
index 13cbd0c..0034fdd 100644
--- a/base/cancelable_callback.h
+++ b/base/cancelable_callback.h
@@ -26,18 +26,16 @@
 // to the message loop, the intensive test runs, the message loop is run,
 // then the callback is cancelled.
 //
-// RunLoop run_loop;
-//
 // void TimeoutCallback(const std::string& timeout_message) {
 //   FAIL() << timeout_message;
-//   run_loop.QuitWhenIdle();
+//   MessageLoop::current()->QuitWhenIdle();
 // }
 //
 // CancelableClosure timeout(base::Bind(&TimeoutCallback, "Test timed out."));
-// ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, timeout.callback(),
-//                                                TimeDelta::FromSeconds(4));
+// MessageLoop::current()->PostDelayedTask(FROM_HERE, timeout.callback(),
+//                                         4000)  // 4 seconds to run.
 // RunIntensiveTest();
-// run_loop.Run();
+// MessageLoop::current()->Run();
 // timeout.Cancel();  // Hopefully this is hit before the timeout callback runs.
 //
 
diff --git a/base/command_line.cc b/base/command_line.cc
index 3033fcf..099bb18 100644
--- a/base/command_line.cc
+++ b/base/command_line.cc
@@ -149,10 +149,7 @@
 
 }  // namespace
 
-CommandLine::CommandLine(NoProgram /* no_program */)
-    : argv_(1),
-      begin_args_(1) {
-}
+CommandLine::CommandLine(NoProgram) : argv_(1), begin_args_(1) {}
 
 CommandLine::CommandLine(const FilePath& program)
     : argv_(1),
@@ -455,7 +452,9 @@
 
 CommandLine::StringType CommandLine::GetArgumentsStringInternal(
     bool quote_placeholders) const {
-  ALLOW_UNUSED_PARAM(quote_placeholders);
+#if !defined(OS_WIN)
+  (void)quote_placeholders;  // Avoid an unused warning.
+#endif
   StringType params;
   // Append switches and arguments.
   bool parse_switches = true;
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
index 358a5c9..c2a02de 100644
--- a/base/compiler_specific.h
+++ b/base/compiler_specific.h
@@ -85,9 +85,6 @@
 //   ALLOW_UNUSED_LOCAL(x);
 #define ALLOW_UNUSED_LOCAL(x) false ? (void)x : (void)0
 
-// Used for Arc++ where -Wno-unused-parameter is used.
-#define ALLOW_UNUSED_PARAM(x) false ? (void)x : (void)0
-
 // Annotate a typedef or function indicating it's ok if it's not used.
 // Use like:
 //   typedef Foo Bar ALLOW_UNUSED_TYPE;
@@ -108,14 +105,6 @@
 #define NOINLINE
 #endif
 
-#if COMPILER_GCC && defined(NDEBUG)
-#define ALWAYS_INLINE inline __attribute__((__always_inline__))
-#elif COMPILER_MSVC && defined(NDEBUG)
-#define ALWAYS_INLINE __forceinline
-#else
-#define ALWAYS_INLINE inline
-#endif
-
 // Specify memory alignment for structs, classes, etc.
 // Use like:
 //   class ALIGNAS(16) MyClass { ... }
@@ -165,16 +154,6 @@
 // If available, it would look like:
 //   __attribute__((format(wprintf, format_param, dots_param)))
 
-// Sanitizers annotations.
-#if defined(__has_attribute)
-#if __has_attribute(no_sanitize)
-#define NO_SANITIZE(what) __attribute__((no_sanitize(what)))
-#endif
-#endif
-#if !defined(NO_SANITIZE)
-#define NO_SANITIZE(what)
-#endif
-
 // MemorySanitizer annotations.
 #if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
 #include <sanitizer/msan_interface.h>
@@ -195,15 +174,6 @@
 #define MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
 #endif  // MEMORY_SANITIZER
 
-// DISABLE_CFI_PERF -- Disable Control Flow Integrity for perf reasons.
-#if !defined(DISABLE_CFI_PERF)
-#if defined(__clang__) && defined(OFFICIAL_BUILD)
-#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
-#else
-#define DISABLE_CFI_PERF
-#endif
-#endif
-
 // Macro useful for writing cross-platform function pointers.
 #if !defined(CDECL)
 #if defined(OS_WIN)
@@ -222,14 +192,6 @@
 #endif  // defined(COMPILER_GCC)
 #endif  // !defined(UNLIKELY)
 
-#if !defined(LIKELY)
-#if defined(COMPILER_GCC)
-#define LIKELY(x) __builtin_expect(!!(x), 1)
-#else
-#define LIKELY(x) (x)
-#endif  // defined(COMPILER_GCC)
-#endif  // !defined(LIKELY)
-
 // Compiler feature-detection.
 // clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
 #if defined(__has_feature)
diff --git a/base/containers/mru_cache.h b/base/containers/mru_cache.h
index 4005489..6c1d626 100644
--- a/base/containers/mru_cache.h
+++ b/base/containers/mru_cache.h
@@ -209,12 +209,10 @@
 
 // A container that does not do anything to free its data. Use this when storing
 // value types (as opposed to pointers) in the list.
-template <class KeyType,
-          class PayloadType,
-          class CompareType = std::less<KeyType>>
-class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType> {
+template <class KeyType, class PayloadType>
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, std::less<KeyType>> {
  private:
-  using ParentType = MRUCacheBase<KeyType, PayloadType, CompareType>;
+  using ParentType = MRUCacheBase<KeyType, PayloadType, std::less<KeyType>>;
 
  public:
   // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
diff --git a/base/containers/scoped_ptr_hash_map.h b/base/containers/scoped_ptr_hash_map.h
new file mode 100644
index 0000000..f513f06
--- /dev/null
+++ b/base/containers/scoped_ptr_hash_map.h
@@ -0,0 +1,176 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
+#define BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+// Deprecated. Use std::unordered_map instead. https://crbug.com/579229
+//
+// This type acts like a hash_map<K, std::unique_ptr<V, D> >, based on top of
+// base::hash_map. The ScopedPtrHashMap has ownership of all values in the data
+// structure.
+template <typename Key, typename ScopedPtr>
+class ScopedPtrHashMap {
+  typedef base::hash_map<Key, typename ScopedPtr::element_type*> Container;
+
+ public:
+  typedef typename Container::key_type key_type;
+  typedef typename Container::mapped_type mapped_type;
+  typedef typename Container::value_type value_type;
+  typedef typename Container::iterator iterator;
+  typedef typename Container::const_iterator const_iterator;
+
+  ScopedPtrHashMap() {}
+
+  ~ScopedPtrHashMap() { clear(); }
+
+  void swap(ScopedPtrHashMap<Key, ScopedPtr>& other) {
+    data_.swap(other.data_);
+  }
+
+  // Replaces value but not key if key is already present.
+  iterator set(const Key& key, ScopedPtr data) {
+    iterator it = find(key);
+    if (it != end()) {
+      // Let ScopedPtr decide how to delete. For example, it may use custom
+      // deleter.
+      ScopedPtr(it->second).reset();
+      it->second = data.release();
+      return it;
+    }
+
+    return data_.insert(std::make_pair(key, data.release())).first;
+  }
+
+  // Does nothing if key is already present
+  std::pair<iterator, bool> add(const Key& key, ScopedPtr data) {
+    std::pair<iterator, bool> result =
+        data_.insert(std::make_pair(key, data.get()));
+    if (result.second)
+      ::ignore_result(data.release());
+    return result;
+  }
+
+  void erase(iterator it) {
+    // Let ScopedPtr decide how to delete.
+    ScopedPtr(it->second).reset();
+    data_.erase(it);
+  }
+
+  size_t erase(const Key& k) {
+    iterator it = data_.find(k);
+    if (it == data_.end())
+      return 0;
+    erase(it);
+    return 1;
+  }
+
+  ScopedPtr take(iterator it) {
+    DCHECK(it != data_.end());
+    if (it == data_.end())
+      return ScopedPtr();
+
+    ScopedPtr ret(it->second);
+    it->second = NULL;
+    return ret;
+  }
+
+  ScopedPtr take(const Key& k) {
+    iterator it = find(k);
+    if (it == data_.end())
+      return ScopedPtr();
+
+    return take(it);
+  }
+
+  ScopedPtr take_and_erase(iterator it) {
+    DCHECK(it != data_.end());
+    if (it == data_.end())
+      return ScopedPtr();
+
+    ScopedPtr ret(it->second);
+    data_.erase(it);
+    return ret;
+  }
+
+  ScopedPtr take_and_erase(const Key& k) {
+    iterator it = find(k);
+    if (it == data_.end())
+      return ScopedPtr();
+
+    return take_and_erase(it);
+  }
+
+  // Returns the element in the hash_map that matches the given key.
+  // If no such element exists it returns NULL.
+  typename ScopedPtr::element_type* get(const Key& k) const {
+    const_iterator it = find(k);
+    if (it == end())
+      return NULL;
+    return it->second;
+  }
+
+  inline bool contains(const Key& k) const { return data_.count(k) > 0; }
+
+  inline void clear() {
+    auto it = data_.begin();
+    while (it != data_.end()) {
+      // NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
+      // Deleting the value does not always invalidate the iterator, but it may
+      // do so if the key is a pointer into the value object.
+      auto temp = it;
+      ++it;
+      // Let ScopedPtr decide how to delete.
+      ScopedPtr(temp->second).reset();
+    }
+    data_.clear();
+  }
+
+  inline const_iterator find(const Key& k) const { return data_.find(k); }
+  inline iterator find(const Key& k) { return data_.find(k); }
+
+  inline size_t count(const Key& k) const { return data_.count(k); }
+  inline std::pair<const_iterator, const_iterator> equal_range(
+      const Key& k) const {
+    return data_.equal_range(k);
+  }
+  inline std::pair<iterator, iterator> equal_range(const Key& k) {
+    return data_.equal_range(k);
+  }
+
+  inline size_t size() const { return data_.size(); }
+  inline size_t max_size() const { return data_.max_size(); }
+
+  inline bool empty() const { return data_.empty(); }
+
+  inline size_t bucket_count() const { return data_.bucket_count(); }
+  inline void resize(size_t size) { return data_.resize(size); }
+
+  inline iterator begin() { return data_.begin(); }
+  inline const_iterator begin() const { return data_.begin(); }
+  inline iterator end() { return data_.end(); }
+  inline const_iterator end() const { return data_.end(); }
+
+ private:
+  Container data_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedPtrHashMap);
+};
+
+}  // namespace base
+
+#endif  // BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
diff --git a/base/containers/small_map.h b/base/containers/small_map.h
index 2945d58..82ed6c5 100644
--- a/base/containers/small_map.h
+++ b/base/containers/small_map.h
@@ -32,7 +32,7 @@
 //
 //  - If you only ever keep a couple of items and have very simple usage,
 //    consider whether a using a vector and brute-force searching it will be
-//    the most efficient. It's not a lot of generated code (less than a
+//    the most efficient. It's not a lot of generated code (less then a
 //    red-black tree if your key is "weird" and not eliminated as duplicate of
 //    something else) and will probably be faster and do fewer heap allocations
 //    than std::map if you have just a couple of items.
@@ -510,8 +510,8 @@
     size_ = 0;
   }
 
-  // Invalidates iterators. Returns iterator following the last removed element.
-  iterator erase(const iterator& position) {
+  // Invalidates iterators.
+  void erase(const iterator& position) {
     if (size_ >= 0) {
       int i = position.array_iter_ - array_;
       array_[i].Destroy();
@@ -519,11 +519,10 @@
       if (i != size_) {
         array_[i].InitFromMove(std::move(array_[size_]));
         array_[size_].Destroy();
-        return iterator(array_ + i);
       }
-      return end();
+    } else {
+      map_->erase(position.hash_iter_);
     }
-    return iterator(map_->erase(position.hash_iter_));
   }
 
   size_t erase(const key_type& key) {
@@ -575,13 +574,17 @@
 
   // We want to call constructors and destructors manually, but we don't
   // want to allocate and deallocate the memory used for them separately.
-  // So, we use this crazy ManualConstructor class. Since C++11 it's possible
-  // to use objects in unions like this, but the ManualDestructor syntax is
-  // a bit better and doesn't have limitations on object type.
+  // So, we use this crazy ManualConstructor class.
   //
   // Since array_ and map_ are mutually exclusive, we'll put them in a
-  // union.
+  // union, too.  We add in a dummy_ value which quiets MSVC from otherwise
+  // giving an erroneous "union member has copy constructor" error message
+  // (C2621). This dummy member has to come before array_ to quiet the
+  // compiler.
+  //
+  // TODO(brettw) remove this and use C++11 unions when we require C++11.
   union {
+    ManualConstructor<value_type> dummy_;
     ManualConstructor<value_type> array_[kArraySize];
     ManualConstructor<NormalMap> map_;
   };
diff --git a/base/cpu.cc b/base/cpu.cc
index 848208f..de4a001 100644
--- a/base/cpu.cc
+++ b/base/cpu.cc
@@ -16,6 +16,7 @@
 
 #if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
 #include "base/files/file_util.h"
+#include "base/lazy_instance.h"
 #endif
 
 #if defined(ARCH_CPU_X86_FAMILY)
@@ -42,7 +43,6 @@
     has_ssse3_(false),
     has_sse41_(false),
     has_sse42_(false),
-    has_popcnt_(false),
     has_avx_(false),
     has_avx2_(false),
     has_aesni_(false),
@@ -59,22 +59,23 @@
 #if defined(__pic__) && defined(__i386__)
 
 void __cpuid(int cpu_info[4], int info_type) {
-  __asm__ volatile(
-      "mov %%ebx, %%edi\n"
-      "cpuid\n"
-      "xchg %%edi, %%ebx\n"
-      : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
-        "=d"(cpu_info[3])
-      : "a"(info_type), "c"(0));
+  __asm__ volatile (
+    "mov %%ebx, %%edi\n"
+    "cpuid\n"
+    "xchg %%edi, %%ebx\n"
+    : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type)
+  );
 }
 
 #else
 
 void __cpuid(int cpu_info[4], int info_type) {
-  __asm__ volatile("cpuid\n"
-                   : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
-                     "=d"(cpu_info[3])
-                   : "a"(info_type), "c"(0));
+  __asm__ volatile (
+    "cpuid\n"
+    : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type)
+  );
 }
 
 #endif
@@ -93,8 +94,9 @@
 #endif  // ARCH_CPU_X86_FAMILY
 
 #if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
-std::string* CpuInfoBrand() {
-  static std::string* brand = []() {
+class LazyCpuInfoValue {
+ public:
+  LazyCpuInfoValue() {
     // This function finds the value from /proc/cpuinfo under the key "model
     // name" or "Processor". "model name" is used in Linux 3.8 and later (3.7
     // and later for arm64) and is shown once per CPU. "Processor" is used in
@@ -107,23 +109,30 @@
     ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
     DCHECK(!contents.empty());
     if (contents.empty()) {
-      return new std::string();
+      return;
     }
 
     std::istringstream iss(contents);
     std::string line;
     while (std::getline(iss, line)) {
-      if ((line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0 ||
+      if (brand_.empty() &&
+          (line.compare(0, strlen(kModelNamePrefix), kModelNamePrefix) == 0 ||
            line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)) {
-        return new std::string(line.substr(strlen(kModelNamePrefix)));
+        brand_.assign(line.substr(strlen(kModelNamePrefix)));
       }
     }
+  }
 
-    return new std::string();
-  }();
+  const std::string& brand() const { return brand_; }
 
-  return brand;
-}
+ private:
+  std::string brand_;
+  DISALLOW_COPY_AND_ASSIGN(LazyCpuInfoValue);
+};
+
+base::LazyInstance<LazyCpuInfoValue>::Leaky g_lazy_cpuinfo =
+    LAZY_INSTANCE_INITIALIZER;
+
 #endif  // defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) ||
         // defined(OS_LINUX))
 
@@ -168,8 +177,6 @@
     has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
     has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
     has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
-    has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;
-
     // AVX instructions will generate an illegal instruction exception unless
     //   a) they are supported by the CPU,
     //   b) XSAVE is supported by the CPU and
@@ -212,7 +219,7 @@
     has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
   }
 #elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
-  cpu_brand_.assign(*CpuInfoBrand());
+  cpu_brand_.assign(g_lazy_cpuinfo.Get().brand());
 #endif
 }
 
diff --git a/base/cpu.h b/base/cpu.h
index 0e24df6..0e4303b 100644
--- a/base/cpu.h
+++ b/base/cpu.h
@@ -46,7 +46,6 @@
   bool has_ssse3() const { return has_ssse3_; }
   bool has_sse41() const { return has_sse41_; }
   bool has_sse42() const { return has_sse42_; }
-  bool has_popcnt() const { return has_popcnt_; }
   bool has_avx() const { return has_avx_; }
   bool has_avx2() const { return has_avx2_; }
   bool has_aesni() const { return has_aesni_; }
@@ -75,7 +74,6 @@
   bool has_ssse3_;
   bool has_sse41_;
   bool has_sse42_;
-  bool has_popcnt_;
   bool has_avx_;
   bool has_avx2_;
   bool has_aesni_;
diff --git a/base/cpu_unittest.cc b/base/cpu_unittest.cc
index 9cabfd6..ec14620 100644
--- a/base/cpu_unittest.cc
+++ b/base/cpu_unittest.cc
@@ -57,11 +57,6 @@
     __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
   }
 
-  if (cpu.has_popcnt()) {
-    // Execute a POPCNT instruction.
-    __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
-  }
-
   if (cpu.has_avx()) {
     // Execute an AVX instruction.
     __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
@@ -105,11 +100,6 @@
     __asm crc32 eax, eax;
   }
 
-  if (cpu.has_popcnt()) {
-    // Execute a POPCNT instruction.
-    __asm popcnt eax, eax;
-  }
-
 // Visual C 2012 required for AVX.
 #if _MSC_VER >= 1700
   if (cpu.has_avx()) {
diff --git a/base/critical_closure.h b/base/critical_closure.h
index 1b10cde..6ebd7af 100644
--- a/base/critical_closure.h
+++ b/base/critical_closure.h
@@ -25,15 +25,21 @@
 // This class wraps a closure so it can continue to run for a period of time
 // when the application goes to the background by using
 // |ios::ScopedCriticalAction|.
+template <typename R>
 class CriticalClosure {
  public:
-  explicit CriticalClosure(const Closure& closure);
-  ~CriticalClosure();
-  void Run();
+  explicit CriticalClosure(const Callback<R(void)>& closure)
+      : closure_(closure) {}
+
+  ~CriticalClosure() {}
+
+  R Run() {
+    return closure_.Run();
+  }
 
  private:
   ios::ScopedCriticalAction critical_action_;
-  Closure closure_;
+  Callback<R(void)> closure_;
 
   DISALLOW_COPY_AND_ASSIGN(CriticalClosure);
 };
@@ -41,7 +47,8 @@
 
 }  // namespace internal
 
-// Returns a closure that will continue to run for a period of time when the
+// Returns a closure (which may return a result, but must not require any extra
+// arguments) that will continue to run for a period of time when the
 // application goes to the background if possible on platforms where
 // applications don't execute while backgrounded, otherwise the original task is
 // returned.
@@ -55,13 +62,15 @@
 // background running time, |MakeCriticalClosure| should be applied on them
 // before posting.
 #if defined(OS_IOS)
-inline Closure MakeCriticalClosure(const Closure& closure) {
+template <typename R>
+Callback<R(void)> MakeCriticalClosure(const Callback<R(void)>& closure) {
   DCHECK(internal::IsMultiTaskingSupported());
-  return base::Bind(&internal::CriticalClosure::Run,
-                    Owned(new internal::CriticalClosure(closure)));
+  return base::Bind(&internal::CriticalClosure<R>::Run,
+                    Owned(new internal::CriticalClosure<R>(closure)));
 }
 #else  // defined(OS_IOS)
-inline Closure MakeCriticalClosure(const Closure& closure) {
+template <typename R>
+inline Callback<R(void)> MakeCriticalClosure(const Callback<R(void)>& closure) {
   // No-op for platforms where the application does not need to acquire
   // background time for closures to finish when it goes into the background.
   return closure;
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
deleted file mode 100644
index 40e9b95..0000000
--- a/base/debug/activity_tracker.cc
+++ /dev/null
@@ -1,1389 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/debug/activity_tracker.h"
-
-#include <algorithm>
-#include <limits>
-#include <utility>
-
-#include "base/atomic_sequence_num.h"
-#include "base/debug/stack_trace.h"
-#include "base/files/file.h"
-#include "base/files/file_path.h"
-#include "base/files/memory_mapped_file.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/field_trial.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/pending_task.h"
-#include "base/pickle.h"
-#include "base/process/process.h"
-#include "base/process/process_handle.h"
-#include "base/stl_util.h"
-#include "base/strings/string_util.h"
-#include "base/threading/platform_thread.h"
-
-namespace base {
-namespace debug {
-
-namespace {
-
-// A number that identifies the memory as having been initialized. It's
-// arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
-// A version number is added on so that major structure changes won't try to
-// read an older version (since the cookie won't match).
-const uint32_t kHeaderCookie = 0xC0029B24UL + 2;  // v2
-
-// The minimum depth a stack should support.
-const int kMinStackDepth = 2;
-
-// The amount of memory set aside for holding arbitrary user data (key/value
-// pairs) globally or associated with ActivityData entries.
-const size_t kUserDataSize = 1 << 10;     // 1 KiB
-const size_t kGlobalDataSize = 16 << 10;  // 16 KiB
-const size_t kMaxUserDataNameLength =
-    static_cast<size_t>(std::numeric_limits<uint8_t>::max());
-
-// A constant used to indicate that module information is changing.
-const uint32_t kModuleInformationChanging = 0x80000000;
-
-union ThreadRef {
-  int64_t as_id;
-#if defined(OS_WIN)
-  // On Windows, the handle itself is often a pseudo-handle with a common
-  // value meaning "this thread" and so the thread-id is used. The former
-  // can be converted to a thread-id with a system call.
-  PlatformThreadId as_tid;
-#elif defined(OS_POSIX)
-  // On Posix, the handle is always a unique identifier so no conversion
-  // needs to be done. However, it's value is officially opaque so there
-  // is no one correct way to convert it to a numerical identifier.
-  PlatformThreadHandle::Handle as_handle;
-#endif
-};
-
-// Determines the previous aligned index.
-size_t RoundDownToAlignment(size_t index, size_t alignment) {
-  return index & (0 - alignment);
-}
-
-// Determines the next aligned index.
-size_t RoundUpToAlignment(size_t index, size_t alignment) {
-  return (index + (alignment - 1)) & (0 - alignment);
-}
-
-}  // namespace
-
-
-// It doesn't matter what is contained in this (though it will be all zeros)
-// as only the address of it is important.
-const ActivityData kNullActivityData = {};
-
-ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
-  ThreadRef thread_ref;
-  thread_ref.as_id = 0;  // Zero the union in case other is smaller.
-#if defined(OS_WIN)
-  thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
-#elif defined(OS_POSIX)
-  thread_ref.as_handle = handle.platform_handle();
-#endif
-  return ForThread(thread_ref.as_id);
-}
-
-ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
-    PersistentMemoryAllocator* allocator,
-    uint32_t object_type,
-    uint32_t object_free_type,
-    size_t object_size,
-    size_t cache_size,
-    bool make_iterable)
-    : allocator_(allocator),
-      object_type_(object_type),
-      object_free_type_(object_free_type),
-      object_size_(object_size),
-      cache_size_(cache_size),
-      make_iterable_(make_iterable),
-      iterator_(allocator),
-      cache_values_(new Reference[cache_size]),
-      cache_used_(0) {
-  DCHECK(allocator);
-}
-
-ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
-
-ActivityTrackerMemoryAllocator::Reference
-ActivityTrackerMemoryAllocator::GetObjectReference() {
-  // First see if there is a cached value that can be returned. This is much
-  // faster than searching the memory system for free blocks.
-  while (cache_used_ > 0) {
-    Reference cached = cache_values_[--cache_used_];
-    // Change the type of the cached object to the proper type and return it.
-    // If the type-change fails that means another thread has taken this from
-    // under us (via the search below) so ignore it and keep trying. Don't
-    // clear the memory because that was done when the type was made "free".
-    if (allocator_->ChangeType(cached, object_type_, object_free_type_, false))
-      return cached;
-  }
-
-  // Fetch the next "free" object from persistent memory. Rather than restart
-  // the iterator at the head each time and likely waste time going again
-  // through objects that aren't relevant, the iterator continues from where
-  // it last left off and is only reset when the end is reached. If the
-  // returned reference matches |last|, then it has wrapped without finding
-  // anything.
-  const Reference last = iterator_.GetLast();
-  while (true) {
-    uint32_t type;
-    Reference found = iterator_.GetNext(&type);
-    if (found && type == object_free_type_) {
-      // Found a free object. Change it to the proper type and return it. If
-      // the type-change fails that means another thread has taken this from
-      // under us so ignore it and keep trying.
-      if (allocator_->ChangeType(found, object_type_, object_free_type_, false))
-        return found;
-    }
-    if (found == last) {
-      // Wrapped. No desired object was found.
-      break;
-    }
-    if (!found) {
-      // Reached end; start over at the beginning.
-      iterator_.Reset();
-    }
-  }
-
-  // No free block was found so instead allocate a new one.
-  Reference allocated = allocator_->Allocate(object_size_, object_type_);
-  if (allocated && make_iterable_)
-    allocator_->MakeIterable(allocated);
-  return allocated;
-}
-
-void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
-  // Mark object as free.
-  bool success = allocator_->ChangeType(ref, object_free_type_, object_type_,
-                                        /*clear=*/true);
-  DCHECK(success);
-
-  // Add this reference to our "free" cache if there is space. If not, the type
-  // has still been changed to indicate that it is free so this (or another)
-  // thread can find it, albeit more slowly, using the iteration method above.
-  if (cache_used_ < cache_size_)
-    cache_values_[cache_used_++] = ref;
-}
-
-// static
-void Activity::FillFrom(Activity* activity,
-                        const void* program_counter,
-                        const void* origin,
-                        Type type,
-                        const ActivityData& data) {
-  activity->time_internal = base::TimeTicks::Now().ToInternalValue();
-  activity->calling_address = reinterpret_cast<uintptr_t>(program_counter);
-  activity->origin_address = reinterpret_cast<uintptr_t>(origin);
-  activity->activity_type = type;
-  activity->data = data;
-
-#if defined(SYZYASAN)
-  // Create a stacktrace from the current location and get the addresses.
-  StackTrace stack_trace;
-  size_t stack_depth;
-  const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
-  // Copy the stack addresses, ignoring the first one (here).
-  size_t i;
-  for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
-    activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
-  }
-  activity->call_stack[i - 1] = 0;
-#else
-  activity->call_stack[0] = 0;
-#endif
-}
-
-ActivityUserData::TypedValue::TypedValue() {}
-ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
-ActivityUserData::TypedValue::~TypedValue() {}
-
-StringPiece ActivityUserData::TypedValue::Get() const {
-  DCHECK_EQ(RAW_VALUE, type_);
-  return long_value_;
-}
-
-StringPiece ActivityUserData::TypedValue::GetString() const {
-  DCHECK_EQ(STRING_VALUE, type_);
-  return long_value_;
-}
-
-bool ActivityUserData::TypedValue::GetBool() const {
-  DCHECK_EQ(BOOL_VALUE, type_);
-  return short_value_ != 0;
-}
-
-char ActivityUserData::TypedValue::GetChar() const {
-  DCHECK_EQ(CHAR_VALUE, type_);
-  return static_cast<char>(short_value_);
-}
-
-int64_t ActivityUserData::TypedValue::GetInt() const {
-  DCHECK_EQ(SIGNED_VALUE, type_);
-  return static_cast<int64_t>(short_value_);
-}
-
-uint64_t ActivityUserData::TypedValue::GetUint() const {
-  DCHECK_EQ(UNSIGNED_VALUE, type_);
-  return static_cast<uint64_t>(short_value_);
-}
-
-StringPiece ActivityUserData::TypedValue::GetReference() const {
-  DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
-  return ref_value_;
-}
-
-StringPiece ActivityUserData::TypedValue::GetStringReference() const {
-  DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
-  return ref_value_;
-}
-
-ActivityUserData::ValueInfo::ValueInfo() {}
-ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
-ActivityUserData::ValueInfo::~ValueInfo() {}
-
-StaticAtomicSequenceNumber ActivityUserData::next_id_;
-
-ActivityUserData::ActivityUserData(void* memory, size_t size)
-    : memory_(reinterpret_cast<char*>(memory)),
-      available_(RoundDownToAlignment(size, kMemoryAlignment)),
-      id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) {
-  // It's possible that no user data is being stored.
-  if (!memory_)
-    return;
-
-  DCHECK_LT(kMemoryAlignment, available_);
-  if (id_->load(std::memory_order_relaxed) == 0) {
-    // Generate a new ID and store it in the first 32-bit word of memory_.
-    // |id_| must be non-zero for non-sink instances.
-    uint32_t id;
-    while ((id = next_id_.GetNext()) == 0)
-      ;
-    id_->store(id, std::memory_order_relaxed);
-    DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
-  }
-  memory_ += kMemoryAlignment;
-  available_ -= kMemoryAlignment;
-
-  // If there is already data present, load that. This allows the same class
-  // to be used for analysis through snapshots.
-  ImportExistingData();
-}
-
-ActivityUserData::~ActivityUserData() {}
-
-void ActivityUserData::Set(StringPiece name,
-                           ValueType type,
-                           const void* memory,
-                           size_t size) {
-  DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
-  size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
-                  size);
-
-  // It's possible that no user data is being stored.
-  if (!memory_)
-    return;
-
-  // The storage of a name is limited so use that limit during lookup.
-  if (name.length() > kMaxUserDataNameLength)
-    name.set(name.data(), kMaxUserDataNameLength);
-
-  ValueInfo* info;
-  auto existing = values_.find(name);
-  if (existing != values_.end()) {
-    info = &existing->second;
-  } else {
-    // The name size is limited to what can be held in a single byte but
-    // because there are not alignment constraints on strings, it's set tight
-    // against the header. Its extent (the reserved space, even if it's not
-    // all used) is calculated so that, when pressed against the header, the
-    // following field will be aligned properly.
-    size_t name_size = name.length();
-    size_t name_extent =
-        RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
-        sizeof(Header);
-    size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
-
-    // The "base size" is the size of the header and (padded) string key. Stop
-    // now if there's not room enough for even this.
-    size_t base_size = sizeof(Header) + name_extent;
-    if (base_size > available_)
-      return;
-
-    // The "full size" is the size for storing the entire value.
-    size_t full_size = std::min(base_size + value_extent, available_);
-
-    // If the value is actually a single byte, see if it can be stuffed at the
-    // end of the name extent rather than wasting kMemoryAlignment bytes.
-    if (size == 1 && name_extent > name_size) {
-      full_size = base_size;
-      --name_extent;
-      --base_size;
-    }
-
-    // Truncate the stored size to the amount of available memory. Stop now if
-    // there's not any room for even part of the value.
-    if (size != 0) {
-      size = std::min(full_size - base_size, size);
-      if (size == 0)
-        return;
-    }
-
-    // Allocate a chunk of memory.
-    Header* header = reinterpret_cast<Header*>(memory_);
-    memory_ += full_size;
-    available_ -= full_size;
-
-    // Datafill the header and name records. Memory must be zeroed. The |type|
-    // is written last, atomically, to release all the other values.
-    DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
-    DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
-    header->name_size = static_cast<uint8_t>(name_size);
-    header->record_size = full_size;
-    char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
-    void* value_memory =
-        reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
-    memcpy(name_memory, name.data(), name_size);
-    header->type.store(type, std::memory_order_release);
-
-    // Create an entry in |values_| so that this field can be found and changed
-    // later on without having to allocate new entries.
-    StringPiece persistent_name(name_memory, name_size);
-    auto inserted =
-        values_.insert(std::make_pair(persistent_name, ValueInfo()));
-    DCHECK(inserted.second);  // True if inserted, false if existed.
-    info = &inserted.first->second;
-    info->name = persistent_name;
-    info->memory = value_memory;
-    info->size_ptr = &header->value_size;
-    info->extent = full_size - sizeof(Header) - name_extent;
-    info->type = type;
-  }
-
-  // Copy the value data to storage. The |size| is written last, atomically, to
-  // release the copied data. Until then, a parallel reader will just ignore
-  // records with a zero size.
-  DCHECK_EQ(type, info->type);
-  size = std::min(size, info->extent);
-  info->size_ptr->store(0, std::memory_order_seq_cst);
-  memcpy(info->memory, memory, size);
-  info->size_ptr->store(size, std::memory_order_release);
-}
-
-void ActivityUserData::SetReference(StringPiece name,
-                                    ValueType type,
-                                    const void* memory,
-                                    size_t size) {
-  ReferenceRecord rec;
-  rec.address = reinterpret_cast<uintptr_t>(memory);
-  rec.size = size;
-  Set(name, type, &rec, sizeof(rec));
-}
-
-void ActivityUserData::ImportExistingData() const {
-  while (available_ > sizeof(Header)) {
-    Header* header = reinterpret_cast<Header*>(memory_);
-    ValueType type =
-        static_cast<ValueType>(header->type.load(std::memory_order_acquire));
-    if (type == END_OF_VALUES)
-      return;
-    if (header->record_size > available_)
-      return;
-
-    size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size,
-                                             kMemoryAlignment);
-    if (header->record_size == value_offset &&
-        header->value_size.load(std::memory_order_relaxed) == 1) {
-      value_offset -= 1;
-    }
-    if (value_offset + header->value_size > header->record_size)
-      return;
-
-    ValueInfo info;
-    info.name = StringPiece(memory_ + sizeof(Header), header->name_size);
-    info.type = type;
-    info.memory = memory_ + value_offset;
-    info.size_ptr = &header->value_size;
-    info.extent = header->record_size - value_offset;
-
-    StringPiece key(info.name);
-    values_.insert(std::make_pair(key, std::move(info)));
-
-    memory_ += header->record_size;
-    available_ -= header->record_size;
-  }
-}
-
-bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
-  DCHECK(output_snapshot);
-  DCHECK(output_snapshot->empty());
-
-  // Find any new data that may have been added by an active instance of this
-  // class that is adding records.
-  ImportExistingData();
-
-  for (const auto& entry : values_) {
-    TypedValue value;
-    value.type_ = entry.second.type;
-    DCHECK_GE(entry.second.extent,
-              entry.second.size_ptr->load(std::memory_order_relaxed));
-
-    switch (entry.second.type) {
-      case RAW_VALUE:
-      case STRING_VALUE:
-        value.long_value_ =
-            std::string(reinterpret_cast<char*>(entry.second.memory),
-                        entry.second.size_ptr->load(std::memory_order_relaxed));
-        break;
-      case RAW_VALUE_REFERENCE:
-      case STRING_VALUE_REFERENCE: {
-        ReferenceRecord* ref =
-            reinterpret_cast<ReferenceRecord*>(entry.second.memory);
-        value.ref_value_ = StringPiece(
-            reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
-            static_cast<size_t>(ref->size));
-      } break;
-      case BOOL_VALUE:
-      case CHAR_VALUE:
-        value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
-        break;
-      case SIGNED_VALUE:
-      case UNSIGNED_VALUE:
-        value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
-        break;
-      case END_OF_VALUES:  // Included for completeness purposes.
-        NOTREACHED();
-    }
-    auto inserted = output_snapshot->insert(
-        std::make_pair(entry.second.name.as_string(), std::move(value)));
-    DCHECK(inserted.second);  // True if inserted, false if existed.
-  }
-
-  return true;
-}
-
-const void* ActivityUserData::GetBaseAddress() {
-  // The |memory_| pointer advances as elements are written but the |id_|
-  // value is always at the start of the block so just return that.
-  return id_;
-}
-
-// This information is kept for every thread that is tracked. It is filled
-// the very first time the thread is seen. All fields must be of exact sizes
-// so there is no issue moving between 32 and 64-bit builds.
-struct ThreadActivityTracker::Header {
-  // Defined in .h for analyzer access. Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId =
-      GlobalActivityTracker::kTypeIdActivityTracker;
-
-  // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize = 80;
-
-  // This unique number indicates a valid initialization of the memory.
-  std::atomic<uint32_t> cookie;
-
-  // The number of Activity slots (spaces that can hold an Activity) that
-  // immediately follow this structure in memory.
-  uint32_t stack_slots;
-
-  // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
-  // These identifiers are not guaranteed to mean anything but are unique, in
-  // combination, among all active trackers. It would be nice to always have
-  // the process_id be a 64-bit value but the necessity of having it atomic
-  // (for the memory barriers it provides) limits it to the natural word size
-  // of the machine.
-#ifdef ARCH_CPU_64_BITS
-  std::atomic<int64_t> process_id;
-#else
-  std::atomic<int32_t> process_id;
-  int32_t process_id_padding;
-#endif
-  ThreadRef thread_ref;
-
-  // The start-time and start-ticks when the data was created. Each activity
-  // record has a |time_internal| value that can be converted to a "wall time"
-  // with these two values.
-  int64_t start_time;
-  int64_t start_ticks;
-
-  // The current depth of the stack. This may be greater than the number of
-  // slots. If the depth exceeds the number of slots, the newest entries
-  // won't be recorded.
-  std::atomic<uint32_t> current_depth;
-
-  // A memory location used to indicate if changes have been made to the stack
-  // that would invalidate an in-progress read of its contents. The active
-  // tracker will zero the value whenever something gets popped from the
-  // stack. A monitoring tracker can write a non-zero value here, copy the
-  // stack contents, and read the value to know, if it is still non-zero, that
-  // the contents didn't change while being copied. This can handle concurrent
-  // snapshot operations only if each snapshot writes a different bit (which
-  // is not the current implementation so no parallel snapshots allowed).
-  std::atomic<uint32_t> stack_unchanged;
-
-  // The name of the thread (up to a maximum length). Dynamic-length names
-  // are not practical since the memory has to come from the same persistent
-  // allocator that holds this structure and to which this object has no
-  // reference.
-  char thread_name[32];
-};
-
-ThreadActivityTracker::Snapshot::Snapshot() {}
-ThreadActivityTracker::Snapshot::~Snapshot() {}
-
-ThreadActivityTracker::ScopedActivity::ScopedActivity(
-    ThreadActivityTracker* tracker,
-    const void* program_counter,
-    const void* origin,
-    Activity::Type type,
-    const ActivityData& data)
-    : tracker_(tracker) {
-  if (tracker_)
-    activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
-}
-
-ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
-  if (tracker_)
-    tracker_->PopActivity(activity_id_);
-}
-
-void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
-    Activity::Type type,
-    const ActivityData& data) {
-  if (tracker_)
-    tracker_->ChangeActivity(activity_id_, type, data);
-}
-
-ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
-    : header_(static_cast<Header*>(base)),
-      stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
-                                         sizeof(Header))),
-      stack_slots_(
-          static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-
-  // Verify the parameters but fail gracefully if they're not valid so that
-  // production code based on external inputs will not crash.  IsValid() will
-  // return false in this case.
-  if (!base ||
-      // Ensure there is enough space for the header and at least a few records.
-      size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
-      // Ensure that the |stack_slots_| calculation didn't overflow.
-      (size - sizeof(Header)) / sizeof(Activity) >
-          std::numeric_limits<uint32_t>::max()) {
-    NOTREACHED();
-    return;
-  }
-
-  // Ensure that the thread reference doesn't exceed the size of the ID number.
-  // This won't compile at the global scope because Header is a private struct.
-  static_assert(
-      sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
-      "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
-
-  // Ensure that the alignment of Activity.data is properly aligned to a
-  // 64-bit boundary so there are no interoperability-issues across cpu
-  // architectures.
-  static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
-                "ActivityData.data is not 64-bit aligned");
-
-  // Provided memory should either be completely initialized or all zeros.
-  if (header_->cookie.load(std::memory_order_relaxed) == 0) {
-    // This is a new file. Double-check other fields and then initialize.
-    DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed));
-    DCHECK_EQ(0, header_->thread_ref.as_id);
-    DCHECK_EQ(0, header_->start_time);
-    DCHECK_EQ(0, header_->start_ticks);
-    DCHECK_EQ(0U, header_->stack_slots);
-    DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
-    DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
-    DCHECK_EQ(0, stack_[0].time_internal);
-    DCHECK_EQ(0U, stack_[0].origin_address);
-    DCHECK_EQ(0U, stack_[0].call_stack[0]);
-    DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
-
-#if defined(OS_WIN)
-    header_->thread_ref.as_tid = PlatformThread::CurrentId();
-#elif defined(OS_POSIX)
-    header_->thread_ref.as_handle =
-        PlatformThread::CurrentHandle().platform_handle();
-#endif
-    header_->process_id.store(GetCurrentProcId(), std::memory_order_relaxed);
-
-    header_->start_time = base::Time::Now().ToInternalValue();
-    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
-    header_->stack_slots = stack_slots_;
-    strlcpy(header_->thread_name, PlatformThread::GetName(),
-            sizeof(header_->thread_name));
-
-    // This is done last so as to guarantee that everything above is "released"
-    // by the time this value gets written.
-    header_->cookie.store(kHeaderCookie, std::memory_order_release);
-
-    valid_ = true;
-    DCHECK(IsValid());
-  } else {
-    // This is a file with existing data. Perform basic consistency checks.
-    valid_ = true;
-    valid_ = IsValid();
-  }
-}
-
-ThreadActivityTracker::~ThreadActivityTracker() {}
-
-ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
-    const void* program_counter,
-    const void* origin,
-    Activity::Type type,
-    const ActivityData& data) {
-  // A thread-checker creates a lock to check the thread-id which means
-  // re-entry into this code if lock acquisitions are being tracked.
-  DCHECK(type == Activity::ACT_LOCK_ACQUIRE ||
-         thread_checker_.CalledOnValidThread());
-
-  // Get the current depth of the stack. No access to other memory guarded
-  // by this variable is done here so a "relaxed" load is acceptable.
-  uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
-
-  // Handle the case where the stack depth has exceeded the storage capacity.
-  // Extra entries will be lost leaving only the base of the stack.
-  if (depth >= stack_slots_) {
-    // Since no other threads modify the data, no compare/exchange is needed.
-    // Since no other memory is being modified, a "relaxed" store is acceptable.
-    header_->current_depth.store(depth + 1, std::memory_order_relaxed);
-    return depth;
-  }
-
-  // Get a pointer to the next activity and load it. No atomicity is required
-  // here because the memory is known only to this thread. It will be made
-  // known to other threads once the depth is incremented.
-  Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);
-
-  // Save the incremented depth. Because this guards |activity| memory filled
-  // above that may be read by another thread once the recorded depth changes,
-  // a "release" store is required.
-  header_->current_depth.store(depth + 1, std::memory_order_release);
-
-  // The current depth is used as the activity ID because it simply identifies
-  // an entry. Once an entry is pop'd, it's okay to reuse the ID.
-  return depth;
-}
-
-void ThreadActivityTracker::ChangeActivity(ActivityId id,
-                                           Activity::Type type,
-                                           const ActivityData& data) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
-  DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
-
-  // Update the information if it is being recorded (i.e. within slot limit).
-  if (id < stack_slots_) {
-    Activity* activity = &stack_[id];
-
-    if (type != Activity::ACT_NULL) {
-      DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
-                type & Activity::ACT_CATEGORY_MASK);
-      activity->activity_type = type;
-    }
-
-    if (&data != &kNullActivityData)
-      activity->data = data;
-  }
-}
-
-void ThreadActivityTracker::PopActivity(ActivityId id) {
-  // Do an atomic decrement of the depth. No changes to stack entries guarded
-  // by this variable are done here so a "relaxed" operation is acceptable.
-  // |depth| will receive the value BEFORE it was modified which means the
-  // return value must also be decremented. The slot will be "free" after
-  // this call but since only a single thread can access this object, the
-  // data will remain valid until this method returns or calls outside.
-  uint32_t depth =
-      header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
-
-  // Validate that everything is running correctly.
-  DCHECK_EQ(id, depth);
-
-  // A thread-checker creates a lock to check the thread-id which means
-  // re-entry into this code if lock acquisitions are being tracked.
-  DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
-         thread_checker_.CalledOnValidThread());
-
-  // The stack has shrunk meaning that some other thread trying to copy the
-  // contents for reporting purposes could get bad data. That thread would
-  // have written a non-zero value into |stack_unchanged|; clearing it here
-  // will let that thread detect that something did change. This needs to
-  // happen after the atomic |depth| operation above so a "release" store
-  // is required.
-  header_->stack_unchanged.store(0, std::memory_order_release);
-}
-
-std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
-    ActivityId id,
-    ActivityTrackerMemoryAllocator* allocator) {
-  // User-data is only stored for activities actually held in the stack.
-  if (id < stack_slots_) {
-    // Don't allow user data for lock acquisition as recursion may occur.
-    if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
-      NOTREACHED();
-      return MakeUnique<ActivityUserData>(nullptr, 0);
-    }
-
-    // Get (or reuse) a block of memory and create a real UserData object
-    // on it.
-    PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
-    void* memory =
-        allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
-    if (memory) {
-      std::unique_ptr<ActivityUserData> user_data =
-          MakeUnique<ActivityUserData>(memory, kUserDataSize);
-      stack_[id].user_data_ref = ref;
-      stack_[id].user_data_id = user_data->id();
-      return user_data;
-    }
-  }
-
-  // Return a dummy object that will still accept (but ignore) Set() calls.
-  return MakeUnique<ActivityUserData>(nullptr, 0);
-}
-
-bool ThreadActivityTracker::HasUserData(ActivityId id) {
-  // User-data is only stored for activities actually held in the stack.
-  return (id < stack_slots_ && stack_[id].user_data_ref);
-}
-
-void ThreadActivityTracker::ReleaseUserData(
-    ActivityId id,
-    ActivityTrackerMemoryAllocator* allocator) {
-  // User-data is only stored for activities actually held in the stack.
-  if (id < stack_slots_ && stack_[id].user_data_ref) {
-    allocator->ReleaseObjectReference(stack_[id].user_data_ref);
-    stack_[id].user_data_ref = 0;
-  }
-}
-
-bool ThreadActivityTracker::IsValid() const {
-  if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
-      header_->process_id.load(std::memory_order_relaxed) == 0 ||
-      header_->thread_ref.as_id == 0 ||
-      header_->start_time == 0 ||
-      header_->start_ticks == 0 ||
-      header_->stack_slots != stack_slots_ ||
-      header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
-    return false;
-  }
-
-  return valid_;
-}
-
-bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
-  DCHECK(output_snapshot);
-
-  // There is no "called on valid thread" check for this method as it can be
-  // called from other threads or even other processes. It is also the reason
-  // why atomic operations must be used in certain places above.
-
-  // It's possible for the data to change while reading it in such a way that it
-  // invalidates the read. Make several attempts but don't try forever.
-  const int kMaxAttempts = 10;
-  uint32_t depth;
-
-  // Stop here if the data isn't valid.
-  if (!IsValid())
-    return false;
-
-  // Allocate the maximum size for the stack so it doesn't have to be done
-  // during the time-sensitive snapshot operation. It is shrunk once the
-  // actual size is known.
-  output_snapshot->activity_stack.reserve(stack_slots_);
-
-  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
-    // Remember the process and thread IDs to ensure they aren't replaced
-    // during the snapshot operation. Use "acquire" to ensure that all the
-    // non-atomic fields of the structure are valid (at least at the current
-    // moment in time).
-    const int64_t starting_process_id =
-        header_->process_id.load(std::memory_order_acquire);
-    const int64_t starting_thread_id = header_->thread_ref.as_id;
-
-    // Write a non-zero value to |stack_unchanged| so it's possible to detect
-    // at the end that nothing has changed since copying the data began. A
-    // "cst" operation is required to ensure it occurs before everything else.
-    // Using "cst" memory ordering is relatively expensive but this is only
-    // done during analysis so doesn't directly affect the worker threads.
-    header_->stack_unchanged.store(1, std::memory_order_seq_cst);
-
-    // Fetching the current depth also "acquires" the contents of the stack.
-    depth = header_->current_depth.load(std::memory_order_acquire);
-    uint32_t count = std::min(depth, stack_slots_);
-    output_snapshot->activity_stack.resize(count);
-    if (count > 0) {
-      // Copy the existing contents. Memcpy is used for speed.
-      memcpy(&output_snapshot->activity_stack[0], stack_,
-             count * sizeof(Activity));
-    }
-
-    // Retry if something changed during the copy. A "cst" operation ensures
-    // it must happen after all the above operations.
-    if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
-      continue;
-
-    // Stack copied. Record it's full depth.
-    output_snapshot->activity_stack_depth = depth;
-
-    // TODO(bcwhite): Snapshot other things here.
-
-    // Get the general thread information. Loading of "process_id" is guaranteed
-    // to be last so that it's possible to detect below if any content has
-    // changed while reading it. It's technically possible for a thread to end,
-    // have its data cleared, a new thread get created with the same IDs, and
-    // it perform an action which starts tracking all in the time since the
-    // ID reads above but the chance is so unlikely that it's not worth the
-    // effort and complexity of protecting against it (perhaps with an
-    // "unchanged" field like is done for the stack).
-    output_snapshot->thread_name =
-        std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
-    output_snapshot->thread_id = header_->thread_ref.as_id;
-    output_snapshot->process_id =
-        header_->process_id.load(std::memory_order_seq_cst);
-
-    // All characters of the thread-name buffer were copied so as to not break
-    // if the trailing NUL were missing. Now limit the length if the actual
-    // name is shorter.
-    output_snapshot->thread_name.resize(
-        strlen(output_snapshot->thread_name.c_str()));
-
-    // If the process or thread ID has changed then the tracker has exited and
-    // the memory reused by a new one. Try again.
-    if (output_snapshot->process_id != starting_process_id ||
-        output_snapshot->thread_id != starting_thread_id) {
-      continue;
-    }
-
-    // Only successful if the data is still valid once everything is done since
-    // it's possible for the thread to end somewhere in the middle and all its
-    // values become garbage.
-    if (!IsValid())
-      return false;
-
-    // Change all the timestamps in the activities from "ticks" to "wall" time.
-    const Time start_time = Time::FromInternalValue(header_->start_time);
-    const int64_t start_ticks = header_->start_ticks;
-    for (Activity& activity : output_snapshot->activity_stack) {
-      activity.time_internal =
-          (start_time +
-           TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
-              .ToInternalValue();
-    }
-
-    // Success!
-    return true;
-  }
-
-  // Too many attempts.
-  return false;
-}
-
-// static
-size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
-  return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
-}
-
-// The instantiation of the GlobalActivityTracker object.
-// The object held here will obviously not be destructed at process exit
-// but that's best since PersistentMemoryAllocator objects (that underlie
-// GlobalActivityTracker objects) are explicitly forbidden from doing anything
-// essential at exit anyway due to the fact that they depend on data managed
-// elsewhere and which could be destructed first. An AtomicWord is used instead
-// of std::atomic because the latter can create global ctors and dtors.
-subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0;
-
-GlobalActivityTracker::ModuleInfo::ModuleInfo() {}
-GlobalActivityTracker::ModuleInfo::ModuleInfo(ModuleInfo&& rhs) = default;
-GlobalActivityTracker::ModuleInfo::ModuleInfo(const ModuleInfo& rhs) = default;
-GlobalActivityTracker::ModuleInfo::~ModuleInfo() {}
-
-GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
-    ModuleInfo&& rhs) = default;
-GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
-    const ModuleInfo& rhs) = default;
-
-GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() {}
-GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() {}
-
-bool GlobalActivityTracker::ModuleInfoRecord::DecodeTo(
-    GlobalActivityTracker::ModuleInfo* info,
-    size_t record_size) const {
-  // Get the current "changes" indicator, acquiring all the other values.
-  uint32_t current_changes = changes.load(std::memory_order_acquire);
-
-  // Copy out the dynamic information.
-  info->is_loaded = loaded != 0;
-  info->address = static_cast<uintptr_t>(address);
-  info->load_time = load_time;
-
-  // Check to make sure no information changed while being read. A "seq-cst"
-  // operation is expensive but is only done during analysis and it's the only
-  // way to ensure this occurs after all the accesses above. If changes did
-  // occur then return a "not loaded" result so that |size| and |address|
-  // aren't expected to be accurate.
-  if ((current_changes & kModuleInformationChanging) != 0 ||
-      changes.load(std::memory_order_seq_cst) != current_changes) {
-    info->is_loaded = false;
-  }
-
-  // Copy out the static information. These never change so don't have to be
-  // protected by the atomic |current_changes| operations.
-  info->size = static_cast<size_t>(size);
-  info->timestamp = timestamp;
-  info->age = age;
-  memcpy(info->identifier, identifier, sizeof(info->identifier));
-
-  if (offsetof(ModuleInfoRecord, pickle) + pickle_size > record_size)
-    return false;
-  Pickle pickler(pickle, pickle_size);
-  PickleIterator iter(pickler);
-  return iter.ReadString(&info->file) && iter.ReadString(&info->debug_file);
-}
-
-bool GlobalActivityTracker::ModuleInfoRecord::EncodeFrom(
-    const GlobalActivityTracker::ModuleInfo& info,
-    size_t record_size) {
-  Pickle pickler;
-  bool okay =
-      pickler.WriteString(info.file) && pickler.WriteString(info.debug_file);
-  if (!okay) {
-    NOTREACHED();
-    return false;
-  }
-  if (offsetof(ModuleInfoRecord, pickle) + pickler.size() > record_size) {
-    NOTREACHED();
-    return false;
-  }
-
-  // These fields never changes and are done before the record is made
-  // iterable so no thread protection is necessary.
-  size = info.size;
-  timestamp = info.timestamp;
-  age = info.age;
-  memcpy(identifier, info.identifier, sizeof(identifier));
-  memcpy(pickle, pickler.data(), pickler.size());
-  pickle_size = pickler.size();
-  changes.store(0, std::memory_order_relaxed);
-
-  // Now set those fields that can change.
-  return UpdateFrom(info);
-}
-
-bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
-    const GlobalActivityTracker::ModuleInfo& info) {
-  // Updates can occur after the record is made visible so make changes atomic.
-  // A "strong" exchange ensures no false failures.
-  uint32_t old_changes = changes.load(std::memory_order_relaxed);
-  uint32_t new_changes = old_changes | kModuleInformationChanging;
-  if ((old_changes & kModuleInformationChanging) != 0 ||
-      !changes.compare_exchange_strong(old_changes, new_changes,
-                                       std::memory_order_acquire,
-                                       std::memory_order_acquire)) {
-    NOTREACHED() << "Multiple sources are updating module information.";
-    return false;
-  }
-
-  loaded = info.is_loaded ? 1 : 0;
-  address = info.address;
-  load_time = Time::Now().ToInternalValue();
-
-  bool success = changes.compare_exchange_strong(new_changes, old_changes + 1,
-                                                 std::memory_order_release,
-                                                 std::memory_order_relaxed);
-  DCHECK(success);
-  return true;
-}
-
-// static
-size_t GlobalActivityTracker::ModuleInfoRecord::EncodedSize(
-    const GlobalActivityTracker::ModuleInfo& info) {
-  PickleSizer sizer;
-  sizer.AddString(info.file);
-  sizer.AddString(info.debug_file);
-
-  return offsetof(ModuleInfoRecord, pickle) + sizeof(Pickle::Header) +
-         sizer.payload_size();
-}
-
-GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity(
-    const void* program_counter,
-    const void* origin,
-    Activity::Type type,
-    const ActivityData& data,
-    bool lock_allowed)
-    : ThreadActivityTracker::ScopedActivity(GetOrCreateTracker(lock_allowed),
-                                            program_counter,
-                                            origin,
-                                            type,
-                                            data) {}
-
-GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() {
-  if (tracker_ && tracker_->HasUserData(activity_id_)) {
-    GlobalActivityTracker* global = GlobalActivityTracker::Get();
-    AutoLock lock(global->user_data_allocator_lock_);
-    tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_);
-  }
-}
-
-ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
-  if (!user_data_) {
-    if (tracker_) {
-      GlobalActivityTracker* global = GlobalActivityTracker::Get();
-      AutoLock lock(global->user_data_allocator_lock_);
-      user_data_ =
-          tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
-    } else {
-      user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
-    }
-  }
-  return *user_data_;
-}
-
-GlobalActivityTracker::GlobalUserData::GlobalUserData(void* memory, size_t size)
-    : ActivityUserData(memory, size) {}
-
-GlobalActivityTracker::GlobalUserData::~GlobalUserData() {}
-
-void GlobalActivityTracker::GlobalUserData::Set(StringPiece name,
-                                                ValueType type,
-                                                const void* memory,
-                                                size_t size) {
-  AutoLock lock(data_lock_);
-  ActivityUserData::Set(name, type, memory, size);
-}
-
-GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
-    PersistentMemoryAllocator::Reference mem_reference,
-    void* base,
-    size_t size)
-    : ThreadActivityTracker(base, size),
-      mem_reference_(mem_reference),
-      mem_base_(base) {}
-
-GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
-  // The global |g_tracker_| must point to the owner of this class since all
-  // objects of this type must be destructed before |g_tracker_| can be changed
-  // (something that only occurs in tests).
-  DCHECK(g_tracker_);
-  GlobalActivityTracker::Get()->ReturnTrackerMemory(this);
-}
-
-void GlobalActivityTracker::CreateWithAllocator(
-    std::unique_ptr<PersistentMemoryAllocator> allocator,
-    int stack_depth) {
-  // There's no need to do anything with the result. It is self-managing.
-  GlobalActivityTracker* global_tracker =
-      new GlobalActivityTracker(std::move(allocator), stack_depth);
-  // Create a tracker for this thread since it is known.
-  global_tracker->CreateTrackerForCurrentThread();
-}
-
-#if !defined(OS_NACL)
-// static
-void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
-                                           size_t size,
-                                           uint64_t id,
-                                           StringPiece name,
-                                           int stack_depth) {
-  DCHECK(!file_path.empty());
-  DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);
-
-  // Create and map the file into memory and make it globally available.
-  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
-  bool success =
-      mapped_file->Initialize(File(file_path,
-                                   File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
-                                   File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
-                              {0, static_cast<int64_t>(size)},
-                              MemoryMappedFile::READ_WRITE_EXTEND);
-  DCHECK(success);
-  CreateWithAllocator(MakeUnique<FilePersistentMemoryAllocator>(
-                          std::move(mapped_file), size, id, name, false),
-                      stack_depth);
-}
-#endif  // !defined(OS_NACL)
-
-// static
-void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
-                                                  uint64_t id,
-                                                  StringPiece name,
-                                                  int stack_depth) {
-  CreateWithAllocator(
-      MakeUnique<LocalPersistentMemoryAllocator>(size, id, name), stack_depth);
-}
-
-ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
-  DCHECK(!this_thread_tracker_.Get());
-
-  PersistentMemoryAllocator::Reference mem_reference;
-
-  {
-    base::AutoLock autolock(thread_tracker_allocator_lock_);
-    mem_reference = thread_tracker_allocator_.GetObjectReference();
-  }
-
-  if (!mem_reference) {
-    // Failure. This shouldn't happen. But be graceful if it does, probably
-    // because the underlying allocator wasn't given enough memory to satisfy
-    // to all possible requests.
-    NOTREACHED();
-    // Report the thread-count at which the allocator was full so that the
-    // failure can be seen and underlying memory resized appropriately.
-    UMA_HISTOGRAM_COUNTS_1000(
-        "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
-        thread_tracker_count_.load(std::memory_order_relaxed));
-    // Return null, just as if tracking wasn't enabled.
-    return nullptr;
-  }
-
-  // Convert the memory block found above into an actual memory address.
-  // Doing the conversion as a Header object enacts the 32/64-bit size
-  // consistency checks which would not otherwise be done. Unfortunately,
-  // some older compilers and MSVC don't have standard-conforming definitions
-  // of std::atomic which cause it not to be plain-old-data. Don't check on
-  // those platforms assuming that the checks on other platforms will be
-  // sufficient.
-  // TODO(bcwhite): Review this after major compiler releases.
-  DCHECK(mem_reference);
-  void* mem_base;
-  mem_base =
-      allocator_->GetAsObject<ThreadActivityTracker::Header>(mem_reference);
-
-  DCHECK(mem_base);
-  DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
-
-  // Create a tracker with the acquired memory and set it as the tracker
-  // for this particular thread in thread-local-storage.
-  ManagedActivityTracker* tracker =
-      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
-  DCHECK(tracker->IsValid());
-  this_thread_tracker_.Set(tracker);
-  int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
-
-  UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count",
-                            old_count + 1, kMaxThreadCount);
-  return tracker;
-}
-
-void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
-  ThreadActivityTracker* tracker =
-      reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
-  if (tracker)
-    delete tracker;
-}
-
-void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
-  // Allocate at least one extra byte so the string is NUL terminated. All
-  // memory returned by the allocator is guaranteed to be zeroed.
-  PersistentMemoryAllocator::Reference ref =
-      allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
-  char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
-                                              message.size() + 1);
-  if (memory) {
-    memcpy(memory, message.data(), message.size());
-    allocator_->MakeIterable(ref);
-  }
-}
-
-void GlobalActivityTracker::RecordModuleInfo(const ModuleInfo& info) {
-  AutoLock lock(modules_lock_);
-  auto found = modules_.find(info.file);
-  if (found != modules_.end()) {
-    ModuleInfoRecord* record = found->second;
-    DCHECK(record);
-
-    // Update the basic state of module information that has been already
-    // recorded. It is assumed that the string information (identifier,
-    // version, etc.) remain unchanged which means that there's no need
-    // to create a new record to accommodate a possibly longer length.
-    record->UpdateFrom(info);
-    return;
-  }
-
-  size_t required_size = ModuleInfoRecord::EncodedSize(info);
-  ModuleInfoRecord* record = allocator_->New<ModuleInfoRecord>(required_size);
-  if (!record)
-    return;
-
-  bool success = record->EncodeFrom(info, required_size);
-  DCHECK(success);
-  allocator_->MakeIterable(record);
-  modules_.insert(std::make_pair(info.file, record));
-}
-
-void GlobalActivityTracker::RecordFieldTrial(const std::string& trial_name,
-                                             StringPiece group_name) {
-  const std::string key = std::string("FieldTrial.") + trial_name;
-  global_data_.SetString(key, group_name);
-}
-
-GlobalActivityTracker::GlobalActivityTracker(
-    std::unique_ptr<PersistentMemoryAllocator> allocator,
-    int stack_depth)
-    : allocator_(std::move(allocator)),
-      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
-      this_thread_tracker_(&OnTLSDestroy),
-      thread_tracker_count_(0),
-      thread_tracker_allocator_(allocator_.get(),
-                                kTypeIdActivityTracker,
-                                kTypeIdActivityTrackerFree,
-                                stack_memory_size_,
-                                kCachedThreadMemories,
-                                /*make_iterable=*/true),
-      user_data_allocator_(allocator_.get(),
-                           kTypeIdUserDataRecord,
-                           kTypeIdUserDataRecordFree,
-                           kUserDataSize,
-                           kCachedUserDataMemories,
-                           /*make_iterable=*/false),
-      global_data_(
-          allocator_->GetAsArray<char>(
-              allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
-              kTypeIdGlobalDataRecord,
-              PersistentMemoryAllocator::kSizeAny),
-          kGlobalDataSize) {
-  // Ensure the passed memory is valid and empty (iterator finds nothing).
-  uint32_t type;
-  DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
-
-  // Ensure that there is no other global object and then make this one such.
-  DCHECK(!g_tracker_);
-  subtle::Release_Store(&g_tracker_, reinterpret_cast<uintptr_t>(this));
-
-  // The global records must be iterable in order to be found by an analyzer.
-  allocator_->MakeIterable(allocator_->GetAsReference(
-      global_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
-
-  // Fetch and record all activated field trials.
-  FieldTrial::ActiveGroups active_groups;
-  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
-  for (auto& group : active_groups)
-    RecordFieldTrial(group.trial_name, group.group_name);
-}
-
-GlobalActivityTracker::~GlobalActivityTracker() {
-  DCHECK_EQ(Get(), this);
-  DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
-  subtle::Release_Store(&g_tracker_, 0);
-}
-
-void GlobalActivityTracker::ReturnTrackerMemory(
-    ManagedActivityTracker* tracker) {
-  PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
-  void* mem_base = tracker->mem_base_;
-  DCHECK(mem_reference);
-  DCHECK(mem_base);
-
-  // Remove the destructed tracker from the set of known ones.
-  DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
-  thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
-
-  // Release this memory for re-use at a later time.
-  base::AutoLock autolock(thread_tracker_allocator_lock_);
-  thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
-}
-
-// static
-void GlobalActivityTracker::OnTLSDestroy(void* value) {
-  delete reinterpret_cast<ManagedActivityTracker*>(value);
-}
-
-ScopedActivity::ScopedActivity(const void* program_counter,
-                               uint8_t action,
-                               uint32_t id,
-                               int32_t info)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
-          ActivityData::ForGeneric(id, info),
-          /*lock_allowed=*/true),
-      id_(id) {
-  // The action must not affect the category bits of the activity type.
-  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
-}
-
-void ScopedActivity::ChangeAction(uint8_t action) {
-  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
-  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
-                    kNullActivityData);
-}
-
-void ScopedActivity::ChangeInfo(int32_t info) {
-  ChangeTypeAndData(Activity::ACT_NULL, ActivityData::ForGeneric(id_, info));
-}
-
-void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
-  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
-  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
-                    ActivityData::ForGeneric(id_, info));
-}
-
-ScopedTaskRunActivity::ScopedTaskRunActivity(
-    const void* program_counter,
-    const base::PendingTask& task)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          task.posted_from.program_counter(),
-          Activity::ACT_TASK_RUN,
-          ActivityData::ForTask(task.sequence_num),
-          /*lock_allowed=*/true) {}
-
-ScopedLockAcquireActivity::ScopedLockAcquireActivity(
-    const void* program_counter,
-    const base::internal::LockImpl* lock)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          Activity::ACT_LOCK_ACQUIRE,
-          ActivityData::ForLock(lock),
-          /*lock_allowed=*/false) {}
-
-ScopedEventWaitActivity::ScopedEventWaitActivity(
-    const void* program_counter,
-    const base::WaitableEvent* event)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          Activity::ACT_EVENT_WAIT,
-          ActivityData::ForEvent(event),
-          /*lock_allowed=*/true) {}
-
-ScopedThreadJoinActivity::ScopedThreadJoinActivity(
-    const void* program_counter,
-    const base::PlatformThreadHandle* thread)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          Activity::ACT_THREAD_JOIN,
-          ActivityData::ForThread(*thread),
-          /*lock_allowed=*/true) {}
-
-#if !defined(OS_NACL) && !defined(OS_IOS)
-ScopedProcessWaitActivity::ScopedProcessWaitActivity(
-    const void* program_counter,
-    const base::Process* process)
-    : GlobalActivityTracker::ScopedThreadActivity(
-          program_counter,
-          nullptr,
-          Activity::ACT_PROCESS_WAIT,
-          ActivityData::ForProcess(process->Pid()),
-          /*lock_allowed=*/true) {}
-#endif
-
-}  // namespace debug
-}  // namespace base
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
deleted file mode 100644
index 719a318..0000000
--- a/base/debug/activity_tracker.h
+++ /dev/null
@@ -1,1102 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Activity tracking provides a low-overhead method of collecting information
-// about the state of the application for analysis both while it is running
-// and after it has terminated unexpectedly. Its primary purpose is to help
-// locate reasons the browser becomes unresponsive by providing insight into
-// what all the various threads and processes are (or were) doing.
-
-#ifndef BASE_DEBUG_ACTIVITY_TRACKER_H_
-#define BASE_DEBUG_ACTIVITY_TRACKER_H_
-
-// std::atomic is undesired due to performance issues when used as global
-// variables. There are no such instances here. This module uses the
-// PersistentMemoryAllocator which also uses std::atomic and is written
-// by the same author.
-#include <atomic>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/location.h"
-#include "base/metrics/persistent_memory_allocator.h"
-#include "base/strings/string_piece.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread_checker.h"
-#include "base/threading/thread_local_storage.h"
-
-namespace base {
-
-struct PendingTask;
-
-class FilePath;
-class Lock;
-class PlatformThreadHandle;
-class Process;
-class StaticAtomicSequenceNumber;
-class WaitableEvent;
-
-namespace debug {
-
-class ThreadActivityTracker;
-
-
-enum : int {
-  // The maximum number of call-stack addresses stored per activity. This
-  // cannot be changed without also changing the version number of the
-  // structure. See kTypeIdActivityTracker in GlobalActivityTracker.
-  kActivityCallStackSize = 10,
-};
-
-// The data associated with an activity is dependent upon the activity type.
-// This union defines all of the various fields. All fields must be explicitly
-// sized types to ensure no interoperability problems between 32-bit and
-// 64-bit systems.
-union ActivityData {
-  // Generic activities don't have any defined structure.
-  struct {
-    uint32_t id;   // An arbitrary identifier used for association.
-    int32_t info;  // An arbitrary value used for information purposes.
-  } generic;
-  struct {
-    uint64_t sequence_id;  // The sequence identifier of the posted task.
-  } task;
-  struct {
-    uint64_t lock_address;  // The memory address of the lock object.
-  } lock;
-  struct {
-    uint64_t event_address;  // The memory address of the event object.
-  } event;
-  struct {
-    int64_t thread_id;  // A unique identifier for a thread within a process.
-  } thread;
-  struct {
-    int64_t process_id;  // A unique identifier for a process.
-  } process;
-
-  // These methods create an ActivityData object from the appropriate
-  // parameters. Objects of this type should always be created this way to
-  // ensure that no fields remain unpopulated should the set of recorded
-  // fields change. They're defined inline where practical because they
-  // reduce to loading a small local structure with a few values, roughly
-  // the same as loading all those values into parameters.
-
-  static ActivityData ForGeneric(uint32_t id, int32_t info) {
-    ActivityData data;
-    data.generic.id = id;
-    data.generic.info = info;
-    return data;
-  }
-
-  static ActivityData ForTask(uint64_t sequence) {
-    ActivityData data;
-    data.task.sequence_id = sequence;
-    return data;
-  }
-
-  static ActivityData ForLock(const void* lock) {
-    ActivityData data;
-    data.lock.lock_address = reinterpret_cast<uintptr_t>(lock);
-    return data;
-  }
-
-  static ActivityData ForEvent(const void* event) {
-    ActivityData data;
-    data.event.event_address = reinterpret_cast<uintptr_t>(event);
-    return data;
-  }
-
-  static ActivityData ForThread(const PlatformThreadHandle& handle);
-  static ActivityData ForThread(const int64_t id) {
-    ActivityData data;
-    data.thread.thread_id = id;
-    return data;
-  }
-
-  static ActivityData ForProcess(const int64_t id) {
-    ActivityData data;
-    data.process.process_id = id;
-    return data;
-  }
-};
-
-// A "null" activity-data that can be passed to indicate "do not change".
-extern const ActivityData kNullActivityData;
-
-
-// A helper class that is used for managing memory allocations within a
-// persistent memory allocator. Instances of this class are NOT thread-safe.
-// Use from a single thread or protect access with a lock.
-class BASE_EXPORT ActivityTrackerMemoryAllocator {
- public:
-  using Reference = PersistentMemoryAllocator::Reference;
-
-  // Creates a instance for allocating objects of a fixed |object_type|, a
-  // corresponding |object_free| type, and the |object_size|. An internal
-  // cache of the last |cache_size| released references will be kept for
-  // quick future fetches. If |make_iterable| then allocated objects will
-  // be marked "iterable" in the allocator.
-  ActivityTrackerMemoryAllocator(PersistentMemoryAllocator* allocator,
-                                 uint32_t object_type,
-                                 uint32_t object_free_type,
-                                 size_t object_size,
-                                 size_t cache_size,
-                                 bool make_iterable);
-  ~ActivityTrackerMemoryAllocator();
-
-  // Gets a reference to an object of the configured type. This can return
-  // a null reference if it was not possible to allocate the memory.
-  Reference GetObjectReference();
-
-  // Returns an object to the "free" pool.
-  void ReleaseObjectReference(Reference ref);
-
-  // Helper function to access an object allocated using this instance.
-  template <typename T>
-  T* GetAsObject(Reference ref) {
-    return allocator_->GetAsObject<T>(ref);
-  }
-
-  // Similar to GetAsObject() but converts references to arrays of objects.
-  template <typename T>
-  T* GetAsArray(Reference ref, size_t count) {
-    return allocator_->GetAsArray<T>(ref, object_type_, count);
-  }
-
-  // The current "used size" of the internal cache, visible for testing.
-  size_t cache_used() const { return cache_used_; }
-
- private:
-  PersistentMemoryAllocator* const allocator_;
-  const uint32_t object_type_;
-  const uint32_t object_free_type_;
-  const size_t object_size_;
-  const size_t cache_size_;
-  const bool make_iterable_;
-
-  // An iterator for going through persistent memory looking for free'd objects.
-  PersistentMemoryAllocator::Iterator iterator_;
-
-  // The cache of released object memories.
-  std::unique_ptr<Reference[]> cache_values_;
-  size_t cache_used_;
-
-  DISALLOW_COPY_AND_ASSIGN(ActivityTrackerMemoryAllocator);
-};
-
-
-// This structure is the full contents recorded for every activity pushed
-// onto the stack. The |activity_type| indicates what is actually stored in
-// the |data| field. All fields must be explicitly sized types to ensure no
-// interoperability problems between 32-bit and 64-bit systems.
-struct Activity {
-  // SHA1(base::debug::Activity): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0x99425159 + 1;
-  // Expected size for 32/64-bit check. Update this if structure changes!
-  static constexpr size_t kExpectedInstanceSize =
-      48 + 8 * kActivityCallStackSize;
-
-  // The type of an activity on the stack. Activities are broken into
-  // categories with the category ID taking the top 4 bits and the lower
-  // bits representing an action within that category. This combination
-  // makes it easy to "switch" based on the type during analysis.
-  enum Type : uint8_t {
-    // This "null" constant is used to indicate "do not change" in calls.
-    ACT_NULL = 0,
-
-    // Task activities involve callbacks posted to a thread or thread-pool
-    // using the PostTask() method or any of its friends.
-    ACT_TASK = 1 << 4,
-    ACT_TASK_RUN = ACT_TASK,
-
-    // Lock activities involve the acquisition of "mutex" locks.
-    ACT_LOCK = 2 << 4,
-    ACT_LOCK_ACQUIRE = ACT_LOCK,
-    ACT_LOCK_RELEASE,
-
-    // Event activities involve operations on a WaitableEvent.
-    ACT_EVENT = 3 << 4,
-    ACT_EVENT_WAIT = ACT_EVENT,
-    ACT_EVENT_SIGNAL,
-
-    // Thread activities involve the life management of threads.
-    ACT_THREAD = 4 << 4,
-    ACT_THREAD_START = ACT_THREAD,
-    ACT_THREAD_JOIN,
-
-    // Process activities involve the life management of processes.
-    ACT_PROCESS = 5 << 4,
-    ACT_PROCESS_START = ACT_PROCESS,
-    ACT_PROCESS_WAIT,
-
-    // Generic activities are user defined and can be anything.
-    ACT_GENERIC = 15 << 4,
-
-    // These constants can be used to separate the category and action from
-    // a combined activity type.
-    ACT_CATEGORY_MASK = 0xF << 4,
-    ACT_ACTION_MASK = 0xF
-  };
-
-  // Internal representation of time. During collection, this is in "ticks"
-  // but when returned in a snapshot, it is "wall time".
-  int64_t time_internal;
-
-  // The address that pushed the activity onto the stack as a raw number.
-  uint64_t calling_address;
-
-  // The address that is the origin of the activity if it not obvious from
-  // the call stack. This is useful for things like tasks that are posted
-  // from a completely different thread though most activities will leave
-  // it null.
-  uint64_t origin_address;
-
-  // Array of program-counters that make up the top of the call stack.
-  // Despite the fixed size, this list is always null-terminated. Entries
-  // after the terminator have no meaning and may or may not also be null.
-  // The list will be completely empty if call-stack collection is not
-  // enabled.
-  uint64_t call_stack[kActivityCallStackSize];
-
-  // Reference to arbitrary user data within the persistent memory segment
-  // and a unique identifier for it.
-  uint32_t user_data_ref;
-  uint32_t user_data_id;
-
-  // The (enumerated) type of the activity. This defines what fields of the
-  // |data| record are valid.
-  uint8_t activity_type;
-
-  // Padding to ensure that the next member begins on a 64-bit boundary
-  // even on 32-bit builds which ensures inter-operability between CPU
-  // architectures. New fields can be taken from this space.
-  uint8_t padding[7];
-
-  // Information specific to the |activity_type|.
-  ActivityData data;
-
-  static void FillFrom(Activity* activity,
-                       const void* program_counter,
-                       const void* origin,
-                       Type type,
-                       const ActivityData& data);
-};
-
-// This class manages arbitrary user data that can be associated with activities
-// done by a thread by supporting key/value pairs of any type. This can provide
-// additional information during debugging. It is also used to store arbitrary
-// global data. All updates must be done from the same thread.
-class BASE_EXPORT ActivityUserData {
- public:
-  // List of known value type. REFERENCE types must immediately follow the non-
-  // external types.
-  enum ValueType : uint8_t {
-    END_OF_VALUES = 0,
-    RAW_VALUE,
-    RAW_VALUE_REFERENCE,
-    STRING_VALUE,
-    STRING_VALUE_REFERENCE,
-    CHAR_VALUE,
-    BOOL_VALUE,
-    SIGNED_VALUE,
-    UNSIGNED_VALUE,
-  };
-
-  class BASE_EXPORT TypedValue {
-   public:
-    TypedValue();
-    TypedValue(const TypedValue& other);
-    ~TypedValue();
-
-    ValueType type() const { return type_; }
-
-    // These methods return the extracted value in the correct format.
-    StringPiece Get() const;
-    StringPiece GetString() const;
-    bool GetBool() const;
-    char GetChar() const;
-    int64_t GetInt() const;
-    uint64_t GetUint() const;
-
-    // These methods return references to process memory as originally provided
-    // to corresponding Set calls. USE WITH CAUTION! There is no guarantee that
-    // the referenced memory is assessible or useful.  It's possible that:
-    //  - the memory was free'd and reallocated for a different purpose
-    //  - the memory has been released back to the OS
-    //  - the memory belongs to a different process's address space
-    // Dereferencing the returned StringPiece when the memory is not accessible
-    // will cause the program to SEGV!
-    StringPiece GetReference() const;
-    StringPiece GetStringReference() const;
-
-   private:
-    friend class ActivityUserData;
-
-    ValueType type_;
-    uint64_t short_value_;    // Used to hold copy of numbers, etc.
-    std::string long_value_;  // Used to hold copy of raw/string data.
-    StringPiece ref_value_;   // Used to hold reference to external data.
-  };
-
-  using Snapshot = std::map<std::string, TypedValue>;
-
-  ActivityUserData(void* memory, size_t size);
-  virtual ~ActivityUserData();
-
-  // Gets the unique ID number for this user data. If this changes then the
-  // contents have been overwritten by another thread. The return value is
-  // always non-zero unless it's actually just a data "sink".
-  uint32_t id() const {
-    return memory_ ? id_->load(std::memory_order_relaxed) : 0;
-  }
-
-  // Writes a |value| (as part of a key/value pair) that will be included with
-  // the activity in any reports. The same |name| can be written multiple times
-  // with each successive call overwriting the previously stored |value|. For
-  // raw and string values, the maximum size of successive writes is limited by
-  // the first call. The length of "name" is limited to 255 characters.
-  //
-  // This information is stored on a "best effort" basis. It may be dropped if
-  // the memory buffer is full or the associated activity is beyond the maximum
-  // recording depth.
-  void Set(StringPiece name, const void* memory, size_t size) {
-    Set(name, RAW_VALUE, memory, size);
-  }
-  void SetString(StringPiece name, StringPiece value) {
-    Set(name, STRING_VALUE, value.data(), value.length());
-  }
-  void SetString(StringPiece name, StringPiece16 value) {
-    SetString(name, UTF16ToUTF8(value));
-  }
-  void SetBool(StringPiece name, bool value) {
-    char cvalue = value ? 1 : 0;
-    Set(name, BOOL_VALUE, &cvalue, sizeof(cvalue));
-  }
-  void SetChar(StringPiece name, char value) {
-    Set(name, CHAR_VALUE, &value, sizeof(value));
-  }
-  void SetInt(StringPiece name, int64_t value) {
-    Set(name, SIGNED_VALUE, &value, sizeof(value));
-  }
-  void SetUint(StringPiece name, uint64_t value) {
-    Set(name, UNSIGNED_VALUE, &value, sizeof(value));
-  }
-
-  // These function as above but don't actually copy the data into the
-  // persistent memory. They store unaltered pointers along with a size. These
-  // can be used in conjuction with a memory dump to find certain large pieces
-  // of information.
-  void SetReference(StringPiece name, const void* memory, size_t size) {
-    SetReference(name, RAW_VALUE_REFERENCE, memory, size);
-  }
-  void SetStringReference(StringPiece name, StringPiece value) {
-    SetReference(name, STRING_VALUE_REFERENCE, value.data(), value.length());
-  }
-
-  // Creates a snapshot of the key/value pairs contained within. The returned
-  // data will be fixed, independent of whatever changes afterward. There is
-  // protection against concurrent modification of the values but no protection
-  // against a complete overwrite of the contents; the caller must ensure that
-  // the memory segment is not going to be re-initialized while this runs.
-  bool CreateSnapshot(Snapshot* output_snapshot) const;
-
-  // Gets the base memory address used for storing data.
-  const void* GetBaseAddress();
-
- protected:
-  virtual void Set(StringPiece name,
-                   ValueType type,
-                   const void* memory,
-                   size_t size);
-
- private:
-  FRIEND_TEST_ALL_PREFIXES(ActivityTrackerTest, UserDataTest);
-
-  enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
-
-  // A structure used to reference data held outside of persistent memory.
-  struct ReferenceRecord {
-    uint64_t address;
-    uint64_t size;
-  };
-
-  // Header to a key/value record held in persistent memory.
-  struct Header {
-    std::atomic<uint8_t> type;         // Encoded ValueType
-    uint8_t name_size;                 // Length of "name" key.
-    std::atomic<uint16_t> value_size;  // Actual size of of the stored value.
-    uint16_t record_size;              // Total storage of name, value, header.
-  };
-
-  // This record is used to hold known value is a map so that they can be
-  // found and overwritten later.
-  struct ValueInfo {
-    ValueInfo();
-    ValueInfo(ValueInfo&&);
-    ~ValueInfo();
-
-    StringPiece name;                 // The "key" of the record.
-    ValueType type;                   // The type of the value.
-    void* memory;                     // Where the "value" is held.
-    std::atomic<uint16_t>* size_ptr;  // Address of the actual size of value.
-    size_t extent;                    // The total storage of the value,
-  };                                  // typically rounded up for alignment.
-
-  void SetReference(StringPiece name,
-                    ValueType type,
-                    const void* memory,
-                    size_t size);
-
-  // Loads any data already in the memory segment. This allows for accessing
-  // records created previously.
-  void ImportExistingData() const;
-
-  // A map of all the values within the memory block, keyed by name for quick
-  // updates of the values. This is "mutable" because it changes on "const"
-  // objects even when the actual data values can't change.
-  mutable std::map<StringPiece, ValueInfo> values_;
-
-  // Information about the memory block in which new data can be stored. These
-  // are "mutable" because they change even on "const" objects that are just
-  // skipping already set values.
-  mutable char* memory_;
-  mutable size_t available_;
-
-  // A pointer to the unique ID for this instance.
-  std::atomic<uint32_t>* const id_;
-
-  // This ID is used to create unique indentifiers for user data so that it's
-  // possible to tell if the information has been overwritten.
-  static StaticAtomicSequenceNumber next_id_;
-
-  DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
-};
-
-// This class manages tracking a stack of activities for a single thread in
-// a persistent manner, implementing a bounded-size stack in a fixed-size
-// memory allocation. In order to support an operational mode where another
-// thread is analyzing this data in real-time, atomic operations are used
-// where necessary to guarantee a consistent view from the outside.
-//
-// This class is not generally used directly but instead managed by the
-// GlobalActivityTracker instance and updated using Scoped*Activity local
-// objects.
-class BASE_EXPORT ThreadActivityTracker {
- public:
-  using ActivityId = uint32_t;
-
-  // This structure contains all the common information about the thread so
-  // it doesn't have to be repeated in every entry on the stack. It is defined
-  // and used completely within the .cc file.
-  struct Header;
-
-  // This structure holds a copy of all the internal data at the moment the
-  // "snapshot" operation is done. It is disconnected from the live tracker
-  // so that continued operation of the thread will not cause changes here.
-  struct BASE_EXPORT Snapshot {
-    // Explicit constructor/destructor are needed because of complex types
-    // with non-trivial default constructors and destructors.
-    Snapshot();
-    ~Snapshot();
-
-    // The name of the thread as set when it was created. The name may be
-    // truncated due to internal length limitations.
-    std::string thread_name;
-
-    // The process and thread IDs. These values have no meaning other than
-    // they uniquely identify a running process and a running thread within
-    // that process.  Thread-IDs can be re-used across different processes
-    // and both can be re-used after the process/thread exits.
-    int64_t process_id = 0;
-    int64_t thread_id = 0;
-
-    // The current stack of activities that are underway for this thread. It
-    // is limited in its maximum size with later entries being left off.
-    std::vector<Activity> activity_stack;
-
-    // The current total depth of the activity stack, including those later
-    // entries not recorded in the |activity_stack| vector.
-    uint32_t activity_stack_depth = 0;
-  };
-
-  // This is the base class for having the compiler manage an activity on the
-  // tracker's stack. It does nothing but call methods on the passed |tracker|
-  // if it is not null, making it safe (and cheap) to create these objects
-  // even if activity tracking is not enabled.
-  class BASE_EXPORT ScopedActivity {
-   public:
-    ScopedActivity(ThreadActivityTracker* tracker,
-                   const void* program_counter,
-                   const void* origin,
-                   Activity::Type type,
-                   const ActivityData& data);
-    ~ScopedActivity();
-
-    // Changes some basic metadata about the activity.
-    void ChangeTypeAndData(Activity::Type type, const ActivityData& data);
-
-   protected:
-    // The thread tracker to which this object reports. It can be null if
-    // activity tracking is not (yet) enabled.
-    ThreadActivityTracker* const tracker_;
-
-    // An identifier that indicates a specific activity on the stack.
-    ActivityId activity_id_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
-  };
-
-  // A ThreadActivityTracker runs on top of memory that is managed externally.
-  // It must be large enough for the internal header and a few Activity
-  // blocks. See SizeForStackDepth().
-  ThreadActivityTracker(void* base, size_t size);
-  virtual ~ThreadActivityTracker();
-
-  // Indicates that an activity has started from a given |origin| address in
-  // the code, though it can be null if the creator's address is not known.
-  // The |type| and |data| describe the activity. |program_counter| should be
-  // the result of GetProgramCounter() where push is called. Returned is an
-  // ID that can be used to adjust the pushed activity.
-  ActivityId PushActivity(const void* program_counter,
-                          const void* origin,
-                          Activity::Type type,
-                          const ActivityData& data);
-
-  // An inlined version of the above that gets the program counter where it
-  // is called.
-  ALWAYS_INLINE
-  ActivityId PushActivity(const void* origin,
-                          Activity::Type type,
-                          const ActivityData& data) {
-    return PushActivity(::tracked_objects::GetProgramCounter(), origin, type,
-                        data);
-  }
-
-  // Changes the activity |type| and |data| of the top-most entry on the stack.
-  // This is useful if the information has changed and it is desireable to
-  // track that change without creating a new stack entry. If the type is
-  // ACT_NULL or the data is kNullActivityData then that value will remain
-  // unchanged. The type, if changed, must remain in the same category.
-  // Changing both is not atomic so a snapshot operation could occur between
-  // the update of |type| and |data| or between update of |data| fields.
-  void ChangeActivity(ActivityId id,
-                      Activity::Type type,
-                      const ActivityData& data);
-
-  // Indicates that an activity has completed.
-  void PopActivity(ActivityId id);
-
-  // Sets the user-data information for an activity.
-  std::unique_ptr<ActivityUserData> GetUserData(
-      ActivityId id,
-      ActivityTrackerMemoryAllocator* allocator);
-
-  // Returns if there is true use-data associated with a given ActivityId since
-  // it's possible than any returned object is just a sink.
-  bool HasUserData(ActivityId id);
-
-  // Release the user-data information for an activity.
-  void ReleaseUserData(ActivityId id,
-                       ActivityTrackerMemoryAllocator* allocator);
-
-  // Returns whether the current data is valid or not. It is not valid if
-  // corruption has been detected in the header or other data structures.
-  bool IsValid() const;
-
-  // Gets a copy of the tracker contents for analysis. Returns false if a
-  // snapshot was not possible, perhaps because the data is not valid; the
-  // contents of |output_snapshot| are undefined in that case. The current
-  // implementation does not support concurrent snapshot operations.
-  bool CreateSnapshot(Snapshot* output_snapshot) const;
-
-  // Calculates the memory size required for a given stack depth, including
-  // the internal header structure for the stack.
-  static size_t SizeForStackDepth(int stack_depth);
-
- private:
-  friend class ActivityTrackerTest;
-
-  Header* const header_;        // Pointer to the Header structure.
-  Activity* const stack_;       // The stack of activities.
-  const uint32_t stack_slots_;  // The total number of stack slots.
-
-  bool valid_ = false;          // Tracks whether the data is valid or not.
-
-  base::ThreadChecker thread_checker_;
-
-  DISALLOW_COPY_AND_ASSIGN(ThreadActivityTracker);
-};
-
-
-// The global tracker manages all the individual thread trackers. Memory for
-// the thread trackers is taken from a PersistentMemoryAllocator which allows
-// for the data to be analyzed by a parallel process or even post-mortem.
-class BASE_EXPORT GlobalActivityTracker {
- public:
-  // Type identifiers used when storing in persistent memory so they can be
-  // identified during extraction; the first 4 bytes of the SHA1 of the name
-  // is used as a unique integer. A "version number" is added to the base
-  // so that, if the structure of that object changes, stored older versions
-  // will be safely ignored. These are public so that an external process
-  // can recognize records of this type within an allocator.
-  enum : uint32_t {
-    kTypeIdActivityTracker = 0x5D7381AF + 3,   // SHA1(ActivityTracker) v3
-    kTypeIdUserDataRecord = 0x615EDDD7 + 2,    // SHA1(UserDataRecord) v2
-    kTypeIdGlobalLogMessage = 0x4CF434F9 + 1,  // SHA1(GlobalLogMessage) v1
-    kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 1000,
-
-    kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
-    kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
-  };
-
-  // This structure contains information about a loaded module, as shown to
-  // users of the tracker.
-  struct BASE_EXPORT ModuleInfo {
-    ModuleInfo();
-    ModuleInfo(ModuleInfo&& rhs);
-    ModuleInfo(const ModuleInfo& rhs);
-    ~ModuleInfo();
-
-    ModuleInfo& operator=(ModuleInfo&& rhs);
-    ModuleInfo& operator=(const ModuleInfo& rhs);
-
-    // Information about where and when the module was loaded/unloaded.
-    bool is_loaded = false;  // Was the last operation a load or unload?
-    uintptr_t address = 0;   // Address of the last load operation.
-    int64_t load_time = 0;   // Time of last change; set automatically.
-
-    // Information about the module itself. These never change no matter how
-    // many times a module may be loaded and unloaded.
-    size_t size = 0;         // The size of the loaded module.
-    uint32_t timestamp = 0;  // Opaque "timestamp" for the module.
-    uint32_t age = 0;        // Opaque "age" for the module.
-    uint8_t identifier[16];  // Opaque identifier (GUID, etc.) for the module.
-    std::string file;        // The full path to the file. (UTF-8)
-    std::string debug_file;  // The full path to the debug file.
-  };
-
-  // This is a thin wrapper around the thread-tracker's ScopedActivity that
-  // accesses the global tracker to provide some of the information, notably
-  // which thread-tracker to use. It is safe to create even if activity
-  // tracking is not enabled.
-  class BASE_EXPORT ScopedThreadActivity
-      : public ThreadActivityTracker::ScopedActivity {
-   public:
-    ScopedThreadActivity(const void* program_counter,
-                         const void* origin,
-                         Activity::Type type,
-                         const ActivityData& data,
-                         bool lock_allowed);
-    ~ScopedThreadActivity();
-
-    // Returns an object for manipulating user data.
-    ActivityUserData& user_data();
-
-   private:
-    // Gets (or creates) a tracker for the current thread. If locking is not
-    // allowed (because a lock is being tracked which would cause recursion)
-    // then the attempt to create one if none found will be skipped. Once
-    // the tracker for this thread has been created for other reasons, locks
-    // will be tracked. The thread-tracker uses locks.
-    static ThreadActivityTracker* GetOrCreateTracker(bool lock_allowed) {
-      GlobalActivityTracker* global_tracker = Get();
-      if (!global_tracker)
-        return nullptr;
-      if (lock_allowed)
-        return global_tracker->GetOrCreateTrackerForCurrentThread();
-      else
-        return global_tracker->GetTrackerForCurrentThread();
-    }
-
-    // An object that manages additional user data, created only upon request.
-    std::unique_ptr<ActivityUserData> user_data_;
-
-    DISALLOW_COPY_AND_ASSIGN(ScopedThreadActivity);
-  };
-
-  ~GlobalActivityTracker();
-
-  // Creates a global tracker using a given persistent-memory |allocator| and
-  // providing the given |stack_depth| to each thread tracker it manages. The
-  // created object is activated so tracking will begin immediately upon return.
-  static void CreateWithAllocator(
-      std::unique_ptr<PersistentMemoryAllocator> allocator,
-      int stack_depth);
-
-#if !defined(OS_NACL)
-  // Like above but internally creates an allocator around a disk file with
-  // the specified |size| at the given |file_path|. Any existing file will be
-  // overwritten. The |id| and |name| are arbitrary and stored in the allocator
-  // for reference by whatever process reads it.
-  static void CreateWithFile(const FilePath& file_path,
-                             size_t size,
-                             uint64_t id,
-                             StringPiece name,
-                             int stack_depth);
-#endif  // !defined(OS_NACL)
-
-  // Like above but internally creates an allocator using local heap memory of
-  // the specified size. This is used primarily for unit tests.
-  static void CreateWithLocalMemory(size_t size,
-                                    uint64_t id,
-                                    StringPiece name,
-                                    int stack_depth);
-
-  // Gets the global activity-tracker or null if none exists.
-  static GlobalActivityTracker* Get() {
-    return reinterpret_cast<GlobalActivityTracker*>(
-        subtle::Acquire_Load(&g_tracker_));
-  }
-
-  // Convenience method for determining if a global tracker is active.
-  static bool IsEnabled() { return Get() != nullptr; }
-
-  // Gets the persistent-memory-allocator in which data is stored. Callers
-  // can store additional records here to pass more information to the
-  // analysis process.
-  PersistentMemoryAllocator* allocator() { return allocator_.get(); }
-
-  // Gets the thread's activity-tracker if it exists. This is inline for
-  // performance reasons and it uses thread-local-storage (TLS) so that there
-  // is no significant lookup time required to find the one for the calling
-  // thread. Ownership remains with the global tracker.
-  ThreadActivityTracker* GetTrackerForCurrentThread() {
-    return reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
-  }
-
-  // Gets the thread's activity-tracker or creates one if none exists. This
-  // is inline for performance reasons. Ownership remains with the global
-  // tracker.
-  ThreadActivityTracker* GetOrCreateTrackerForCurrentThread() {
-    ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
-    if (tracker)
-      return tracker;
-    return CreateTrackerForCurrentThread();
-  }
-
-  // Creates an activity-tracker for the current thread.
-  ThreadActivityTracker* CreateTrackerForCurrentThread();
-
-  // Releases the activity-tracker for the current thread (for testing only).
-  void ReleaseTrackerForCurrentThreadForTesting();
-
-  // Records a log message. The current implementation does NOT recycle these
-  // only store critical messages such as FATAL ones.
-  void RecordLogMessage(StringPiece message);
-  static void RecordLogMessageIfEnabled(StringPiece message) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordLogMessage(message);
-  }
-
-  // Records a module load/unload event. This is safe to call multiple times
-  // even with the same information.
-  void RecordModuleInfo(const ModuleInfo& info);
-  static void RecordModuleInfoIfEnabled(const ModuleInfo& info) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordModuleInfo(info);
-  }
-
-  // Record field trial information. This call is thread-safe. In addition to
-  // this, construction of a GlobalActivityTracker will cause all existing
-  // active field trials to be fetched and recorded.
-  void RecordFieldTrial(const std::string& trial_name, StringPiece group_name);
-  static void RecordFieldTrialIfEnabled(const std::string& trial_name,
-                                        StringPiece group_name) {
-    GlobalActivityTracker* tracker = Get();
-    if (tracker)
-      tracker->RecordFieldTrial(trial_name, group_name);
-  }
-
-  // Accesses the global data record for storing arbitrary key/value pairs.
-  ActivityUserData& global_data() { return global_data_; }
-
- private:
-  friend class GlobalActivityAnalyzer;
-  friend class ScopedThreadActivity;
-  friend class ActivityTrackerTest;
-
-  enum : int {
-    // The maximum number of threads that can be tracked within a process. If
-    // more than this number run concurrently, tracking of new ones may cease.
-    kMaxThreadCount = 100,
-    kCachedThreadMemories = 10,
-    kCachedUserDataMemories = 10,
-  };
-
-  // A wrapper around ActivityUserData that is thread-safe and thus can be used
-  // in the global scope without the requirement of being called from only one
-  // thread.
-  class GlobalUserData : public ActivityUserData {
-   public:
-    GlobalUserData(void* memory, size_t size);
-    ~GlobalUserData() override;
-
-   private:
-    void Set(StringPiece name,
-             ValueType type,
-             const void* memory,
-             size_t size) override;
-
-    Lock data_lock_;
-
-    DISALLOW_COPY_AND_ASSIGN(GlobalUserData);
-  };
-
-  // State of a module as stored in persistent memory. This supports a single
-  // loading of a module only. If modules are loaded multiple times at
-  // different addresses, only the last will be recorded and an unload will
-  // not revert to the information of any other addresses.
-  struct BASE_EXPORT ModuleInfoRecord {
-    // SHA1(ModuleInfoRecord): Increment this if structure changes!
-    static constexpr uint32_t kPersistentTypeId = 0x05DB5F41 + 1;
-
-    // Expected size for 32/64-bit check by PersistentMemoryAllocator.
-    static constexpr size_t kExpectedInstanceSize = 56;
-
-    // The atomic unfortunately makes this a "complex" class on some compilers
-    // and thus requires an out-of-line constructor & destructor even though
-    // they do nothing.
-    ModuleInfoRecord();
-    ~ModuleInfoRecord();
-
-    uint64_t address;               // The base address of the module.
-    uint64_t load_time;             // Time of last load/unload.
-    uint64_t size;                  // The size of the module in bytes.
-    uint32_t timestamp;             // Opaque timestamp of the module.
-    uint32_t age;                   // Opaque "age" associated with the module.
-    uint8_t identifier[16];         // Opaque identifier for the module.
-    std::atomic<uint32_t> changes;  // Number load/unload actions.
-    uint16_t pickle_size;           // The size of the following pickle.
-    uint8_t loaded;                 // Flag if module is loaded or not.
-    char pickle[1];                 // Other strings; may allocate larger.
-
-    // Decodes/encodes storage structure from more generic info structure.
-    bool DecodeTo(GlobalActivityTracker::ModuleInfo* info,
-                  size_t record_size) const;
-    bool EncodeFrom(const GlobalActivityTracker::ModuleInfo& info,
-                    size_t record_size);
-
-    // Updates the core information without changing the encoded strings. This
-    // is useful when a known module changes state (i.e. new load or unload).
-    bool UpdateFrom(const GlobalActivityTracker::ModuleInfo& info);
-
-    // Determines the required memory size for the encoded storage.
-    static size_t EncodedSize(const GlobalActivityTracker::ModuleInfo& info);
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ModuleInfoRecord);
-  };
-
-  // A thin wrapper around the main thread-tracker that keeps additional
-  // information that the global tracker needs to handle joined threads.
-  class ManagedActivityTracker : public ThreadActivityTracker {
-   public:
-    ManagedActivityTracker(PersistentMemoryAllocator::Reference mem_reference,
-                           void* base,
-                           size_t size);
-    ~ManagedActivityTracker() override;
-
-    // The reference into persistent memory from which the thread-tracker's
-    // memory was created.
-    const PersistentMemoryAllocator::Reference mem_reference_;
-
-    // The physical address used for the thread-tracker's memory.
-    void* const mem_base_;
-
-   private:
-    DISALLOW_COPY_AND_ASSIGN(ManagedActivityTracker);
-  };
-
-  // Creates a global tracker using a given persistent-memory |allocator| and
-  // providing the given |stack_depth| to each thread tracker it manages. The
-  // created object is activated so tracking has already started upon return.
-  GlobalActivityTracker(std::unique_ptr<PersistentMemoryAllocator> allocator,
-                        int stack_depth);
-
-  // Returns the memory used by an activity-tracker managed by this class.
-  // It is called during the destruction of a ManagedActivityTracker object.
-  void ReturnTrackerMemory(ManagedActivityTracker* tracker);
-
-  // Releases the activity-tracker associcated with thread. It is called
-  // automatically when a thread is joined and thus there is nothing more to
-  // be tracked. |value| is a pointer to a ManagedActivityTracker.
-  static void OnTLSDestroy(void* value);
-
-  // The persistent-memory allocator from which the memory for all trackers
-  // is taken.
-  std::unique_ptr<PersistentMemoryAllocator> allocator_;
-
-  // The size (in bytes) of memory required by a ThreadActivityTracker to
-  // provide the stack-depth requested during construction.
-  const size_t stack_memory_size_;
-
-  // The activity tracker for the currently executing thread.
-  base::ThreadLocalStorage::Slot this_thread_tracker_;
-
-  // The number of thread trackers currently active.
-  std::atomic<int> thread_tracker_count_;
-
-  // A caching memory allocator for thread-tracker objects.
-  ActivityTrackerMemoryAllocator thread_tracker_allocator_;
-  base::Lock thread_tracker_allocator_lock_;
-
-  // A caching memory allocator for user data attached to activity data.
-  ActivityTrackerMemoryAllocator user_data_allocator_;
-  base::Lock user_data_allocator_lock_;
-
-  // An object for holding global arbitrary key value pairs. Values must always
-  // be written from the main UI thread.
-  GlobalUserData global_data_;
-
-  // A map of global module information, keyed by module path.
-  std::map<const std::string, ModuleInfoRecord*> modules_;
-  base::Lock modules_lock_;
-
-  // The active global activity tracker.
-  static subtle::AtomicWord g_tracker_;
-
-  DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
-};
-
-
-// Record entry in to and out of an arbitrary block of code.
-class BASE_EXPORT ScopedActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  // Track activity at the specified FROM_HERE location for an arbitrary
-  // 4-bit |action|, an arbitrary 32-bit |id|, and 32-bits of arbitrary
-  // |info|. None of these values affect operation; they're all purely
-  // for association and analysis. To have unique identifiers across a
-  // diverse code-base, create the number by taking the first 8 characters
-  // of the hash of the activity being tracked.
-  //
-  // For example:
-  //   Tracking method: void MayNeverExit(uint32_t foo) {...}
-  //   echo -n "MayNeverExit" | sha1sum   =>   e44873ccab21e2b71270da24aa1...
-  //
-  //   void MayNeverExit(int32_t foo) {
-  //     base::debug::ScopedActivity track_me(0, 0xE44873CC, foo);
-  //     ...
-  //   }
-  ALWAYS_INLINE
-  ScopedActivity(uint8_t action, uint32_t id, int32_t info)
-      : ScopedActivity(::tracked_objects::GetProgramCounter(),
-                       action,
-                       id,
-                       info) {}
-  ScopedActivity() : ScopedActivity(0, 0, 0) {}
-
-  // Changes the |action| and/or |info| of this activity on the stack. This
-  // is useful for tracking progress through a function, updating the action
-  // to indicate "milestones" in the block (max 16 milestones: 0-15) or the
-  // info to reflect other changes. Changing both is not atomic so a snapshot
-  // operation could occur between the update of |action| and |info|.
-  void ChangeAction(uint8_t action);
-  void ChangeInfo(int32_t info);
-  void ChangeActionAndInfo(uint8_t action, int32_t info);
-
- private:
-  // Constructs the object using a passed-in program-counter.
-  ScopedActivity(const void* program_counter,
-                 uint8_t action,
-                 uint32_t id,
-                 int32_t info);
-
-  // A copy of the ID code so it doesn't have to be passed by the caller when
-  // changing the |info| field.
-  uint32_t id_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
-};
-
-
-// These "scoped" classes provide easy tracking of various blocking actions.
-
-class BASE_EXPORT ScopedTaskRunActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedTaskRunActivity(const base::PendingTask& task)
-      : ScopedTaskRunActivity(::tracked_objects::GetProgramCounter(),
-                              task) {}
-
- private:
-  ScopedTaskRunActivity(const void* program_counter,
-                        const base::PendingTask& task);
-  DISALLOW_COPY_AND_ASSIGN(ScopedTaskRunActivity);
-};
-
-class BASE_EXPORT ScopedLockAcquireActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedLockAcquireActivity(const base::internal::LockImpl* lock)
-      : ScopedLockAcquireActivity(::tracked_objects::GetProgramCounter(),
-                                  lock) {}
-
- private:
-  ScopedLockAcquireActivity(const void* program_counter,
-                            const base::internal::LockImpl* lock);
-  DISALLOW_COPY_AND_ASSIGN(ScopedLockAcquireActivity);
-};
-
-class BASE_EXPORT ScopedEventWaitActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedEventWaitActivity(const base::WaitableEvent* event)
-      : ScopedEventWaitActivity(::tracked_objects::GetProgramCounter(),
-                                event) {}
-
- private:
-  ScopedEventWaitActivity(const void* program_counter,
-                          const base::WaitableEvent* event);
-  DISALLOW_COPY_AND_ASSIGN(ScopedEventWaitActivity);
-};
-
-class BASE_EXPORT ScopedThreadJoinActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedThreadJoinActivity(const base::PlatformThreadHandle* thread)
-      : ScopedThreadJoinActivity(::tracked_objects::GetProgramCounter(),
-                                 thread) {}
-
- private:
-  ScopedThreadJoinActivity(const void* program_counter,
-                           const base::PlatformThreadHandle* thread);
-  DISALLOW_COPY_AND_ASSIGN(ScopedThreadJoinActivity);
-};
-
-// Some systems don't have base::Process
-#if !defined(OS_NACL) && !defined(OS_IOS)
-class BASE_EXPORT ScopedProcessWaitActivity
-    : public GlobalActivityTracker::ScopedThreadActivity {
- public:
-  ALWAYS_INLINE
-  explicit ScopedProcessWaitActivity(const base::Process* process)
-      : ScopedProcessWaitActivity(::tracked_objects::GetProgramCounter(),
-                                  process) {}
-
- private:
-  ScopedProcessWaitActivity(const void* program_counter,
-                            const base::Process* process);
-  DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity);
-};
-#endif
-
-}  // namespace debug
-}  // namespace base
-
-#endif  // BASE_DEBUG_ACTIVITY_TRACKER_H_
diff --git a/base/debug/activity_tracker_unittest.cc b/base/debug/activity_tracker_unittest.cc
deleted file mode 100644
index aced4fb..0000000
--- a/base/debug/activity_tracker_unittest.cc
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/debug/activity_tracker.h"
-
-#include <memory>
-
-#include "base/bind.h"
-#include "base/files/file.h"
-#include "base/files/file_util.h"
-#include "base/files/memory_mapped_file.h"
-#include "base/files/scoped_temp_dir.h"
-#include "base/memory/ptr_util.h"
-#include "base/pending_task.h"
-#include "base/rand_util.h"
-#include "base/synchronization/condition_variable.h"
-#include "base/synchronization/lock.h"
-#include "base/synchronization/spin_wait.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/simple_thread.h"
-#include "base/time/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace debug {
-
-namespace {
-
-class TestActivityTracker : public ThreadActivityTracker {
- public:
-  TestActivityTracker(std::unique_ptr<char[]> memory, size_t mem_size)
-      : ThreadActivityTracker(memset(memory.get(), 0, mem_size), mem_size),
-        mem_segment_(std::move(memory)) {}
-
-  ~TestActivityTracker() override {}
-
- private:
-  std::unique_ptr<char[]> mem_segment_;
-};
-
-}  // namespace
-
-
-class ActivityTrackerTest : public testing::Test {
- public:
-  const int kMemorySize = 1 << 20;  // 1MiB
-  const int kStackSize  = 1 << 10;  // 1KiB
-
-  using ActivityId = ThreadActivityTracker::ActivityId;
-
-  ActivityTrackerTest() {}
-
-  ~ActivityTrackerTest() override {
-    GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
-    if (global_tracker) {
-      global_tracker->ReleaseTrackerForCurrentThreadForTesting();
-      delete global_tracker;
-    }
-  }
-
-  std::unique_ptr<ThreadActivityTracker> CreateActivityTracker() {
-    std::unique_ptr<char[]> memory(new char[kStackSize]);
-    return MakeUnique<TestActivityTracker>(std::move(memory), kStackSize);
-  }
-
-  size_t GetGlobalActiveTrackerCount() {
-    GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
-    if (!global_tracker)
-      return 0;
-    return global_tracker->thread_tracker_count_.load(
-        std::memory_order_relaxed);
-  }
-
-  size_t GetGlobalInactiveTrackerCount() {
-    GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
-    if (!global_tracker)
-      return 0;
-    base::AutoLock autolock(global_tracker->thread_tracker_allocator_lock_);
-    return global_tracker->thread_tracker_allocator_.cache_used();
-  }
-
-  size_t GetGlobalUserDataMemoryCacheUsed() {
-    return GlobalActivityTracker::Get()->user_data_allocator_.cache_used();
-  }
-
-  static void DoNothing() {}
-};
-
-TEST_F(ActivityTrackerTest, UserDataTest) {
-  char buffer[256];
-  memset(buffer, 0, sizeof(buffer));
-  ActivityUserData data(buffer, sizeof(buffer));
-  const size_t space = sizeof(buffer) - 8;
-  ASSERT_EQ(space, data.available_);
-
-  data.SetInt("foo", 1);
-  ASSERT_EQ(space - 24, data.available_);
-
-  data.SetUint("b", 1U);  // Small names fit beside header in a word.
-  ASSERT_EQ(space - 24 - 16, data.available_);
-
-  data.Set("c", buffer, 10);
-  ASSERT_EQ(space - 24 - 16 - 24, data.available_);
-
-  data.SetString("dear john", "it's been fun");
-  ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
-
-  data.Set("c", buffer, 20);
-  ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
-
-  data.SetString("dear john", "but we're done together");
-  ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
-
-  data.SetString("dear john", "bye");
-  ASSERT_EQ(space - 24 - 16 - 24 - 32, data.available_);
-
-  data.SetChar("d", 'x');
-  ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8, data.available_);
-
-  data.SetBool("ee", true);
-  ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16, data.available_);
-
-  data.SetString("f", "");
-  ASSERT_EQ(space - 24 - 16 - 24 - 32 - 8 - 16 - 8, data.available_);
-}
-
-TEST_F(ActivityTrackerTest, PushPopTest) {
-  std::unique_ptr<ThreadActivityTracker> tracker = CreateActivityTracker();
-  ThreadActivityTracker::Snapshot snapshot;
-
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  ASSERT_EQ(0U, snapshot.activity_stack_depth);
-  ASSERT_EQ(0U, snapshot.activity_stack.size());
-
-  char origin1;
-  ActivityId id1 = tracker->PushActivity(&origin1, Activity::ACT_TASK,
-                                         ActivityData::ForTask(11));
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  ASSERT_EQ(1U, snapshot.activity_stack_depth);
-  ASSERT_EQ(1U, snapshot.activity_stack.size());
-  EXPECT_NE(0, snapshot.activity_stack[0].time_internal);
-  EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin1),
-            snapshot.activity_stack[0].origin_address);
-  EXPECT_EQ(11U, snapshot.activity_stack[0].data.task.sequence_id);
-
-  char origin2;
-  char lock2;
-  ActivityId id2 = tracker->PushActivity(&origin2, Activity::ACT_LOCK,
-                                         ActivityData::ForLock(&lock2));
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  ASSERT_EQ(2U, snapshot.activity_stack_depth);
-  ASSERT_EQ(2U, snapshot.activity_stack.size());
-  EXPECT_LE(snapshot.activity_stack[0].time_internal,
-            snapshot.activity_stack[1].time_internal);
-  EXPECT_EQ(Activity::ACT_LOCK, snapshot.activity_stack[1].activity_type);
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin2),
-            snapshot.activity_stack[1].origin_address);
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(&lock2),
-            snapshot.activity_stack[1].data.lock.lock_address);
-
-  tracker->PopActivity(id2);
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  ASSERT_EQ(1U, snapshot.activity_stack_depth);
-  ASSERT_EQ(1U, snapshot.activity_stack.size());
-  EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
-  EXPECT_EQ(reinterpret_cast<uintptr_t>(&origin1),
-            snapshot.activity_stack[0].origin_address);
-  EXPECT_EQ(11U, snapshot.activity_stack[0].data.task.sequence_id);
-
-  tracker->PopActivity(id1);
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  ASSERT_EQ(0U, snapshot.activity_stack_depth);
-  ASSERT_EQ(0U, snapshot.activity_stack.size());
-}
-
-TEST_F(ActivityTrackerTest, ScopedTaskTest) {
-  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
-
-  ThreadActivityTracker* tracker =
-      GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
-  ThreadActivityTracker::Snapshot snapshot;
-  ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
-
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  ASSERT_EQ(0U, snapshot.activity_stack_depth);
-  ASSERT_EQ(0U, snapshot.activity_stack.size());
-
-  {
-    PendingTask task1(FROM_HERE, base::Bind(&DoNothing));
-    ScopedTaskRunActivity activity1(task1);
-    ActivityUserData& user_data1 = activity1.user_data();
-    (void)user_data1;  // Tell compiler it's been used.
-
-    ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-    ASSERT_EQ(1U, snapshot.activity_stack_depth);
-    ASSERT_EQ(1U, snapshot.activity_stack.size());
-    EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
-
-    {
-      PendingTask task2(FROM_HERE, base::Bind(&DoNothing));
-      ScopedTaskRunActivity activity2(task2);
-      ActivityUserData& user_data2 = activity2.user_data();
-      (void)user_data2;  // Tell compiler it's been used.
-
-      ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-      ASSERT_EQ(2U, snapshot.activity_stack_depth);
-      ASSERT_EQ(2U, snapshot.activity_stack.size());
-      EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[1].activity_type);
-    }
-
-    ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-    ASSERT_EQ(1U, snapshot.activity_stack_depth);
-    ASSERT_EQ(1U, snapshot.activity_stack.size());
-    EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
-  }
-
-  ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
-  ASSERT_EQ(0U, snapshot.activity_stack_depth);
-  ASSERT_EQ(0U, snapshot.activity_stack.size());
-  ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
-}
-
-TEST_F(ActivityTrackerTest, CreateWithFileTest) {
-  const char temp_name[] = "CreateWithFileTest";
-  ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath temp_file = temp_dir.GetPath().AppendASCII(temp_name);
-  const size_t temp_size = 64 << 10;  // 64 KiB
-
-  // Create a global tracker on a new file.
-  ASSERT_FALSE(PathExists(temp_file));
-  GlobalActivityTracker::CreateWithFile(temp_file, temp_size, 0, "foo", 3);
-  GlobalActivityTracker* global = GlobalActivityTracker::Get();
-  EXPECT_EQ(std::string("foo"), global->allocator()->Name());
-  global->ReleaseTrackerForCurrentThreadForTesting();
-  delete global;
-
-  // Create a global tracker over an existing file, replacing it. If the
-  // replacement doesn't work, the name will remain as it was first created.
-  ASSERT_TRUE(PathExists(temp_file));
-  GlobalActivityTracker::CreateWithFile(temp_file, temp_size, 0, "bar", 3);
-  global = GlobalActivityTracker::Get();
-  EXPECT_EQ(std::string("bar"), global->allocator()->Name());
-  global->ReleaseTrackerForCurrentThreadForTesting();
-  delete global;
-}
-
-
-// GlobalActivityTracker tests below.
-
-class SimpleActivityThread : public SimpleThread {
- public:
-  SimpleActivityThread(const std::string& name,
-                       const void* origin,
-                       Activity::Type activity,
-                       const ActivityData& data)
-      : SimpleThread(name, Options()),
-        origin_(origin),
-        activity_(activity),
-        data_(data),
-        exit_condition_(&lock_) {}
-
-  ~SimpleActivityThread() override {}
-
-  void Run() override {
-    ThreadActivityTracker::ActivityId id =
-        GlobalActivityTracker::Get()
-            ->GetOrCreateTrackerForCurrentThread()
-            ->PushActivity(origin_, activity_, data_);
-
-    {
-      AutoLock auto_lock(lock_);
-      ready_ = true;
-      while (!exit_)
-        exit_condition_.Wait();
-    }
-
-    GlobalActivityTracker::Get()->GetTrackerForCurrentThread()->PopActivity(id);
-  }
-
-  void Exit() {
-    AutoLock auto_lock(lock_);
-    exit_ = true;
-    exit_condition_.Signal();
-  }
-
-  void WaitReady() {
-    SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(ready_);
-  }
-
- private:
-  const void* origin_;
-  Activity::Type activity_;
-  ActivityData data_;
-
-  bool ready_ = false;
-  bool exit_ = false;
-  Lock lock_;
-  ConditionVariable exit_condition_;
-
-  DISALLOW_COPY_AND_ASSIGN(SimpleActivityThread);
-};
-
-TEST_F(ActivityTrackerTest, ThreadDeathTest) {
-  GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
-  GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
-  const size_t starting_active = GetGlobalActiveTrackerCount();
-  const size_t starting_inactive = GetGlobalInactiveTrackerCount();
-
-  SimpleActivityThread t1("t1", nullptr, Activity::ACT_TASK,
-                          ActivityData::ForTask(11));
-  t1.Start();
-  t1.WaitReady();
-  EXPECT_EQ(starting_active + 1, GetGlobalActiveTrackerCount());
-  EXPECT_EQ(starting_inactive, GetGlobalInactiveTrackerCount());
-
-  t1.Exit();
-  t1.Join();
-  EXPECT_EQ(starting_active, GetGlobalActiveTrackerCount());
-  EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
-
-  // Start another thread and ensure it re-uses the existing memory.
-
-  SimpleActivityThread t2("t2", nullptr, Activity::ACT_TASK,
-                          ActivityData::ForTask(22));
-  t2.Start();
-  t2.WaitReady();
-  EXPECT_EQ(starting_active + 1, GetGlobalActiveTrackerCount());
-  EXPECT_EQ(starting_inactive, GetGlobalInactiveTrackerCount());
-
-  t2.Exit();
-  t2.Join();
-  EXPECT_EQ(starting_active, GetGlobalActiveTrackerCount());
-  EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount());
-}
-
-}  // namespace debug
-}  // namespace base
diff --git a/base/debug/alias.cc b/base/debug/alias.cc
index d498084..ff35574 100644
--- a/base/debug/alias.cc
+++ b/base/debug/alias.cc
@@ -12,8 +12,7 @@
 #pragma optimize("", off)
 #endif
 
-void Alias(const void* /* var */) {
-}
+void Alias(const void*) {}
 
 #if defined(COMPILER_MSVC)
 #pragma optimize("", on)
diff --git a/base/debug/debugger_posix.cc b/base/debug/debugger_posix.cc
index ebe9d61..a157d9a 100644
--- a/base/debug/debugger_posix.cc
+++ b/base/debug/debugger_posix.cc
@@ -18,8 +18,6 @@
 #include <vector>
 
 #include "base/macros.h"
-#include "base/threading/platform_thread.h"
-#include "base/time/time.h"
 #include "build/build_config.h"
 
 #if defined(__GLIBCXX__)
diff --git a/base/debug/debugging_flags.h b/base/debug/debugging_flags.h
index e6ae1ee..1ea435f 100644
--- a/base/debug/debugging_flags.h
+++ b/base/debug/debugging_flags.h
@@ -1,8 +1,11 @@
 // Generated by build/write_buildflag_header.py
 // From "base_debugging_flags"
+
 #ifndef BASE_DEBUG_DEBUGGING_FLAGS_H_
 #define BASE_DEBUG_DEBUGGING_FLAGS_H_
+
 #include "build/buildflag.h"
+
 #define BUILDFLAG_INTERNAL_ENABLE_PROFILING() (0)
-#define BUILDFLAG_INTERNAL_ENABLE_MEMORY_TASK_PROFILER() (0)
+
 #endif  // BASE_DEBUG_DEBUGGING_FLAGS_H_
diff --git a/base/debug/dump_without_crashing.cc b/base/debug/dump_without_crashing.cc
deleted file mode 100644
index 4b338ca..0000000
--- a/base/debug/dump_without_crashing.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/debug/dump_without_crashing.h"
-
-#include "base/logging.h"
-
-namespace {
-
-// Pointer to the function that's called by DumpWithoutCrashing() to dump the
-// process's memory.
-void (CDECL *dump_without_crashing_function_)() = NULL;
-
-}  // namespace
-
-namespace base {
-
-namespace debug {
-
-bool DumpWithoutCrashing() {
-  if (dump_without_crashing_function_) {
-    (*dump_without_crashing_function_)();
-    return true;
-  }
-  return false;
-}
-
-void SetDumpWithoutCrashingFunction(void (CDECL *function)()) {
-  dump_without_crashing_function_ = function;
-}
-
-}  // namespace debug
-
-}  // namespace base
diff --git a/base/debug/dump_without_crashing.h b/base/debug/dump_without_crashing.h
deleted file mode 100644
index a5c85d5..0000000
--- a/base/debug/dump_without_crashing.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
-#define BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
-
-#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "build/build_config.h"
-
-namespace base {
-
-namespace debug {
-
-// Handler to silently dump the current process without crashing.
-// Before calling this function, call SetDumpWithoutCrashingFunction to pass a
-// function pointer, typically chrome!DumpProcessWithoutCrash.  See example code
-// in chrome_main.cc that does this for chrome.dll.
-// Returns false if called before SetDumpWithoutCrashingFunction.
-BASE_EXPORT bool DumpWithoutCrashing();
-
-// Sets a function that'll be invoked to dump the current process when
-// DumpWithoutCrashing() is called.
-BASE_EXPORT void SetDumpWithoutCrashingFunction(void (CDECL *function)());
-
-}  // namespace debug
-
-}  // namespace base
-
-#endif  // BASE_DEBUG_DUMP_WITHOUT_CRASHING_H_
diff --git a/base/debug/leak_tracker_unittest.cc b/base/debug/leak_tracker_unittest.cc
index b9ecdcf..8b4c568 100644
--- a/base/debug/leak_tracker_unittest.cc
+++ b/base/debug/leak_tracker_unittest.cc
@@ -30,7 +30,7 @@
   EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
   EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
 
-  // Use unique_ptr so compiler doesn't complain about unused variables.
+  // Use unique_ptr so compiler doesn't complain about unused variables.
   std::unique_ptr<ClassA> a1(new ClassA);
   std::unique_ptr<ClassB> b1(new ClassB);
   std::unique_ptr<ClassB> b2(new ClassB);
diff --git a/base/debug/profiler.cc b/base/debug/profiler.cc
deleted file mode 100644
index e303c28..0000000
--- a/base/debug/profiler.cc
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/debug/profiler.h"
-
-#include <string>
-
-#include "base/debug/debugging_flags.h"
-#include "base/process/process_handle.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_util.h"
-#include "build/build_config.h"
-
-#if defined(OS_WIN)
-#include "base/win/current_module.h"
-#include "base/win/pe_image.h"
-#endif  // defined(OS_WIN)
-
-// TODO(peria): Enable profiling on Windows.
-#if BUILDFLAG(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
-#include "third_party/tcmalloc/chromium/src/gperftools/profiler.h"
-#endif
-
-namespace base {
-namespace debug {
-
-// TODO(peria): Enable profiling on Windows.
-#if BUILDFLAG(ENABLE_PROFILING) && !defined(NO_TCMALLOC) && !defined(OS_WIN)
-
-static int profile_count = 0;
-
-void StartProfiling(const std::string& name) {
-  ++profile_count;
-  std::string full_name(name);
-  std::string pid = IntToString(GetCurrentProcId());
-  std::string count = IntToString(profile_count);
-  ReplaceSubstringsAfterOffset(&full_name, 0, "{pid}", pid);
-  ReplaceSubstringsAfterOffset(&full_name, 0, "{count}", count);
-  ProfilerStart(full_name.c_str());
-}
-
-void StopProfiling() {
-  ProfilerFlush();
-  ProfilerStop();
-}
-
-void FlushProfiling() {
-  ProfilerFlush();
-}
-
-bool BeingProfiled() {
-  return ProfilingIsEnabledForAllThreads();
-}
-
-void RestartProfilingAfterFork() {
-  ProfilerRegisterThread();
-}
-
-bool IsProfilingSupported() {
-  return true;
-}
-
-#else
-
-void StartProfiling(const std::string&) {
-}
-
-void StopProfiling() {
-}
-
-void FlushProfiling() {
-}
-
-bool BeingProfiled() {
-  return false;
-}
-
-void RestartProfilingAfterFork() {
-}
-
-bool IsProfilingSupported() {
-  return false;
-}
-
-#endif
-
-#if !defined(OS_WIN)
-
-bool IsBinaryInstrumented() {
-  return false;
-}
-
-ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
-  return NULL;
-}
-
-DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
-  return NULL;
-}
-
-AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
-  return NULL;
-}
-
-MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
-  return NULL;
-}
-
-#else  // defined(OS_WIN)
-
-bool IsBinaryInstrumented() {
-  enum InstrumentationCheckState {
-    UNINITIALIZED,
-    INSTRUMENTED_IMAGE,
-    NON_INSTRUMENTED_IMAGE,
-  };
-
-  static InstrumentationCheckState state = UNINITIALIZED;
-
-  if (state == UNINITIALIZED) {
-    base::win::PEImage image(CURRENT_MODULE());
-
-    // Check to be sure our image is structured as we'd expect.
-    DCHECK(image.VerifyMagic());
-
-    // Syzygy-instrumented binaries contain a PE image section named ".thunks",
-    // and all Syzygy-modified binaries contain the ".syzygy" image section.
-    // This is a very fast check, as it only looks at the image header.
-    if ((image.GetImageSectionHeaderByName(".thunks") != NULL) &&
-        (image.GetImageSectionHeaderByName(".syzygy") != NULL)) {
-      state = INSTRUMENTED_IMAGE;
-    } else {
-      state = NON_INSTRUMENTED_IMAGE;
-    }
-  }
-  DCHECK(state != UNINITIALIZED);
-
-  return state == INSTRUMENTED_IMAGE;
-}
-
-namespace {
-
-struct FunctionSearchContext {
-  const char* name;
-  FARPROC function;
-};
-
-// Callback function to PEImage::EnumImportChunks.
-bool FindResolutionFunctionInImports(
-    const base::win::PEImage &image, const char* module_name,
-    PIMAGE_THUNK_DATA unused_name_table, PIMAGE_THUNK_DATA import_address_table,
-    PVOID cookie) {
-  FunctionSearchContext* context =
-      reinterpret_cast<FunctionSearchContext*>(cookie);
-
-  DCHECK(context);
-  DCHECK(!context->function);
-
-  // Our import address table contains pointers to the functions we import
-  // at this point. Let's retrieve the first such function and use it to
-  // find the module this import was resolved to by the loader.
-  const wchar_t* function_in_module =
-      reinterpret_cast<const wchar_t*>(import_address_table->u1.Function);
-
-  // Retrieve the module by a function in the module.
-  const DWORD kFlags = GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
-                       GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT;
-  HMODULE module = NULL;
-  if (!::GetModuleHandleEx(kFlags, function_in_module, &module)) {
-    // This can happen if someone IAT patches us to a thunk.
-    return true;
-  }
-
-  // See whether this module exports the function we're looking for.
-  FARPROC exported_func = ::GetProcAddress(module, context->name);
-  if (exported_func != NULL) {
-    // We found it, return the function and terminate the enumeration.
-    context->function = exported_func;
-    return false;
-  }
-
-  // Keep going.
-  return true;
-}
-
-template <typename FunctionType>
-FunctionType FindFunctionInImports(const char* function_name) {
-  if (!IsBinaryInstrumented())
-    return NULL;
-
-  base::win::PEImage image(CURRENT_MODULE());
-
-  FunctionSearchContext ctx = { function_name, NULL };
-  image.EnumImportChunks(FindResolutionFunctionInImports, &ctx);
-
-  return reinterpret_cast<FunctionType>(ctx.function);
-}
-
-}  // namespace
-
-ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc() {
-  return FindFunctionInImports<ReturnAddressLocationResolver>(
-      "ResolveReturnAddressLocation");
-}
-
-DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc() {
-  return FindFunctionInImports<DynamicFunctionEntryHook>(
-      "OnDynamicFunctionEntry");
-}
-
-AddDynamicSymbol GetProfilerAddDynamicSymbolFunc() {
-  return FindFunctionInImports<AddDynamicSymbol>(
-      "AddDynamicSymbol");
-}
-
-MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc() {
-  return FindFunctionInImports<MoveDynamicSymbol>(
-      "MoveDynamicSymbol");
-}
-
-#endif  // defined(OS_WIN)
-
-}  // namespace debug
-}  // namespace base
diff --git a/base/debug/profiler.h b/base/debug/profiler.h
deleted file mode 100644
index ea81b13..0000000
--- a/base/debug/profiler.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_DEBUG_PROFILER_H_
-#define BASE_DEBUG_PROFILER_H_
-
-#include <stddef.h>
-
-#include <string>
-
-#include "base/base_export.h"
-
-// The Profiler functions allow usage of the underlying sampling based
-// profiler. If the application has not been built with the necessary
-// flags (-DENABLE_PROFILING and not -DNO_TCMALLOC) then these functions
-// are noops.
-namespace base {
-namespace debug {
-
-// Start profiling with the supplied name.
-// {pid} will be replaced by the process' pid and {count} will be replaced
-// by the count of the profile run (starts at 1 with each process).
-BASE_EXPORT void StartProfiling(const std::string& name);
-
-// Stop profiling and write out data.
-BASE_EXPORT void StopProfiling();
-
-// Force data to be written to file.
-BASE_EXPORT void FlushProfiling();
-
-// Returns true if process is being profiled.
-BASE_EXPORT bool BeingProfiled();
-
-// Reset profiling after a fork, which disables timers.
-BASE_EXPORT void RestartProfilingAfterFork();
-
-// Returns true iff this executable is instrumented with the Syzygy profiler.
-BASE_EXPORT bool IsBinaryInstrumented();
-
-// Returns true iff this executable supports profiling.
-BASE_EXPORT bool IsProfilingSupported();
-
-// There's a class of profilers that use "return address swizzling" to get a
-// hook on function exits. This class of profilers uses some form of entry hook,
-// like e.g. binary instrumentation, or a compiler flag, that calls a hook each
-// time a function is invoked. The hook then switches the return address on the
-// stack for the address of an exit hook function, and pushes the original
-// return address to a shadow stack of some type. When in due course the CPU
-// executes a return to the exit hook, the exit hook will do whatever work it
-// does on function exit, then arrange to return to the original return address.
-// This class of profiler does not play well with programs that look at the
-// return address, as does e.g. V8. V8 uses the return address to certain
-// runtime functions to find the JIT code that called it, and from there finds
-// the V8 data structures associated to the JS function involved.
-// A return address resolution function is used to fix this. It allows such
-// programs to resolve a location on stack where a return address originally
-// resided, to the shadow stack location where the profiler stashed it.
-typedef uintptr_t (*ReturnAddressLocationResolver)(
-    uintptr_t return_addr_location);
-
-// This type declaration must match V8's FunctionEntryHook.
-typedef void (*DynamicFunctionEntryHook)(uintptr_t function,
-                                         uintptr_t return_addr_location);
-
-// The functions below here are to support profiling V8-generated code.
-// V8 has provisions for generating a call to an entry hook for newly generated
-// JIT code, and it can push symbol information on code generation and advise
-// when the garbage collector moves code. The functions declarations below here
-// make glue between V8's facilities and a profiler.
-
-// This type declaration must match V8's FunctionEntryHook.
-typedef void (*DynamicFunctionEntryHook)(uintptr_t function,
-                                         uintptr_t return_addr_location);
-
-typedef void (*AddDynamicSymbol)(const void* address,
-                                 size_t length,
-                                 const char* name,
-                                 size_t name_len);
-typedef void (*MoveDynamicSymbol)(const void* address, const void* new_address);
-
-
-// If this binary is instrumented and the instrumentation supplies a function
-// for each of those purposes, find and return the function in question.
-// Otherwise returns NULL.
-BASE_EXPORT ReturnAddressLocationResolver GetProfilerReturnAddrResolutionFunc();
-BASE_EXPORT DynamicFunctionEntryHook GetProfilerDynamicFunctionEntryHookFunc();
-BASE_EXPORT AddDynamicSymbol GetProfilerAddDynamicSymbolFunc();
-BASE_EXPORT MoveDynamicSymbol GetProfilerMoveDynamicSymbolFunc();
-
-}  // namespace debug
-}  // namespace base
-
-#endif  // BASE_DEBUG_PROFILER_H_
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index af4a6ef..ac0ead7 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -9,195 +9,17 @@
 #include <algorithm>
 #include <sstream>
 
-#include "base/logging.h"
 #include "base/macros.h"
 
-#if HAVE_TRACE_STACK_FRAME_POINTERS
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
+#if HAVE_TRACE_STACK_FRAME_POINTERS && defined(OS_ANDROID)
 #include <pthread.h>
 #include "base/process/process_handle.h"
 #include "base/threading/platform_thread.h"
 #endif
 
-#if defined(OS_MACOSX)
-#include <pthread.h>
-#endif
-
-#if defined(OS_LINUX) && defined(__GLIBC__)
-extern "C" void* __libc_stack_end;
-#endif
-
-#endif  // HAVE_TRACE_STACK_FRAME_POINTERS
-
 namespace base {
 namespace debug {
 
-namespace {
-
-#if HAVE_TRACE_STACK_FRAME_POINTERS
-
-#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
-// GCC and LLVM generate slightly different frames on ARM, see
-// https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
-// x86-compatible frame, while GCC needs adjustment.
-constexpr size_t kStackFrameAdjustment = sizeof(uintptr_t);
-#else
-constexpr size_t kStackFrameAdjustment = 0;
-#endif
-
-uintptr_t GetNextStackFrame(uintptr_t fp) {
-  return reinterpret_cast<const uintptr_t*>(fp)[0] - kStackFrameAdjustment;
-}
-
-uintptr_t GetStackFramePC(uintptr_t fp) {
-  return reinterpret_cast<const uintptr_t*>(fp)[1];
-}
-
-bool IsStackFrameValid(uintptr_t fp, uintptr_t prev_fp, uintptr_t stack_end) {
-  // With the stack growing downwards, older stack frame must be
-  // at a greater address that the current one.
-  if (fp <= prev_fp) return false;
-
-  // Assume huge stack frames are bogus.
-  if (fp - prev_fp > 100000) return false;
-
-  // Check alignment.
-  if (fp & (sizeof(uintptr_t) - 1)) return false;
-
-  if (stack_end) {
-    // Both fp[0] and fp[1] must be within the stack.
-    if (fp > stack_end - 2 * sizeof(uintptr_t)) return false;
-
-    // Additional check to filter out false positives.
-    if (GetStackFramePC(fp) < 32768) return false;
-  }
-
-  return true;
-};
-
-// ScanStackForNextFrame() scans the stack for a valid frame to allow unwinding
-// past system libraries. Only supported on Linux where system libraries are
-// usually in the middle of the trace:
-//
-//   TraceStackFramePointers
-//   <more frames from Chrome>
-//   base::WorkSourceDispatch   <-- unwinding stops (next frame is invalid),
-//   g_main_context_dispatch        ScanStackForNextFrame() is called
-//   <more frames from glib>
-//   g_main_context_iteration
-//   base::MessagePumpGlib::Run <-- ScanStackForNextFrame() finds valid frame,
-//   base::RunLoop::Run             unwinding resumes
-//   <more frames from Chrome>
-//   __libc_start_main
-//
-// For stack scanning to be efficient it's very important for the thread to
-// be started by Chrome. In that case we naturally terminate unwinding once
-// we reach the origin of the stack (i.e. GetStackEnd()). If the thread is
-// not started by Chrome (e.g. Android's main thread), then we end up always
-// scanning area at the origin of the stack, wasting time and not finding any
-// frames (since Android libraries don't have frame pointers).
-//
-// ScanStackForNextFrame() returns 0 if it couldn't find a valid frame
-// (or if stack scanning is not supported on the current platform).
-uintptr_t ScanStackForNextFrame(uintptr_t fp, uintptr_t stack_end) {
-#if defined(OS_LINUX)
-  // Enough to resume almost all prematurely terminated traces.
-  constexpr size_t kMaxStackScanArea = 8192;
-
-  if (!stack_end) {
-    // Too dangerous to scan without knowing where the stack ends.
-    return 0;
-  }
-
-  fp += sizeof(uintptr_t);  // current frame is known to be invalid
-  uintptr_t last_fp_to_scan = std::min(fp + kMaxStackScanArea, stack_end) -
-                                  sizeof(uintptr_t);
-  for (;fp <= last_fp_to_scan; fp += sizeof(uintptr_t)) {
-    uintptr_t next_fp = GetNextStackFrame(fp);
-    if (IsStackFrameValid(next_fp, fp, stack_end)) {
-      // Check two frames deep. Since stack frame is just a pointer to
-      // a higher address on the stack, it's relatively easy to find
-      // something that looks like one. However two linked frames are
-      // far less likely to be bogus.
-      uintptr_t next2_fp = GetNextStackFrame(next_fp);
-      if (IsStackFrameValid(next2_fp, next_fp, stack_end)) {
-        return fp;
-      }
-    }
-  }
-#endif  // defined(OS_LINUX)
-
-  return 0;
-}
-
-// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
-// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
-// Both frame pointers must come from __builtin_frame_address().
-// Returns previous stack frame |fp| was linked to.
-void* LinkStackFrames(void* fpp, void* parent_fp) {
-  uintptr_t fp = reinterpret_cast<uintptr_t>(fpp) - kStackFrameAdjustment;
-  void* prev_parent_fp = reinterpret_cast<void**>(fp)[0];
-  reinterpret_cast<void**>(fp)[0] = parent_fp;
-  return prev_parent_fp;
-}
-
-#endif  // HAVE_TRACE_STACK_FRAME_POINTERS
-
-}  // namespace
-
-#if HAVE_TRACE_STACK_FRAME_POINTERS
-uintptr_t GetStackEnd() {
-#if defined(OS_ANDROID)
-  // Bionic reads proc/maps on every call to pthread_getattr_np() when called
-  // from the main thread. So we need to cache end of stack in that case to get
-  // acceptable performance.
-  // For all other threads pthread_getattr_np() is fast enough as it just reads
-  // values from its pthread_t argument.
-  static uintptr_t main_stack_end = 0;
-
-  bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
-  if (is_main_thread && main_stack_end) {
-    return main_stack_end;
-  }
-
-  uintptr_t stack_begin = 0;
-  size_t stack_size = 0;
-  pthread_attr_t attributes;
-  int error = pthread_getattr_np(pthread_self(), &attributes);
-  if (!error) {
-    error = pthread_attr_getstack(
-        &attributes, reinterpret_cast<void**>(&stack_begin), &stack_size);
-    pthread_attr_destroy(&attributes);
-  }
-  DCHECK(!error);
-
-  uintptr_t stack_end = stack_begin + stack_size;
-  if (is_main_thread) {
-    main_stack_end = stack_end;
-  }
-  return stack_end;  // 0 in case of error
-
-#elif defined(OS_LINUX) && defined(__GLIBC__)
-
-  if (GetCurrentProcId() == PlatformThread::CurrentId()) {
-    // For the main thread we have a shortcut.
-    return reinterpret_cast<uintptr_t>(__libc_stack_end);
-  }
-
-// No easy way to get end of the stack for non-main threads,
-// see crbug.com/617730.
-#elif defined(OS_MACOSX)
-  return reinterpret_cast<uintptr_t>(pthread_get_stackaddr_np(pthread_self()));
-#endif
-
-  // Don't know how to get end of the stack.
-  return 0;
-}
-#endif  // HAVE_TRACE_STACK_FRAME_POINTERS
-
-StackTrace::StackTrace() : StackTrace(arraysize(trace_)) {}
-
 StackTrace::StackTrace(const void* const* trace, size_t count) {
   count = std::min(count, arraysize(trace_));
   if (count)
@@ -205,6 +27,9 @@
   count_ = count;
 }
 
+StackTrace::~StackTrace() {
+}
+
 const void *const *StackTrace::Addresses(size_t* count) const {
   *count = count_;
   if (count_)
@@ -222,55 +47,100 @@
 
 #if HAVE_TRACE_STACK_FRAME_POINTERS
 
+#if defined(OS_ANDROID)
+
+static uintptr_t GetStackEnd() {
+  // Bionic reads proc/maps on every call to pthread_getattr_np() when called
+  // from the main thread. So we need to cache end of stack in that case to get
+  // acceptable performance.
+  // For all other threads pthread_getattr_np() is fast enough as it just reads
+  // values from its pthread_t argument.
+  static uintptr_t main_stack_end = 0;
+
+  bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
+
+  if (is_main_thread && main_stack_end) {
+    return main_stack_end;
+  }
+
+  uintptr_t stack_begin = 0;
+  size_t stack_size = 0;
+  pthread_attr_t attributes;
+  int error = pthread_getattr_np(pthread_self(), &attributes);
+  if (!error) {
+    error = pthread_attr_getstack(
+        &attributes,
+        reinterpret_cast<void**>(&stack_begin),
+        &stack_size);
+    pthread_attr_destroy(&attributes);
+  }
+  DCHECK(!error);
+
+  uintptr_t stack_end = stack_begin + stack_size;
+  if (is_main_thread) {
+    main_stack_end = stack_end;
+  }
+  return stack_end;
+}
+
+#endif  // defined(OS_ANDROID)
+
 size_t TraceStackFramePointers(const void** out_trace,
                                size_t max_depth,
                                size_t skip_initial) {
   // Usage of __builtin_frame_address() enables frame pointers in this
-  // function even if they are not enabled globally. So 'fp' will always
+  // function even if they are not enabled globally. So 'sp' will always
   // be valid.
-  uintptr_t fp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0)) -
-                    kStackFrameAdjustment;
+  uintptr_t sp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
 
+#if defined(OS_ANDROID)
   uintptr_t stack_end = GetStackEnd();
+#endif
 
   size_t depth = 0;
   while (depth < max_depth) {
+#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
+    // GCC and LLVM generate slightly different frames on ARM, see
+    // https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
+    // x86-compatible frame, while GCC needs adjustment.
+    sp -= sizeof(uintptr_t);
+#endif
+
+#if defined(OS_ANDROID)
+    // Both sp[0] and sp[1] must be valid.
+    if (sp + 2 * sizeof(uintptr_t) > stack_end) {
+      break;
+    }
+#endif
+
     if (skip_initial != 0) {
       skip_initial--;
     } else {
-      out_trace[depth++] = reinterpret_cast<const void*>(GetStackFramePC(fp));
+      out_trace[depth++] = reinterpret_cast<const void**>(sp)[1];
     }
 
-    uintptr_t next_fp = GetNextStackFrame(fp);
-    if (IsStackFrameValid(next_fp, fp, stack_end)) {
-      fp = next_fp;
-      continue;
-    }
+    // Find out next frame pointer
+    // (heuristics are from TCMalloc's stacktrace functions)
+    {
+      uintptr_t next_sp = reinterpret_cast<const uintptr_t*>(sp)[0];
 
-    next_fp = ScanStackForNextFrame(fp, stack_end);
-    if (next_fp) {
-      fp = next_fp;
-      continue;
-    }
+      // With the stack growing downwards, older stack frame must be
+      // at a greater address than the current one.
+      if (next_sp <= sp) break;
 
-    // Failed to find next frame.
-    break;
+      // Assume stack frames larger than 100,000 bytes are bogus.
+      if (next_sp - sp > 100000) break;
+
+      // Check alignment.
+      if (sp & (sizeof(void*) - 1)) break;
+
+      sp = next_sp;
+    }
   }
 
   return depth;
 }
 
-ScopedStackFrameLinker::ScopedStackFrameLinker(void* fp, void* parent_fp)
-    : fp_(fp),
-      parent_fp_(parent_fp),
-      original_parent_fp_(LinkStackFrames(fp, parent_fp)) {}
-
-ScopedStackFrameLinker::~ScopedStackFrameLinker() {
-  void* previous_parent_fp = LinkStackFrames(fp_, original_parent_fp_);
-  CHECK_EQ(parent_fp_, previous_parent_fp)
-      << "Stack frame's parent pointer has changed!";
-}
-
 #endif  // HAVE_TRACE_STACK_FRAME_POINTERS
 
 }  // namespace debug
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index 4c9b73e..23e7b51 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -11,7 +11,6 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/macros.h"
 #include "build/build_config.h"
 
 #if defined(OS_POSIX)
@@ -45,11 +44,6 @@
 // done in official builds because it has security implications).
 BASE_EXPORT bool EnableInProcessStackDumping();
 
-// Returns end of the stack, or 0 if we couldn't get it.
-#if HAVE_TRACE_STACK_FRAME_POINTERS
-BASE_EXPORT uintptr_t GetStackEnd();
-#endif
-
 // A stacktrace can be helpful in debugging. For example, you can include a
 // stacktrace member in a object (probably around #ifndef NDEBUG) so that you
 // can later see where the given object was created from.
@@ -58,13 +52,9 @@
   // Creates a stacktrace from the current location.
   StackTrace();
 
-  // Creates a stacktrace from the current location, of up to |count| entries.
-  // |count| will be limited to at most |kMaxTraces|.
-  explicit StackTrace(size_t count);
-
   // Creates a stacktrace from an existing array of instruction
   // pointers (such as returned by Addresses()).  |count| will be
-  // limited to at most |kMaxTraces|.
+  // trimmed to |kMaxTraces|.
   StackTrace(const void* const* trace, size_t count);
 
 #if defined(OS_WIN)
@@ -77,6 +67,8 @@
 
   // Copying and assignment are allowed with the default functions.
 
+  ~StackTrace();
+
   // Gets an array of instruction pointer values. |*count| will be set to the
   // number of elements in the returned array.
   const void* const* Addresses(size_t* count) const;
@@ -121,57 +113,6 @@
 BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
                                            size_t max_depth,
                                            size_t skip_initial);
-
-// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
-// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
-// Both frame pointers must come from __builtin_frame_address().
-// Destructor restores original linkage of |fp| to avoid corrupting caller's
-// frame register on return.
-//
-// This class can be used to repair broken stack frame chain in cases
-// when execution flow goes into code built without frame pointers:
-//
-// void DoWork() {
-//   Call_SomeLibrary();
-// }
-// static __thread void*  g_saved_fp;
-// void Call_SomeLibrary() {
-//   g_saved_fp = __builtin_frame_address(0);
-//   some_library_call(...); // indirectly calls SomeLibrary_Callback()
-// }
-// void SomeLibrary_Callback() {
-//   ScopedStackFrameLinker linker(__builtin_frame_address(0), g_saved_fp);
-//   ...
-//   TraceStackFramePointers(...);
-// }
-//
-// This produces the following trace:
-//
-// #0 SomeLibrary_Callback()
-// #1 <address of the code inside SomeLibrary that called #0>
-// #2 DoWork()
-// ...rest of the trace...
-//
-// SomeLibrary doesn't use frame pointers, so when SomeLibrary_Callback()
-// is called, stack frame register contains bogus value that becomes callback'
-// parent frame address. Without ScopedStackFrameLinker unwinding would've
-// stopped at that bogus frame address yielding just two first frames (#0, #1).
-// ScopedStackFrameLinker overwrites callback's parent frame address with
-// Call_SomeLibrary's frame, so unwinder produces full trace without even
-// noticing that stack frame chain was broken.
-class BASE_EXPORT ScopedStackFrameLinker {
- public:
-  ScopedStackFrameLinker(void* fp, void* parent_fp);
-  ~ScopedStackFrameLinker();
-
- private:
-  void* fp_;
-  void* parent_fp_;
-  void* original_parent_fp_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedStackFrameLinker);
-};
-
 #endif  // HAVE_TRACE_STACK_FRAME_POINTERS
 
 namespace internal {
diff --git a/base/debug/stack_trace_posix.cc b/base/debug/stack_trace_posix.cc
index 78bc650..3c0299c 100644
--- a/base/debug/stack_trace_posix.cc
+++ b/base/debug/stack_trace_posix.cc
@@ -16,14 +16,13 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <algorithm>
 #include <map>
 #include <memory>
 #include <ostream>
 #include <string>
 #include <vector>
 
-#if !defined(USE_SYMBOLIZE)
+#if defined(__GLIBCXX__)
 #include <cxxabi.h>
 #endif
 #if !defined(__UCLIBC__)
@@ -34,11 +33,8 @@
 #include <AvailabilityMacros.h>
 #endif
 
-#if defined(OS_LINUX)
-#include "base/debug/proc_maps_linux.h"
-#endif
-
 #include "base/debug/debugger.h"
+#include "base/debug/proc_maps_linux.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/free_deleter.h"
@@ -70,18 +66,16 @@
     "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
 #endif  // !defined(USE_SYMBOLIZE) && defined(__GLIBCXX__)
 
-#if !defined(USE_SYMBOLIZE) && !defined(__UCLIBC__)
+#if !defined(USE_SYMBOLIZE)
 // Demangles C++ symbols in the given text. Example:
 //
 // "out/Debug/base_unittests(_ZN10StackTraceC1Ev+0x20) [0x817778c]"
 // =>
 // "out/Debug/base_unittests(StackTrace::StackTrace()+0x20) [0x817778c]"
+#if defined(__GLIBCXX__) && !defined(__UCLIBC__)
 void DemangleSymbols(std::string* text) {
   // Note: code in this function is NOT async-signal safe (std::string uses
   // malloc internally).
-  ALLOW_UNUSED_PARAM(text);
-#if defined(__GLIBCXX__) && !defined(__UCLIBC__)
-
   std::string::size_type search_from = 0;
   while (search_from < text->size()) {
     // Look for the start of a mangled symbol, from search_from.
@@ -116,8 +110,11 @@
       search_from = mangled_start + 2;
     }
   }
-#endif  // defined(__GLIBCXX__) && !defined(__UCLIBC__)
 }
+#elif !defined(__UCLIBC__)
+void DemangleSymbols(std::string* /* text */) {}
+#endif  // defined(__GLIBCXX__) && !defined(__UCLIBC__)
+
 #endif  // !defined(USE_SYMBOLIZE)
 
 class BacktraceOutputHandler {
@@ -128,7 +125,7 @@
   virtual ~BacktraceOutputHandler() {}
 };
 
-#if !defined(__UCLIBC__)
+#if defined(USE_SYMBOLIZE) || !defined(__UCLIBC__)
 void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
   // This should be more than enough to store a 64-bit number in hex:
   // 16 hex digits + 1 for null-terminator.
@@ -138,6 +135,7 @@
                    buf, sizeof(buf), 16, 12);
   handler->HandleOutput(buf);
 }
+#endif  // defined(USE_SYMBOLIZE) ||  !defined(__UCLIBC__)
 
 #if defined(USE_SYMBOLIZE)
 void OutputFrameId(intptr_t frame_id, BacktraceOutputHandler* handler) {
@@ -151,9 +149,13 @@
 }
 #endif  // defined(USE_SYMBOLIZE)
 
-void ProcessBacktrace(void *const *trace,
+#if !defined(__UCLIBC__)
+void ProcessBacktrace(void *const * trace,
                       size_t size,
                       BacktraceOutputHandler* handler) {
+  (void)trace;  // unused based on build context below.
+  (void)size;  // unused based on build context below.
+  (void)handler;  // unused based on build context below.
   // NOTE: This code MUST be async-signal safe (it's used by in-process
   // stack dumping signal handler). NO malloc or stdio is allowed here.
 
@@ -214,7 +216,7 @@
 }
 
 void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
-  ALLOW_UNUSED_PARAM(void_context);  // unused depending on build context
+  (void)void_context;  // unused depending on build context
   // NOTE: This code MUST be async-signal safe.
   // NO malloc or stdio is allowed here.
 
@@ -384,7 +386,6 @@
   // Non-Mac OSes should probably reraise the signal as well, but the Linux
   // sandbox tests break on CrOS devices.
   // https://code.google.com/p/chromium/issues/detail?id=551681
-  PrintToStderr("Calling _exit(1). Core file will not be generated.\n");
   _exit(1);
 #endif  // defined(OS_MACOSX) && !defined(OS_IOS)
 }
@@ -449,6 +450,8 @@
   StackTrace stack_trace;
 }
 
+}  // namespace
+
 #if defined(USE_SYMBOLIZE)
 
 // class SandboxSymbolizeHelper.
@@ -464,8 +467,7 @@
  public:
   // Returns the singleton instance.
   static SandboxSymbolizeHelper* GetInstance() {
-    return Singleton<SandboxSymbolizeHelper,
-                     LeakySingletonTraits<SandboxSymbolizeHelper>>::get();
+    return Singleton<SandboxSymbolizeHelper>::get();
   }
 
  private:
@@ -681,8 +683,6 @@
 };
 #endif  // USE_SYMBOLIZE
 
-}  // namespace
-
 bool EnableInProcessStackDumping() {
 #if defined(USE_SYMBOLIZE)
   SandboxSymbolizeHelper::GetInstance();
@@ -719,18 +719,15 @@
   return success;
 }
 
-StackTrace::StackTrace(size_t count) {
-// NOTE: This code MUST be async-signal safe (it's used by in-process
-// stack dumping signal handler). NO malloc or stdio is allowed here.
+StackTrace::StackTrace() {
+  // NOTE: This code MUST be async-signal safe (it's used by in-process
+  // stack dumping signal handler). NO malloc or stdio is allowed here.
 
 #if !defined(__UCLIBC__)
-  count = std::min(arraysize(trace_), count);
-
   // Though the backtrace API man page does not list any possible negative
   // return values, we take no chance.
-  count_ = base::saturated_cast<size_t>(backtrace(trace_, count));
+  count_ = base::saturated_cast<size_t>(backtrace(trace_, arraysize(trace_)));
 #else
-  ALLOW_UNUSED_PARAM(count);
   count_ = 0;
 #endif
 }
diff --git a/base/debug/task_annotator.cc b/base/debug/task_annotator.cc
index 46969f2..4ba4d91 100644
--- a/base/debug/task_annotator.cc
+++ b/base/debug/task_annotator.cc
@@ -4,9 +4,6 @@
 
 #include "base/debug/task_annotator.h"
 
-#include <array>
-
-#include "base/debug/activity_tracker.h"
 #include "base/debug/alias.h"
 #include "base/pending_task.h"
 #include "base/trace_event/trace_event.h"
@@ -30,37 +27,32 @@
 }
 
 void TaskAnnotator::RunTask(const char* queue_function,
-                            PendingTask* pending_task) {
-  ScopedTaskRunActivity task_activity(*pending_task);
-
+                            const PendingTask& pending_task) {
   tracked_objects::TaskStopwatch stopwatch;
   stopwatch.Start();
   tracked_objects::Duration queue_duration =
-      stopwatch.StartTime() - pending_task->EffectiveTimePosted();
+      stopwatch.StartTime() - pending_task.EffectiveTimePosted();
 
-  TRACE_EVENT_WITH_FLOW1(
-      TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), queue_function,
-      TRACE_ID_MANGLE(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_IN,
-      "queue_duration", queue_duration.InMilliseconds());
+  TRACE_EVENT_WITH_FLOW1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+                          queue_function,
+                          TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
+                          TRACE_EVENT_FLAG_FLOW_IN,
+                          "queue_duration",
+                          queue_duration.InMilliseconds());
 
-  // Before running the task, store the task backtrace with the chain of
-  // PostTasks that resulted in this call and deliberately alias it to ensure
-  // it is on the stack if the task crashes. Be careful not to assume that the
-  // variable itself will have the expected value when displayed by the
-  // optimizer in an optimized build. Look at a memory dump of the stack.
-  static constexpr int kStackTaskTraceSnapshotSize =
-      std::tuple_size<decltype(pending_task->task_backtrace)>::value + 1;
-  std::array<const void*, kStackTaskTraceSnapshotSize> task_backtrace;
-  task_backtrace[0] = pending_task->posted_from.program_counter();
-  std::copy(pending_task->task_backtrace.begin(),
-            pending_task->task_backtrace.end(), task_backtrace.begin() + 1);
-  debug::Alias(&task_backtrace);
+  // Before running the task, store the program counter where it was posted
+  // and deliberately alias it to ensure it is on the stack if the task
+  // crashes. Be careful not to assume that the variable itself will have the
+  // expected value when displayed by the optimizer in an optimized build.
+  // Look at a memory dump of the stack.
+  const void* program_counter = pending_task.posted_from.program_counter();
+  debug::Alias(&program_counter);
 
-  std::move(pending_task->task).Run();
+  pending_task.task.Run();
 
   stopwatch.Stop();
-  tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(*pending_task,
-                                                               stopwatch);
+  tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
+      pending_task, stopwatch);
 }
 
 uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
diff --git a/base/debug/task_annotator.h b/base/debug/task_annotator.h
index 34115d8..2687c5c 100644
--- a/base/debug/task_annotator.h
+++ b/base/debug/task_annotator.h
@@ -28,7 +28,7 @@
 
   // Run a previously queued task. |queue_function| should match what was
   // passed into |DidQueueTask| for this task.
-  void RunTask(const char* queue_function, PendingTask* pending_task);
+  void RunTask(const char* queue_function, const PendingTask& pending_task);
 
  private:
   // Creates a process-wide unique ID to represent this task in trace events.
diff --git a/base/debug/task_annotator_unittest.cc b/base/debug/task_annotator_unittest.cc
index 8a1c8bd..9f5c442 100644
--- a/base/debug/task_annotator_unittest.cc
+++ b/base/debug/task_annotator_unittest.cc
@@ -24,7 +24,7 @@
   TaskAnnotator annotator;
   annotator.DidQueueTask("TaskAnnotatorTest::Queue", pending_task);
   EXPECT_EQ(0, result);
-  annotator.RunTask("TaskAnnotatorTest::Queue", &pending_task);
+  annotator.RunTask("TaskAnnotatorTest::Queue", pending_task);
   EXPECT_EQ(123, result);
 }
 
diff --git a/base/debug/thread_heap_usage_tracker.h b/base/debug/thread_heap_usage_tracker.h
deleted file mode 100644
index 508a0a3..0000000
--- a/base/debug/thread_heap_usage_tracker.h
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
-#define BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
-
-#include <stdint.h>
-
-#include "base/allocator/features.h"
-#include "base/base_export.h"
-#include "base/threading/thread_checker.h"
-
-namespace base {
-namespace allocator {
-struct AllocatorDispatch;
-}  // namespace allocator
-
-namespace debug {
-
-// Used to store the heap allocator usage in a scope.
-struct ThreadHeapUsage {
-  // The cumulative number of allocation operations.
-  uint64_t alloc_ops;
-
-  // The cumulative number of allocated bytes. Where available, this is
-  // inclusive heap padding and estimated or actual heap overhead.
-  uint64_t alloc_bytes;
-
-  // Where available, cumulative number of heap padding and overhead bytes.
-  uint64_t alloc_overhead_bytes;
-
-  // The cumulative number of free operations.
-  uint64_t free_ops;
-
-  // The cumulative number of bytes freed.
-  // Only recorded if the underlying heap shim can return the size of an
-  // allocation.
-  uint64_t free_bytes;
-
-  // The maximal value of |alloc_bytes| - |free_bytes| seen for this thread.
-  // Only recorded if the underlying heap shim supports returning the size of
-  // an allocation.
-  uint64_t max_allocated_bytes;
-};
-
-// By keeping a tally on heap operations, it's possible to track:
-// - the number of alloc/free operations, where a realloc is zero or one
-//   of each, depending on the input parameters (see man realloc).
-// - the number of bytes allocated/freed.
-// - the number of estimated bytes of heap overhead used.
-// - the high-watermark amount of bytes allocated in the scope.
-// This in turn allows measuring the memory usage and memory usage churn over
-// a scope. Scopes must be cleanly nested, and each scope must be
-// destroyed on the thread where it's created.
-//
-// Note that this depends on the capabilities of the underlying heap shim. If
-// that shim can not yield a size estimate for an allocation, it's not possible
-// to keep track of overhead, freed bytes and the allocation high water mark.
-class BASE_EXPORT ThreadHeapUsageTracker {
- public:
-  ThreadHeapUsageTracker();
-  ~ThreadHeapUsageTracker();
-
-  // Start tracking heap usage on this thread.
-  // This may only be called on the thread where the instance is created.
-  // Note IsHeapTrackingEnabled() must be true.
-  void Start();
-
-  // Stop tracking heap usage on this thread and store the usage tallied.
-  // If |usage_is_exclusive| is true, the usage tallied won't be added to the
-  // outer scope's usage. If |usage_is_exclusive| is false, the usage tallied
-  // in this scope will also tally to any outer scope.
-  // This may only be called on the thread where the instance is created.
-  void Stop(bool usage_is_exclusive);
-
-  // After Stop() returns the usage tallied from Start() to Stop().
-  const ThreadHeapUsage& usage() const { return usage_; }
-
-  // Returns this thread's heap usage from the start of the innermost
-  // enclosing ThreadHeapUsageTracker instance, if any.
-  static ThreadHeapUsage GetUsageSnapshot();
-
-  // Enables the heap intercept. May only be called once, and only if the heap
-  // shim is available, e.g. if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) is
-  // true.
-  static void EnableHeapTracking();
-
-  // Returns true iff heap tracking is enabled.
-  static bool IsHeapTrackingEnabled();
-
- protected:
-  // Exposed for testing only - note that it's safe to re-EnableHeapTracking()
-  // after calling this function in tests.
-  static void DisableHeapTrackingForTesting();
-
-  // Exposed for testing only.
-  static void EnsureTLSInitialized();
-
-  // Exposed to allow testing the shim without inserting it in the allocator
-  // shim chain.
-  static base::allocator::AllocatorDispatch* GetDispatchForTesting();
-
- private:
-  ThreadChecker thread_checker_;
-
-  // The heap usage at Start(), or the difference from Start() to Stop().
-  ThreadHeapUsage usage_;
-
-  // This thread's heap usage, non-null from Start() to Stop().
-  ThreadHeapUsage* thread_usage_;
-};
-
-}  // namespace debug
-}  // namespace base
-
-#endif  // BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
\ No newline at end of file
diff --git a/base/feature_list.cc b/base/feature_list.cc
index 353136c..435165e 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -10,9 +10,7 @@
 #include <vector>
 
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
 #include "base/metrics/field_trial.h"
-#include "base/pickle.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_util.h"
 
@@ -28,42 +26,6 @@
 // Tracks whether the FeatureList instance was initialized via an accessor.
 bool g_initialized_from_accessor = false;
 
-// An allocator entry for a feature in shared memory. The FeatureEntry is
-// followed by a base::Pickle object that contains the feature and trial name.
-struct FeatureEntry {
-  // SHA1(FeatureEntry): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0x06567CA6 + 1;
-
-  // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize = 8;
-
-  // Specifies whether a feature override enables or disables the feature. Same
-  // values as the OverrideState enum in feature_list.h
-  uint32_t override_state;
-
-  // Size of the pickled structure, NOT the total size of this entry.
-  uint32_t pickle_size;
-
-  // Reads the feature and trial name from the pickle. Calling this is only
-  // valid on an initialized entry that's in shared memory.
-  bool GetFeatureAndTrialName(StringPiece* feature_name,
-                              StringPiece* trial_name) const {
-    const char* src =
-        reinterpret_cast<const char*>(this) + sizeof(FeatureEntry);
-
-    Pickle pickle(src, pickle_size);
-    PickleIterator pickle_iter(pickle);
-
-    if (!pickle_iter.ReadStringPiece(feature_name))
-      return false;
-
-    // Return true because we are not guaranteed to have a trial name anyways.
-    auto sink = pickle_iter.ReadStringPiece(trial_name);
-    ALLOW_UNUSED_LOCAL(sink);
-    return true;
-  }
-};
-
 // Some characters are not allowed to appear in feature names or the associated
 // field trial names, as they are used as special characters for command-line
 // serialization. This function checks that the strings are ASCII (since they
@@ -93,26 +55,6 @@
   initialized_from_command_line_ = true;
 }
 
-void FeatureList::InitializeFromSharedMemory(
-    PersistentMemoryAllocator* allocator) {
-  DCHECK(!initialized_);
-
-  PersistentMemoryAllocator::Iterator iter(allocator);
-  const FeatureEntry* entry;
-  while ((entry = iter.GetNextOfObject<FeatureEntry>()) != nullptr) {
-    OverrideState override_state =
-        static_cast<OverrideState>(entry->override_state);
-
-    StringPiece feature_name;
-    StringPiece trial_name;
-    if (!entry->GetFeatureAndTrialName(&feature_name, &trial_name))
-      continue;
-
-    FieldTrial* trial = FieldTrialList::Find(trial_name.as_string());
-    RegisterOverride(feature_name, override_state, trial);
-  }
-}
-
 bool FeatureList::IsFeatureOverriddenFromCommandLine(
     const std::string& feature_name,
     OverrideState state) const {
@@ -155,30 +97,6 @@
   RegisterOverride(feature_name, override_state, field_trial);
 }
 
-void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) {
-  DCHECK(initialized_);
-
-  for (const auto& override : overrides_) {
-    Pickle pickle;
-    pickle.WriteString(override.first);
-    if (override.second.field_trial)
-      pickle.WriteString(override.second.field_trial->trial_name());
-
-    size_t total_size = sizeof(FeatureEntry) + pickle.size();
-    FeatureEntry* entry = allocator->New<FeatureEntry>(total_size);
-    if (!entry)
-      return;
-
-    entry->override_state = override.second.overridden_state;
-    entry->pickle_size = pickle.size();
-
-    char* dst = reinterpret_cast<char*>(entry) + sizeof(FeatureEntry);
-    memcpy(dst, pickle.data(), pickle.size());
-
-    allocator->MakeIterable(entry);
-  }
-}
-
 void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
                                       std::string* disable_overrides) {
   DCHECK(initialized_);
@@ -279,19 +197,10 @@
 }
 
 // static
-std::unique_ptr<FeatureList> FeatureList::ClearInstanceForTesting() {
-  FeatureList* old_instance = g_instance;
+void FeatureList::ClearInstanceForTesting() {
+  delete g_instance;
   g_instance = nullptr;
   g_initialized_from_accessor = false;
-  return base::WrapUnique(old_instance);
-}
-
-// static
-void FeatureList::RestoreInstanceForTesting(
-    std::unique_ptr<FeatureList> instance) {
-  DCHECK(!g_instance);
-  // Note: Intentional leak of global singleton.
-  g_instance = instance.release();
 }
 
 void FeatureList::FinalizeInitialization() {
diff --git a/base/feature_list.h b/base/feature_list.h
index 09e8408..e9ed00a 100644
--- a/base/feature_list.h
+++ b/base/feature_list.h
@@ -13,7 +13,6 @@
 #include "base/base_export.h"
 #include "base/gtest_prod_util.h"
 #include "base/macros.h"
-#include "base/metrics/persistent_memory_allocator.h"
 #include "base/strings/string_piece.h"
 #include "base/synchronization/lock.h"
 
@@ -32,8 +31,6 @@
 // for a given feature name - generally defined as a constant global variable or
 // file static.
 struct BASE_EXPORT Feature {
-  constexpr Feature(const char* name, FeatureState default_state)
-      : name(name), default_state(default_state) {}
   // The name of the feature. This should be unique to each feature and is used
   // for enabling/disabling features via command line flags and experiments.
   const char* const name;
@@ -95,11 +92,6 @@
   void InitializeFromCommandLine(const std::string& enable_features,
                                  const std::string& disable_features);
 
-  // Initializes feature overrides through the field trial allocator, which
-  // we're using to store the feature names, their override state, and the name
-  // of the associated field trial.
-  void InitializeFromSharedMemory(PersistentMemoryAllocator* allocator);
-
   // Specifies whether a feature override enables or disables the feature.
   enum OverrideState {
     OVERRIDE_USE_DEFAULT,
@@ -132,9 +124,6 @@
                                   OverrideState override_state,
                                   FieldTrial* field_trial);
 
-  // Loops through feature overrides and serializes them all into |allocator|.
-  void AddFeaturesToAllocator(PersistentMemoryAllocator* allocator);
-
   // Returns comma-separated lists of feature names (in the same format that is
   // accepted by InitializeFromCommandLine()) corresponding to features that
   // have been overridden - either through command-line or via FieldTrials. For
@@ -174,27 +163,13 @@
 
   // Registers the given |instance| to be the singleton feature list for this
   // process. This should only be called once and |instance| must not be null.
-  // Note: If you are considering using this for the purposes of testing, take
-  // a look at using base/test/scoped_feature_list.h instead.
   static void SetInstance(std::unique_ptr<FeatureList> instance);
 
-  // Clears the previously-registered singleton instance for tests and returns
-  // the old instance.
-  // Note: Most tests should never call this directly. Instead consider using
-  // base::test::ScopedFeatureList.
-  static std::unique_ptr<FeatureList> ClearInstanceForTesting();
-
-  // Sets a given (initialized) |instance| to be the singleton feature list,
-  // for testing. Existing instance must be null. This is primarily intended
-  // to support base::test::ScopedFeatureList helper class.
-  static void RestoreInstanceForTesting(std::unique_ptr<FeatureList> instance);
+  // Clears the previously-registered singleton instance for tests.
+  static void ClearInstanceForTesting();
 
  private:
   FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
-  FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
-                           StoreAndRetrieveFeaturesFromSharedMemory);
-  FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
-                           StoreAndRetrieveAssociatedFeaturesFromSharedMemory);
 
   struct OverrideEntry {
     // The overridden enable (on/off) state of the feature.
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
index fb3b320..9d1dcb7 100644
--- a/base/feature_list_unittest.cc
+++ b/base/feature_list_unittest.cc
@@ -13,7 +13,6 @@
 #include "base/macros.h"
 #include "base/memory/ptr_util.h"
 #include "base/metrics/field_trial.h"
-#include "base/metrics/persistent_memory_allocator.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -22,12 +21,12 @@
 
 namespace {
 
-constexpr char kFeatureOnByDefaultName[] = "OnByDefault";
+const char kFeatureOnByDefaultName[] = "OnByDefault";
 struct Feature kFeatureOnByDefault {
   kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
 };
 
-constexpr char kFeatureOffByDefaultName[] = "OffByDefault";
+const char kFeatureOffByDefaultName[] = "OffByDefault";
 struct Feature kFeatureOffByDefault {
   kFeatureOffByDefaultName, FEATURE_DISABLED_BY_DEFAULT
 };
@@ -469,68 +468,4 @@
   EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
 }
 
-TEST_F(FeatureListTest, StoreAndRetrieveFeaturesFromSharedMemory) {
-  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
-
-  // Create some overrides.
-  feature_list->RegisterOverride(kFeatureOffByDefaultName,
-                                 FeatureList::OVERRIDE_ENABLE_FEATURE, nullptr);
-  feature_list->RegisterOverride(
-      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, nullptr);
-  feature_list->FinalizeInitialization();
-
-  // Create an allocator and store the overrides.
-  std::unique_ptr<SharedMemory> shm(new SharedMemory());
-  shm->CreateAndMapAnonymous(4 << 10);
-  SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
-  feature_list->AddFeaturesToAllocator(&allocator);
-
-  std::unique_ptr<base::FeatureList> feature_list2(new base::FeatureList);
-
-  // Check that the new feature list is empty.
-  EXPECT_FALSE(feature_list2->IsFeatureOverriddenFromCommandLine(
-      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
-  EXPECT_FALSE(feature_list2->IsFeatureOverriddenFromCommandLine(
-      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
-
-  feature_list2->InitializeFromSharedMemory(&allocator);
-  // Check that the new feature list now has 2 overrides.
-  EXPECT_TRUE(feature_list2->IsFeatureOverriddenFromCommandLine(
-      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
-  EXPECT_TRUE(feature_list2->IsFeatureOverriddenFromCommandLine(
-      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
-}
-
-TEST_F(FeatureListTest, StoreAndRetrieveAssociatedFeaturesFromSharedMemory) {
-  FieldTrialList field_trial_list(nullptr);
-  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
-
-  // Create some overrides.
-  FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
-  FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
-  feature_list->RegisterFieldTrialOverride(
-      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial1);
-  feature_list->RegisterFieldTrialOverride(
-      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial2);
-  feature_list->FinalizeInitialization();
-
-  // Create an allocator and store the overrides.
-  std::unique_ptr<SharedMemory> shm(new SharedMemory());
-  shm->CreateAndMapAnonymous(4 << 10);
-  SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
-  feature_list->AddFeaturesToAllocator(&allocator);
-
-  std::unique_ptr<base::FeatureList> feature_list2(new base::FeatureList);
-  feature_list2->InitializeFromSharedMemory(&allocator);
-  feature_list2->FinalizeInitialization();
-
-  // Check that the field trials are still associated.
-  FieldTrial* associated_trial1 =
-      feature_list2->GetAssociatedFieldTrial(kFeatureOnByDefault);
-  FieldTrial* associated_trial2 =
-      feature_list2->GetAssociatedFieldTrial(kFeatureOffByDefault);
-  EXPECT_EQ(associated_trial1, trial1);
-  EXPECT_EQ(associated_trial2, trial2);
-}
-
 }  // namespace base
diff --git a/base/file_version_info_unittest.cc b/base/file_version_info_unittest.cc
new file mode 100644
index 0000000..67edc77
--- /dev/null
+++ b/base/file_version_info_unittest.cc
@@ -0,0 +1,144 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_version_info.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/macros.h"
+
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/path_service.h"
+#include "base/file_version_info_win.h"
+#endif
+
+using base::FilePath;
+
+namespace {
+
+#if defined(OS_WIN)
+FilePath GetTestDataPath() {
+  FilePath path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &path);
+  path = path.AppendASCII("base");
+  path = path.AppendASCII("test");
+  path = path.AppendASCII("data");
+  path = path.AppendASCII("file_version_info_unittest");
+  return path;
+}
+#endif
+
+}  // namespace
+
+#if defined(OS_WIN)
+TEST(FileVersionInfoTest, HardCodedProperties) {
+  const wchar_t kDLLName[] = {L"FileVersionInfoTest1.dll"};
+
+  const wchar_t* const kExpectedValues[15] = {
+      // FileVersionInfoTest.dll
+      L"Goooooogle",                                  // company_name
+      L"Google",                                      // company_short_name
+      L"This is the product name",                    // product_name
+      L"This is the product short name",              // product_short_name
+      L"The Internal Name",                           // internal_name
+      L"4.3.2.1",                                     // product_version
+      L"Private build property",                      // private_build
+      L"Special build property",                      // special_build
+      L"This is a particularly interesting comment",  // comments
+      L"This is the original filename",               // original_filename
+      L"This is my file description",                 // file_description
+      L"1.2.3.4",                                     // file_version
+      L"This is the legal copyright",                 // legal_copyright
+      L"This is the legal trademarks",                // legal_trademarks
+      L"This is the last change",                     // last_change
+  };
+
+  FilePath dll_path = GetTestDataPath();
+  dll_path = dll_path.Append(kDLLName);
+
+  std::unique_ptr<FileVersionInfo> version_info(
+      FileVersionInfo::CreateFileVersionInfo(dll_path));
+
+  int j = 0;
+  EXPECT_EQ(kExpectedValues[j++], version_info->company_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->company_short_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_short_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->internal_name());
+  EXPECT_EQ(kExpectedValues[j++], version_info->product_version());
+  EXPECT_EQ(kExpectedValues[j++], version_info->private_build());
+  EXPECT_EQ(kExpectedValues[j++], version_info->special_build());
+  EXPECT_EQ(kExpectedValues[j++], version_info->comments());
+  EXPECT_EQ(kExpectedValues[j++], version_info->original_filename());
+  EXPECT_EQ(kExpectedValues[j++], version_info->file_description());
+  EXPECT_EQ(kExpectedValues[j++], version_info->file_version());
+  EXPECT_EQ(kExpectedValues[j++], version_info->legal_copyright());
+  EXPECT_EQ(kExpectedValues[j++], version_info->legal_trademarks());
+  EXPECT_EQ(kExpectedValues[j++], version_info->last_change());
+}
+#endif
+
+#if defined(OS_WIN)
+TEST(FileVersionInfoTest, IsOfficialBuild) {
+  const wchar_t* kDLLNames[] = {
+    L"FileVersionInfoTest1.dll",
+    L"FileVersionInfoTest2.dll"
+  };
+
+  const bool kExpected[] = {
+    true,
+    false,
+  };
+
+  // Test consistency check.
+  ASSERT_EQ(arraysize(kDLLNames), arraysize(kExpected));
+
+  for (size_t i = 0; i < arraysize(kDLLNames); ++i) {
+    FilePath dll_path = GetTestDataPath();
+    dll_path = dll_path.Append(kDLLNames[i]);
+
+    std::unique_ptr<FileVersionInfo> version_info(
+        FileVersionInfo::CreateFileVersionInfo(dll_path));
+
+    EXPECT_EQ(kExpected[i], version_info->is_official_build());
+  }
+}
+#endif
+
+#if defined(OS_WIN)
+TEST(FileVersionInfoTest, CustomProperties) {
+  FilePath dll_path = GetTestDataPath();
+  dll_path = dll_path.AppendASCII("FileVersionInfoTest1.dll");
+
+  std::unique_ptr<FileVersionInfo> version_info(
+      FileVersionInfo::CreateFileVersionInfo(dll_path));
+
+  // Test few existing properties.
+  std::wstring str;
+  FileVersionInfoWin* version_info_win =
+      static_cast<FileVersionInfoWin*>(version_info.get());
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 1",  &str));
+  EXPECT_EQ(L"Un", str);
+  EXPECT_EQ(L"Un", version_info_win->GetStringValue(L"Custom prop 1"));
+
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 2",  &str));
+  EXPECT_EQ(L"Deux", str);
+  EXPECT_EQ(L"Deux", version_info_win->GetStringValue(L"Custom prop 2"));
+
+  EXPECT_TRUE(version_info_win->GetValue(L"Custom prop 3",  &str));
+  EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043", str);
+  EXPECT_EQ(L"1600 Amphitheatre Parkway Mountain View, CA 94043",
+            version_info_win->GetStringValue(L"Custom prop 3"));
+
+  // Test an non-existing property.
+  EXPECT_FALSE(version_info_win->GetValue(L"Unknown property",  &str));
+  EXPECT_EQ(L"", version_info_win->GetStringValue(L"Unknown property"));
+}
+#endif
diff --git a/base/files/dir_reader_posix_unittest.cc b/base/files/dir_reader_posix_unittest.cc
index 5d7fd8b..a75858f 100644
--- a/base/files/dir_reader_posix_unittest.cc
+++ b/base/files/dir_reader_posix_unittest.cc
@@ -29,7 +29,7 @@
 
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  const char* dir = temp_dir.GetPath().value().c_str();
+  const char* dir = temp_dir.path().value().c_str();
   ASSERT_TRUE(dir);
 
   const int prev_wd = open(".", O_RDONLY | O_DIRECTORY);
diff --git a/base/files/file.cc b/base/files/file.cc
index 1b2224e..ab05630 100644
--- a/base/files/file.cc
+++ b/base/files/file.cc
@@ -138,4 +138,12 @@
   return "";
 }
 
+bool File::Flush() {
+  ElapsedTimer timer;
+  SCOPED_FILE_TRACE("Flush");
+  bool return_value = DoFlush();
+  UMA_HISTOGRAM_TIMES("PlatformFile.FlushTime", timer.Elapsed());
+  return return_value;
+}
+
 }  // namespace base
diff --git a/base/files/file.h b/base/files/file.h
index 94a9d5c..ae2bd1b 100644
--- a/base/files/file.h
+++ b/base/files/file.h
@@ -63,31 +63,28 @@
   // FLAG_EXCLUSIVE_(READ|WRITE) only grant exclusive access to the file on
   // creation on POSIX; for existing files, consider using Lock().
   enum Flags {
-    FLAG_OPEN = 1 << 0,            // Opens a file, only if it exists.
-    FLAG_CREATE = 1 << 1,          // Creates a new file, only if it does not
-                                   // already exist.
-    FLAG_OPEN_ALWAYS = 1 << 2,     // May create a new file.
-    FLAG_CREATE_ALWAYS = 1 << 3,   // May overwrite an old file.
-    FLAG_OPEN_TRUNCATED = 1 << 4,  // Opens a file and truncates it, only if it
-                                   // exists.
+    FLAG_OPEN = 1 << 0,             // Opens a file, only if it exists.
+    FLAG_CREATE = 1 << 1,           // Creates a new file, only if it does not
+                                    // already exist.
+    FLAG_OPEN_ALWAYS = 1 << 2,      // May create a new file.
+    FLAG_CREATE_ALWAYS = 1 << 3,    // May overwrite an old file.
+    FLAG_OPEN_TRUNCATED = 1 << 4,   // Opens a file and truncates it, only if it
+                                    // exists.
     FLAG_READ = 1 << 5,
     FLAG_WRITE = 1 << 6,
     FLAG_APPEND = 1 << 7,
-    FLAG_EXCLUSIVE_READ = 1 << 8,  // EXCLUSIVE is opposite of Windows SHARE.
+    FLAG_EXCLUSIVE_READ = 1 << 8,   // EXCLUSIVE is opposite of Windows SHARE.
     FLAG_EXCLUSIVE_WRITE = 1 << 9,
     FLAG_ASYNC = 1 << 10,
-    FLAG_TEMPORARY = 1 << 11,  // Used on Windows only.
-    FLAG_HIDDEN = 1 << 12,     // Used on Windows only.
+    FLAG_TEMPORARY = 1 << 11,       // Used on Windows only.
+    FLAG_HIDDEN = 1 << 12,          // Used on Windows only.
     FLAG_DELETE_ON_CLOSE = 1 << 13,
-    FLAG_WRITE_ATTRIBUTES = 1 << 14,     // Used on Windows only.
-    FLAG_SHARE_DELETE = 1 << 15,         // Used on Windows only.
-    FLAG_TERMINAL_DEVICE = 1 << 16,      // Serial port flags.
-    FLAG_BACKUP_SEMANTICS = 1 << 17,     // Used on Windows only.
-    FLAG_EXECUTE = 1 << 18,              // Used on Windows only.
-    FLAG_SEQUENTIAL_SCAN = 1 << 19,      // Used on Windows only.
-    FLAG_CAN_DELETE_ON_CLOSE = 1 << 20,  // Requests permission to delete a file
-                                         // via DeleteOnClose() (Windows only).
-                                         // See DeleteOnClose() for details.
+    FLAG_WRITE_ATTRIBUTES = 1 << 14,  // Used on Windows only.
+    FLAG_SHARE_DELETE = 1 << 15,      // Used on Windows only.
+    FLAG_TERMINAL_DEVICE = 1 << 16,   // Serial port flags.
+    FLAG_BACKUP_SEMANTICS = 1 << 17,  // Used on Windows only.
+    FLAG_EXECUTE = 1 << 18,           // Used on Windows only.
+    FLAG_SEQUENTIAL_SCAN = 1 << 19,   // Used on Windows only.
   };
 
   // This enum has been recorded in multiple histograms. If the order of the
@@ -293,41 +290,11 @@
   // object that was created or initialized with this flag will have unlinked
   // the underlying file when it was created or opened. On Windows, the
   // underlying file is deleted when the last handle to it is closed.
-  File Duplicate() const;
+  File Duplicate();
 
   bool async() const { return async_; }
 
 #if defined(OS_WIN)
-  // Sets or clears the DeleteFile disposition on the handle. Returns true if
-  // the disposition was set or cleared, as indicated by |delete_on_close|.
-  //
-  // Microsoft Windows deletes a file only when the last handle to the
-  // underlying kernel object is closed when the DeleteFile disposition has been
-  // set by any handle holder. This disposition is be set by:
-  // - Calling the Win32 DeleteFile function with the path to a file.
-  // - Opening/creating a file with FLAG_DELETE_ON_CLOSE.
-  // - Opening/creating a file with FLAG_CAN_DELETE_ON_CLOSE and subsequently
-  //   calling DeleteOnClose(true).
-  //
-  // In all cases, all pre-existing handles to the file must have been opened
-  // with FLAG_SHARE_DELETE.
-  //
-  // So:
-  // - Use FLAG_SHARE_DELETE when creating/opening a file to allow another
-  //   entity on the system to cause it to be deleted when it is closed. (Note:
-  //   another entity can delete the file the moment after it is closed, so not
-  //   using this permission doesn't provide any protections.)
-  // - Use FLAG_DELETE_ON_CLOSE for any file that is to be deleted after use.
-  //   The OS will ensure it is deleted even in the face of process termination.
-  // - Use FLAG_CAN_DELETE_ON_CLOSE in conjunction with DeleteOnClose() to alter
-  //   the DeleteFile disposition on an open handle. This fine-grained control
-  //   allows for marking a file for deletion during processing so that it is
-  //   deleted in the event of untimely process termination, and then clearing
-  //   this state once the file is suitable for persistence.
-  bool DeleteOnClose(bool delete_on_close);
-#endif
-
-#if defined(OS_WIN)
   static Error OSErrorToFileError(DWORD last_error);
 #elif defined(OS_POSIX)
   static Error OSErrorToFileError(int saved_errno);
@@ -343,6 +310,10 @@
   // traversal ('..') components.
   void DoInitialize(const FilePath& path, uint32_t flags);
 
+  // TODO(tnagel): Reintegrate into Flush() once histogram isn't needed anymore,
+  // cf. issue 473337.
+  bool DoFlush();
+
   void SetPlatformFile(PlatformFile file);
 
 #if defined(OS_WIN)
diff --git a/base/files/file_descriptor_watcher_posix.cc b/base/files/file_descriptor_watcher_posix.cc
deleted file mode 100644
index 9746e35..0000000
--- a/base/files/file_descriptor_watcher_posix.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/files/file_descriptor_watcher_posix.h"
-
-#include "base/bind.h"
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/sequenced_task_runner.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/sequenced_task_runner_handle.h"
-#include "base/threading/thread_checker.h"
-#include "base/threading/thread_local.h"
-
-namespace base {
-
-namespace {
-
-// MessageLoopForIO used to watch file descriptors for which callbacks are
-// registered from a given thread.
-LazyInstance<ThreadLocalPointer<MessageLoopForIO>>::Leaky
-    tls_message_loop_for_io = LAZY_INSTANCE_INITIALIZER;
-
-}  // namespace
-
-FileDescriptorWatcher::Controller::~Controller() {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
-
-  // Delete |watcher_| on the MessageLoopForIO.
-  //
-  // If the MessageLoopForIO is deleted before Watcher::StartWatching() runs,
-  // |watcher_| is leaked. If the MessageLoopForIO is deleted after
-  // Watcher::StartWatching() runs but before the DeleteSoon task runs,
-  // |watcher_| is deleted from Watcher::WillDestroyCurrentMessageLoop().
-  message_loop_for_io_task_runner_->DeleteSoon(FROM_HERE, watcher_.release());
-
-  // Since WeakPtrs are invalidated by the destructor, RunCallback() won't be
-  // invoked after this returns.
-}
-
-class FileDescriptorWatcher::Controller::Watcher
-    : public MessageLoopForIO::Watcher,
-      public MessageLoop::DestructionObserver {
- public:
-  Watcher(WeakPtr<Controller> controller, MessageLoopForIO::Mode mode, int fd);
-  ~Watcher() override;
-
-  void StartWatching();
-
- private:
-  friend class FileDescriptorWatcher;
-
-  // MessageLoopForIO::Watcher:
-  void OnFileCanReadWithoutBlocking(int fd) override;
-  void OnFileCanWriteWithoutBlocking(int fd) override;
-
-  // MessageLoop::DestructionObserver:
-  void WillDestroyCurrentMessageLoop() override;
-
-  // Used to instruct the MessageLoopForIO to stop watching the file descriptor.
-  MessageLoopForIO::FileDescriptorWatcher file_descriptor_watcher_;
-
-  // Runs tasks on the sequence on which this was instantiated (i.e. the
-  // sequence on which the callback must run).
-  const scoped_refptr<SequencedTaskRunner> callback_task_runner_ =
-      SequencedTaskRunnerHandle::Get();
-
-  // The Controller that created this Watcher.
-  WeakPtr<Controller> controller_;
-
-  // Whether this Watcher is notified when |fd_| becomes readable or writable
-  // without blocking.
-  const MessageLoopForIO::Mode mode_;
-
-  // The watched file descriptor.
-  const int fd_;
-
-  // Except for the constructor, every method of this class must run on the same
-  // MessageLoopForIO thread.
-  ThreadChecker thread_checker_;
-
-  // Whether this Watcher was registered as a DestructionObserver on the
-  // MessageLoopForIO thread.
-  bool registered_as_destruction_observer_ = false;
-
-  DISALLOW_COPY_AND_ASSIGN(Watcher);
-};
-
-FileDescriptorWatcher::Controller::Watcher::Watcher(
-    WeakPtr<Controller> controller,
-    MessageLoopForIO::Mode mode,
-    int fd)
-    : file_descriptor_watcher_(FROM_HERE),
-      controller_(controller),
-      mode_(mode),
-      fd_(fd) {
-  DCHECK(callback_task_runner_);
-  thread_checker_.DetachFromThread();
-}
-
-FileDescriptorWatcher::Controller::Watcher::~Watcher() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  MessageLoopForIO::current()->RemoveDestructionObserver(this);
-}
-
-void FileDescriptorWatcher::Controller::Watcher::StartWatching() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-
-  MessageLoopForIO::current()->WatchFileDescriptor(
-      fd_, false, mode_, &file_descriptor_watcher_, this);
-
-  if (!registered_as_destruction_observer_) {
-    MessageLoopForIO::current()->AddDestructionObserver(this);
-    registered_as_destruction_observer_ = true;
-  }
-}
-
-void FileDescriptorWatcher::Controller::Watcher::OnFileCanReadWithoutBlocking(
-    int fd) {
-  DCHECK_EQ(fd_, fd);
-  DCHECK_EQ(MessageLoopForIO::WATCH_READ, mode_);
-  DCHECK(thread_checker_.CalledOnValidThread());
-
-  // Run the callback on the sequence on which the watch was initiated.
-  callback_task_runner_->PostTask(FROM_HERE,
-                                  Bind(&Controller::RunCallback, controller_));
-}
-
-void FileDescriptorWatcher::Controller::Watcher::OnFileCanWriteWithoutBlocking(
-    int fd) {
-  DCHECK_EQ(fd_, fd);
-  DCHECK_EQ(MessageLoopForIO::WATCH_WRITE, mode_);
-  DCHECK(thread_checker_.CalledOnValidThread());
-
-  // Run the callback on the sequence on which the watch was initiated.
-  callback_task_runner_->PostTask(FROM_HERE,
-                                  Bind(&Controller::RunCallback, controller_));
-}
-
-void FileDescriptorWatcher::Controller::Watcher::
-    WillDestroyCurrentMessageLoop() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-
-  // A Watcher is owned by a Controller. When the Controller is deleted, it
-  // transfers ownership of the Watcher to a delete task posted to the
-  // MessageLoopForIO. If the MessageLoopForIO is deleted before the delete task
-  // runs, the following line takes care of deleting the Watcher.
-  delete this;
-}
-
-FileDescriptorWatcher::Controller::Controller(MessageLoopForIO::Mode mode,
-                                              int fd,
-                                              const Closure& callback)
-    : callback_(callback),
-      message_loop_for_io_task_runner_(
-          tls_message_loop_for_io.Get().Get()->task_runner()),
-      weak_factory_(this) {
-  DCHECK(!callback_.is_null());
-  DCHECK(message_loop_for_io_task_runner_);
-  watcher_ = MakeUnique<Watcher>(weak_factory_.GetWeakPtr(), mode, fd);
-  StartWatching();
-}
-
-void FileDescriptorWatcher::Controller::StartWatching() {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
-  // It is safe to use Unretained() below because |watcher_| can only be deleted
-  // by a delete task posted to |message_loop_for_io_task_runner_| by this
-  // Controller's destructor. Since this delete task hasn't been posted yet, it
-  // can't run before the task posted below.
-  message_loop_for_io_task_runner_->PostTask(
-      FROM_HERE, Bind(&Watcher::StartWatching, Unretained(watcher_.get())));
-}
-
-void FileDescriptorWatcher::Controller::RunCallback() {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
-
-  WeakPtr<Controller> weak_this = weak_factory_.GetWeakPtr();
-
-  callback_.Run();
-
-  // If |this| wasn't deleted, re-enable the watch.
-  if (weak_this)
-    StartWatching();
-}
-
-FileDescriptorWatcher::FileDescriptorWatcher(
-    MessageLoopForIO* message_loop_for_io) {
-  DCHECK(message_loop_for_io);
-  DCHECK(!tls_message_loop_for_io.Get().Get());
-  tls_message_loop_for_io.Get().Set(message_loop_for_io);
-}
-
-FileDescriptorWatcher::~FileDescriptorWatcher() {
-  tls_message_loop_for_io.Get().Set(nullptr);
-}
-
-std::unique_ptr<FileDescriptorWatcher::Controller>
-FileDescriptorWatcher::WatchReadable(int fd, const Closure& callback) {
-  return WrapUnique(new Controller(MessageLoopForIO::WATCH_READ, fd, callback));
-}
-
-std::unique_ptr<FileDescriptorWatcher::Controller>
-FileDescriptorWatcher::WatchWritable(int fd, const Closure& callback) {
-  return WrapUnique(
-      new Controller(MessageLoopForIO::WATCH_WRITE, fd, callback));
-}
-
-}  // namespace base
diff --git a/base/files/file_descriptor_watcher_posix.h b/base/files/file_descriptor_watcher_posix.h
deleted file mode 100644
index 6cc011b..0000000
--- a/base/files/file_descriptor_watcher_posix.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
-#define BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
-
-#include <memory>
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/sequence_checker.h"
-
-namespace base {
-
-class SingleThreadTaskRunner;
-
-// The FileDescriptorWatcher API allows callbacks to be invoked when file
-// descriptors are readable or writable without blocking.
-class BASE_EXPORT FileDescriptorWatcher {
- public:
-  // Instantiated and returned by WatchReadable() or WatchWritable(). The
-  // constructor registers a callback to be invoked when a file descriptor is
-  // readable or writable without blocking and the destructor unregisters it.
-  class Controller {
-   public:
-    // Unregisters the callback registered by the constructor.
-    ~Controller();
-
-   private:
-    friend class FileDescriptorWatcher;
-    class Watcher;
-
-    // Registers |callback| to be invoked when |fd| is readable or writable
-    // without blocking (depending on |mode|).
-    Controller(MessageLoopForIO::Mode mode, int fd, const Closure& callback);
-
-    // Starts watching the file descriptor.
-    void StartWatching();
-
-    // Runs |callback_|.
-    void RunCallback();
-
-    // The callback to run when the watched file descriptor is readable or
-    // writable without blocking.
-    Closure callback_;
-
-    // TaskRunner associated with the MessageLoopForIO that watches the file
-    // descriptor.
-    const scoped_refptr<SingleThreadTaskRunner>
-        message_loop_for_io_task_runner_;
-
-    // Notified by the MessageLoopForIO associated with
-    // |message_loop_for_io_task_runner_| when the watched file descriptor is
-    // readable or writable without blocking. Posts a task to run RunCallback()
-    // on the sequence on which the Controller was instantiated. When the
-    // Controller is deleted, ownership of |watcher_| is transfered to a delete
-    // task posted to the MessageLoopForIO. This ensures that |watcher_| isn't
-    // deleted while it is being used by the MessageLoopForIO.
-    std::unique_ptr<Watcher> watcher_;
-
-    // Validates that the Controller is used on the sequence on which it was
-    // instantiated.
-    SequenceChecker sequence_checker_;
-
-    WeakPtrFactory<Controller> weak_factory_;
-
-    DISALLOW_COPY_AND_ASSIGN(Controller);
-  };
-
-  // Registers |message_loop_for_io| to watch file descriptors for which
-  // callbacks are registered from the current thread via WatchReadable() or
-  // WatchWritable(). |message_loop_for_io| may run on another thread. The
-  // constructed FileDescriptorWatcher must not outlive |message_loop_for_io|.
-  FileDescriptorWatcher(MessageLoopForIO* message_loop_for_io);
-  ~FileDescriptorWatcher();
-
-  // Registers |callback| to be invoked on the current sequence when |fd| is
-  // readable or writable without blocking. |callback| is unregistered when the
-  // returned Controller is deleted (deletion must happen on the current
-  // sequence). To call these methods, a FileDescriptorWatcher must have been
-  // instantiated on the current thread and SequencedTaskRunnerHandle::IsSet()
-  // must return true.
-  static std::unique_ptr<Controller> WatchReadable(int fd,
-                                                   const Closure& callback);
-  static std::unique_ptr<Controller> WatchWritable(int fd,
-                                                   const Closure& callback);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
-};
-
-}  // namespace base
-
-#endif  // BASE_FILES_FILE_DESCRIPTOR_WATCHER_POSIX_H_
diff --git a/base/files/file_descriptor_watcher_posix_unittest.cc b/base/files/file_descriptor_watcher_posix_unittest.cc
deleted file mode 100644
index 7ff40c5..0000000
--- a/base/files/file_descriptor_watcher_posix_unittest.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/files/file_descriptor_watcher_posix.h"
-
-#include <unistd.h>
-
-#include <memory>
-
-#include "base/bind.h"
-#include "base/files/file_util.h"
-#include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/message_loop/message_loop.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/run_loop.h"
-#include "base/test/test_timeouts.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_checker_impl.h"
-#include "build/build_config.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-namespace {
-
-class Mock {
- public:
-  Mock() = default;
-
-  MOCK_METHOD0(ReadableCallback, void());
-  MOCK_METHOD0(WritableCallback, void());
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Mock);
-};
-
-enum class FileDescriptorWatcherTestType {
-  MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD,
-  MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD,
-};
-
-class FileDescriptorWatcherTest
-    : public testing::TestWithParam<FileDescriptorWatcherTestType> {
- public:
-  FileDescriptorWatcherTest()
-      : message_loop_(GetParam() == FileDescriptorWatcherTestType::
-                                        MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD
-                          ? new MessageLoopForIO
-                          : new MessageLoop),
-        other_thread_("FileDescriptorWatcherTest_OtherThread") {}
-  ~FileDescriptorWatcherTest() override = default;
-
-  void SetUp() override {
-    ASSERT_EQ(0, pipe(pipe_fds_));
-
-    MessageLoop* message_loop_for_io;
-    if (GetParam() ==
-        FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD) {
-      Thread::Options options;
-      options.message_loop_type = MessageLoop::TYPE_IO;
-      ASSERT_TRUE(other_thread_.StartWithOptions(options));
-      message_loop_for_io = other_thread_.message_loop();
-    } else {
-      message_loop_for_io = message_loop_.get();
-    }
-
-    ASSERT_TRUE(message_loop_for_io->IsType(MessageLoop::TYPE_IO));
-    file_descriptor_watcher_ = MakeUnique<FileDescriptorWatcher>(
-        static_cast<MessageLoopForIO*>(message_loop_for_io));
-  }
-
-  void TearDown() override {
-    if (GetParam() ==
-            FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD &&
-        message_loop_) {
-      // Allow the delete task posted by the Controller's destructor to run.
-      base::RunLoop().RunUntilIdle();
-    }
-
-    EXPECT_EQ(0, IGNORE_EINTR(close(pipe_fds_[0])));
-    EXPECT_EQ(0, IGNORE_EINTR(close(pipe_fds_[1])));
-  }
-
- protected:
-  int read_file_descriptor() const { return pipe_fds_[0]; }
-  int write_file_descriptor() const { return pipe_fds_[1]; }
-
-  // Waits for a short delay and run pending tasks.
-  void WaitAndRunPendingTasks() {
-    PlatformThread::Sleep(TestTimeouts::tiny_timeout());
-    RunLoop().RunUntilIdle();
-  }
-
-  // Registers ReadableCallback() to be called on |mock_| when
-  // read_file_descriptor() is readable without blocking.
-  std::unique_ptr<FileDescriptorWatcher::Controller> WatchReadable() {
-    std::unique_ptr<FileDescriptorWatcher::Controller> controller =
-        FileDescriptorWatcher::WatchReadable(
-            read_file_descriptor(),
-            Bind(&Mock::ReadableCallback, Unretained(&mock_)));
-    EXPECT_TRUE(controller);
-
-    // Unless read_file_descriptor() was readable before the callback was
-    // registered, this shouldn't do anything.
-    WaitAndRunPendingTasks();
-
-    return controller;
-  }
-
-  // Registers WritableCallback() to be called on |mock_| when
-  // write_file_descriptor() is writable without blocking.
-  std::unique_ptr<FileDescriptorWatcher::Controller> WatchWritable() {
-    std::unique_ptr<FileDescriptorWatcher::Controller> controller =
-        FileDescriptorWatcher::WatchWritable(
-            read_file_descriptor(),
-            Bind(&Mock::WritableCallback, Unretained(&mock_)));
-    EXPECT_TRUE(controller);
-    return controller;
-  }
-
-  void WriteByte() {
-    constexpr char kByte = '!';
-    ASSERT_TRUE(
-        WriteFileDescriptor(write_file_descriptor(), &kByte, sizeof(kByte)));
-  }
-
-  void ReadByte() {
-    // This is always called as part of the WatchReadable() callback, which
-    // should run on the main thread.
-    EXPECT_TRUE(thread_checker_.CalledOnValidThread());
-
-    char buffer;
-    ASSERT_TRUE(ReadFromFD(read_file_descriptor(), &buffer, sizeof(buffer)));
-  }
-
-  // Mock on wich callbacks are invoked.
-  testing::StrictMock<Mock> mock_;
-
-  // MessageLoop bound to the main thread.
-  std::unique_ptr<MessageLoop> message_loop_;
-
-  // Thread running a MessageLoopForIO. Used when the test type is
-  // MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD.
-  Thread other_thread_;
-
- private:
-  // Determines which MessageLoopForIO is used to watch file descriptors for
-  // which callbacks are registered on the main thread.
-  std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher_;
-
-  // Watched file descriptors.
-  int pipe_fds_[2];
-
-  // Used to verify that callbacks run on the thread on which they are
-  // registered.
-  ThreadCheckerImpl thread_checker_;
-
-  DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcherTest);
-};
-
-}  // namespace
-
-TEST_P(FileDescriptorWatcherTest, WatchWritable) {
-  auto controller = WatchWritable();
-
-// On Mac and iOS, the write end of a newly created pipe is writable without
-// blocking.
-#if defined(OS_MACOSX)
-  RunLoop run_loop;
-  EXPECT_CALL(mock_, WritableCallback())
-      .WillOnce(testing::Invoke(&run_loop, &RunLoop::Quit));
-  run_loop.Run();
-#endif  // defined(OS_MACOSX)
-}
-
-TEST_P(FileDescriptorWatcherTest, WatchReadableOneByte) {
-  auto controller = WatchReadable();
-
-  // Write 1 byte to the pipe, making it readable without blocking. Expect one
-  // call to ReadableCallback() which will read 1 byte from the pipe.
-  WriteByte();
-  RunLoop run_loop;
-  EXPECT_CALL(mock_, ReadableCallback())
-      .WillOnce(testing::Invoke([this, &run_loop]() {
-        ReadByte();
-        run_loop.Quit();
-      }));
-  run_loop.Run();
-  testing::Mock::VerifyAndClear(&mock_);
-
-  // No more call to ReadableCallback() is expected.
-  WaitAndRunPendingTasks();
-}
-
-TEST_P(FileDescriptorWatcherTest, WatchReadableTwoBytes) {
-  auto controller = WatchReadable();
-
-  // Write 2 bytes to the pipe. Expect two calls to ReadableCallback() which
-  // will each read 1 byte from the pipe.
-  WriteByte();
-  WriteByte();
-  RunLoop run_loop;
-  EXPECT_CALL(mock_, ReadableCallback())
-      .WillOnce(testing::Invoke([this]() { ReadByte(); }))
-      .WillOnce(testing::Invoke([this, &run_loop]() {
-        ReadByte();
-        run_loop.Quit();
-      }));
-  run_loop.Run();
-  testing::Mock::VerifyAndClear(&mock_);
-
-  // No more call to ReadableCallback() is expected.
-  WaitAndRunPendingTasks();
-}
-
-TEST_P(FileDescriptorWatcherTest, WatchReadableByteWrittenFromCallback) {
-  auto controller = WatchReadable();
-
-  // Write 1 byte to the pipe. Expect one call to ReadableCallback() from which
-  // 1 byte is read and 1 byte is written to the pipe. Then, expect another call
-  // to ReadableCallback() from which the remaining byte is read from the pipe.
-  WriteByte();
-  RunLoop run_loop;
-  EXPECT_CALL(mock_, ReadableCallback())
-      .WillOnce(testing::Invoke([this]() {
-        ReadByte();
-        WriteByte();
-      }))
-      .WillOnce(testing::Invoke([this, &run_loop]() {
-        ReadByte();
-        run_loop.Quit();
-      }));
-  run_loop.Run();
-  testing::Mock::VerifyAndClear(&mock_);
-
-  // No more call to ReadableCallback() is expected.
-  WaitAndRunPendingTasks();
-}
-
-TEST_P(FileDescriptorWatcherTest, DeleteControllerFromCallback) {
-  auto controller = WatchReadable();
-
-  // Write 1 byte to the pipe. Expect one call to ReadableCallback() from which
-  // |controller| is deleted.
-  WriteByte();
-  RunLoop run_loop;
-  EXPECT_CALL(mock_, ReadableCallback())
-      .WillOnce(testing::Invoke([&run_loop, &controller]() {
-        controller = nullptr;
-        run_loop.Quit();
-      }));
-  run_loop.Run();
-  testing::Mock::VerifyAndClear(&mock_);
-
-  // Since |controller| has been deleted, no call to ReadableCallback() is
-  // expected even though the pipe is still readable without blocking.
-  WaitAndRunPendingTasks();
-}
-
-TEST_P(FileDescriptorWatcherTest,
-       DeleteControllerBeforeFileDescriptorReadable) {
-  auto controller = WatchReadable();
-
-  // Cancel the watch.
-  controller = nullptr;
-
-  // Write 1 byte to the pipe to make it readable without blocking.
-  WriteByte();
-
-  // No call to ReadableCallback() is expected.
-  WaitAndRunPendingTasks();
-}
-
-TEST_P(FileDescriptorWatcherTest, DeleteControllerAfterFileDescriptorReadable) {
-  auto controller = WatchReadable();
-
-  // Write 1 byte to the pipe to make it readable without blocking.
-  WriteByte();
-
-  // Cancel the watch.
-  controller = nullptr;
-
-  // No call to ReadableCallback() is expected.
-  WaitAndRunPendingTasks();
-}
-
-TEST_P(FileDescriptorWatcherTest, DeleteControllerAfterDeleteMessageLoopForIO) {
-  auto controller = WatchReadable();
-
-  // Delete the MessageLoopForIO.
-  if (GetParam() ==
-      FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD) {
-    message_loop_ = nullptr;
-  } else {
-    other_thread_.Stop();
-  }
-
-  // Deleting |controller| shouldn't crash even though that causes a task to be
-  // posted to the MessageLoopForIO thread.
-  controller = nullptr;
-}
-
-INSTANTIATE_TEST_CASE_P(
-    MessageLoopForIOOnMainThread,
-    FileDescriptorWatcherTest,
-    ::testing::Values(
-        FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_MAIN_THREAD));
-INSTANTIATE_TEST_CASE_P(
-    MessageLoopForIOOnOtherThread,
-    FileDescriptorWatcherTest,
-    ::testing::Values(
-        FileDescriptorWatcherTestType::MESSAGE_LOOP_FOR_IO_ON_OTHER_THREAD));
-
-}  // namespace base
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 9f67f9b..29f12a8 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -176,7 +176,6 @@
 
 FilePath::FilePath(const FilePath& that) : path_(that.path_) {
 }
-FilePath::FilePath(FilePath&& that) = default;
 
 FilePath::FilePath(StringPieceType path) {
   path.CopyToString(&path_);
@@ -193,8 +192,6 @@
   return *this;
 }
 
-FilePath& FilePath::operator=(FilePath&& that) = default;
-
 bool FilePath::operator==(const FilePath& that) const {
 #if defined(FILE_PATH_USES_DRIVE_LETTERS)
   return EqualDriveLetterCaseInsensitive(this->path_, that.path_);
diff --git a/base/files/file_path.h b/base/files/file_path.h
index 02846f6..3234df7 100644
--- a/base/files/file_path.h
+++ b/base/files/file_path.h
@@ -182,13 +182,6 @@
   ~FilePath();
   FilePath& operator=(const FilePath& that);
 
-  // Constructs FilePath with the contents of |that|, which is left in valid but
-  // unspecified state.
-  FilePath(FilePath&& that);
-  // Replaces the contents with those of |that|, which is left in valid but
-  // unspecified state.
-  FilePath& operator=(FilePath&& that);
-
   bool operator==(const FilePath& that) const;
 
   bool operator!=(const FilePath& that) const;
diff --git a/base/files/file_path_unittest.cc b/base/files/file_path_unittest.cc
index a091e62..d8c5969 100644
--- a/base/files/file_path_unittest.cc
+++ b/base/files/file_path_unittest.cc
@@ -9,6 +9,7 @@
 #include "base/files/file_path.h"
 #include "base/macros.h"
 #include "base/strings/utf_string_conversions.h"
+#include "base/test/scoped_locale.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
diff --git a/base/files/file_path_watcher.cc b/base/files/file_path_watcher.cc
index 245bd8e..a4624ab 100644
--- a/base/files/file_path_watcher.cc
+++ b/base/files/file_path_watcher.cc
@@ -8,16 +8,22 @@
 #include "base/files/file_path_watcher.h"
 
 #include "base/logging.h"
+#include "base/message_loop/message_loop.h"
 #include "build/build_config.h"
 
 namespace base {
 
 FilePathWatcher::~FilePathWatcher() {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
   impl_->Cancel();
 }
 
 // static
+void FilePathWatcher::CancelWatch(
+    const scoped_refptr<PlatformDelegate>& delegate) {
+  delegate->CancelOnMessageLoopThread();
+}
+
+// static
 bool FilePathWatcher::RecursiveWatchAvailable() {
 #if (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_WIN) || \
     defined(OS_LINUX) || defined(OS_ANDROID)
@@ -38,7 +44,6 @@
 bool FilePathWatcher::Watch(const FilePath& path,
                             bool recursive,
                             const Callback& callback) {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
   DCHECK(path.IsAbsolute());
   return impl_->Watch(path, recursive, callback);
 }
diff --git a/base/files/file_path_watcher.h b/base/files/file_path_watcher.h
index 9e29d0a..d5c6db1 100644
--- a/base/files/file_path_watcher.h
+++ b/base/files/file_path_watcher.h
@@ -7,15 +7,12 @@
 #ifndef BASE_FILES_FILE_PATH_WATCHER_H_
 #define BASE_FILES_FILE_PATH_WATCHER_H_
 
-#include <memory>
-
 #include "base/base_export.h"
 #include "base/callback.h"
 #include "base/files/file_path.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/sequence_checker.h"
-#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
 
 namespace base {
 
@@ -28,8 +25,6 @@
 // detect the creation and deletion of files in a watched directory, but will
 // not detect modifications to those files. See file_path_watcher_kqueue.cc for
 // details.
-//
-// Must be destroyed on the sequence that invokes Watch().
 class BASE_EXPORT FilePathWatcher {
  public:
   // Callback type for Watch(). |path| points to the file that was updated,
@@ -38,10 +33,9 @@
   typedef base::Callback<void(const FilePath& path, bool error)> Callback;
 
   // Used internally to encapsulate different members on different platforms.
-  class PlatformDelegate {
+  class PlatformDelegate : public base::RefCountedThreadSafe<PlatformDelegate> {
    public:
     PlatformDelegate();
-    virtual ~PlatformDelegate();
 
     // Start watching for the given |path| and notify |delegate| about changes.
     virtual bool Watch(const FilePath& path,
@@ -50,16 +44,25 @@
 
     // Stop watching. This is called from FilePathWatcher's dtor in order to
     // allow to shut down properly while the object is still alive.
+    // It can be called from any thread.
     virtual void Cancel() = 0;
 
    protected:
+    friend class base::RefCountedThreadSafe<PlatformDelegate>;
     friend class FilePathWatcher;
 
-    scoped_refptr<SequencedTaskRunner> task_runner() const {
+    virtual ~PlatformDelegate();
+
+    // Stop watching. This is only called on the thread of the appropriate
+    // message loop. Since it can also be called more than once, it should
+    // check |is_cancelled()| to avoid duplicate work.
+    virtual void CancelOnMessageLoopThread() = 0;
+
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner() const {
       return task_runner_;
     }
 
-    void set_task_runner(scoped_refptr<SequencedTaskRunner> runner) {
+    void set_task_runner(scoped_refptr<base::SingleThreadTaskRunner> runner) {
       task_runner_ = std::move(runner);
     }
 
@@ -73,34 +76,32 @@
     }
 
    private:
-    scoped_refptr<SequencedTaskRunner> task_runner_;
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
     bool cancelled_;
-
-    DISALLOW_COPY_AND_ASSIGN(PlatformDelegate);
   };
 
   FilePathWatcher();
-  ~FilePathWatcher();
+  virtual ~FilePathWatcher();
+
+  // A callback that always cleans up the PlatformDelegate, either when executed
+  // or when deleted without having been executed at all, as can happen during
+  // shutdown.
+  static void CancelWatch(const scoped_refptr<PlatformDelegate>& delegate);
 
   // Returns true if the platform and OS version support recursive watches.
   static bool RecursiveWatchAvailable();
 
   // Invokes |callback| whenever updates to |path| are detected. This should be
-  // called at most once. Set |recursive| to true to watch |path| and its
-  // children. The callback will be invoked on the same sequence. Returns true
-  // on success.
-  //
-  // On POSIX, this must be called from a thread that supports
-  // FileDescriptorWatcher.
+  // called at most once, and from a MessageLoop of TYPE_IO. Set |recursive| to
+  // true, to watch |path| and its children. The callback will be invoked on
+  // the same loop. Returns true on success.
   //
   // Recursive watch is not supported on all platforms and file systems.
   // Watch() will return false in the case of failure.
   bool Watch(const FilePath& path, bool recursive, const Callback& callback);
 
  private:
-  std::unique_ptr<PlatformDelegate> impl_;
-
-  SequenceChecker sequence_checker_;
+  scoped_refptr<PlatformDelegate> impl_;
 
   DISALLOW_COPY_AND_ASSIGN(FilePathWatcher);
 };
diff --git a/base/files/file_path_watcher_fsevents.cc b/base/files/file_path_watcher_fsevents.cc
index e9a87b0..e9d2508 100644
--- a/base/files/file_path_watcher_fsevents.cc
+++ b/base/files/file_path_watcher_fsevents.cc
@@ -13,8 +13,10 @@
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/mac/scoped_cftyperef.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
 #include "base/strings/stringprintf.h"
-#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
@@ -68,21 +70,16 @@
 
 FilePathWatcherFSEvents::FilePathWatcherFSEvents()
     : queue_(dispatch_queue_create(
-          base::StringPrintf("org.chromium.base.FilePathWatcher.%p", this)
-              .c_str(),
+          base::StringPrintf(
+              "org.chromium.base.FilePathWatcher.%p", this).c_str(),
           DISPATCH_QUEUE_SERIAL)),
-      fsevent_stream_(nullptr),
-      weak_factory_(this) {}
-
-FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
-  DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
-  DCHECK(callback_.is_null())
-      << "Cancel() must be called before FilePathWatcher is destroyed.";
+      fsevent_stream_(nullptr) {
 }
 
 bool FilePathWatcherFSEvents::Watch(const FilePath& path,
                                     bool recursive,
                                     const FilePathWatcher::Callback& callback) {
+  DCHECK(MessageLoopForIO::current());
   DCHECK(!callback.is_null());
   DCHECK(callback_.is_null());
 
@@ -91,7 +88,7 @@
   if (!recursive)
     return false;
 
-  set_task_runner(SequencedTaskRunnerHandle::Get());
+  set_task_runner(ThreadTaskRunnerHandle::Get());
   callback_ = callback;
 
   FSEventStreamEventId start_event = FSEventsGetCurrentEventId();
@@ -110,15 +107,11 @@
   set_cancelled();
   callback_.Reset();
 
-  // Switch to the dispatch queue to tear down the event stream. As the queue is
-  // owned by |this|, and this method is called from the destructor, execute the
-  // block synchronously.
+  // Switch to the dispatch queue to tear down the event stream. As the queue
+  // is owned by this object, and this method is called from the destructor,
+  // execute the block synchronously.
   dispatch_sync(queue_, ^{
-    if (fsevent_stream_) {
-      DestroyEventStream();
-      target_.clear();
-      resolved_target_.clear();
-    }
+      CancelOnMessageLoopThread();
   });
 }
 
@@ -149,40 +142,31 @@
   // the directory to be watched gets created.
   if (root_changed) {
     // Resetting the event stream from within the callback fails (FSEvents spews
-    // bad file descriptor errors), so do the reset asynchronously.
-    //
-    // We can't dispatch_async a call to UpdateEventStream() directly because
-    // there would be no guarantee that |watcher| still exists when it runs.
-    //
-    // Instead, bounce on task_runner() and use a WeakPtr to verify that
-    // |watcher| still exists. If it does, dispatch_async a call to
-    // UpdateEventStream(). Because the destructor of |watcher| runs on
-    // task_runner() and calls dispatch_sync, it is guaranteed that |watcher|
-    // still exists when UpdateEventStream() runs.
-    watcher->task_runner()->PostTask(
-        FROM_HERE, Bind(
-                       [](WeakPtr<FilePathWatcherFSEvents> weak_watcher,
-                          FSEventStreamEventId root_change_at) {
-                         if (!weak_watcher)
-                           return;
-                         FilePathWatcherFSEvents* watcher = weak_watcher.get();
-                         dispatch_async(watcher->queue_, ^{
-                           watcher->UpdateEventStream(root_change_at);
-                         });
-                       },
-                       watcher->weak_factory_.GetWeakPtr(), root_change_at));
+    // bad file descriptor errors), so post a task to do the reset.
+    dispatch_async(watcher->queue_, ^{
+        watcher->UpdateEventStream(root_change_at);
+    });
   }
 
   watcher->OnFilePathsChanged(paths);
 }
 
+FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
+  // This method may be called on either the libdispatch or task_runner()
+  // thread. Checking callback_ on the libdispatch thread here is safe because
+  // it is executing in a task posted by Cancel() which first reset callback_.
+  // PostTask forms a sufficient memory barrier to ensure that the value is
+  // consistent on the target thread.
+  DCHECK(callback_.is_null())
+      << "Cancel() must be called before FilePathWatcher is destroyed.";
+}
+
 void FilePathWatcherFSEvents::OnFilePathsChanged(
     const std::vector<FilePath>& paths) {
   DCHECK(!resolved_target_.empty());
   task_runner()->PostTask(
-      FROM_HERE,
-      Bind(&FilePathWatcherFSEvents::DispatchEvents, weak_factory_.GetWeakPtr(),
-           paths, target_, resolved_target_));
+      FROM_HERE, Bind(&FilePathWatcherFSEvents::DispatchEvents, this, paths,
+                      target_, resolved_target_));
 }
 
 void FilePathWatcherFSEvents::DispatchEvents(const std::vector<FilePath>& paths,
@@ -203,6 +187,18 @@
   }
 }
 
+void FilePathWatcherFSEvents::CancelOnMessageLoopThread() {
+  // For all other implementations, the "message loop thread" is the IO thread,
+  // as returned by task_runner(). This implementation, however, needs to
+  // cancel pending work on the Dispatch Queue thread.
+
+  if (fsevent_stream_) {
+    DestroyEventStream();
+    target_.clear();
+    resolved_target_.clear();
+  }
+}
+
 void FilePathWatcherFSEvents::UpdateEventStream(
     FSEventStreamEventId start_event) {
   // It can happen that the watcher gets canceled while tasks that call this
@@ -238,9 +234,8 @@
   FSEventStreamSetDispatchQueue(fsevent_stream_, queue_);
 
   if (!FSEventStreamStart(fsevent_stream_)) {
-    task_runner()->PostTask(FROM_HERE,
-                            Bind(&FilePathWatcherFSEvents::ReportError,
-                                 weak_factory_.GetWeakPtr(), target_));
+    task_runner()->PostTask(
+        FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_));
   }
 }
 
@@ -249,9 +244,8 @@
   bool changed = resolved != resolved_target_;
   resolved_target_ = resolved;
   if (resolved_target_.empty()) {
-    task_runner()->PostTask(FROM_HERE,
-                            Bind(&FilePathWatcherFSEvents::ReportError,
-                                 weak_factory_.GetWeakPtr(), target_));
+    task_runner()->PostTask(
+        FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_));
   }
   return changed;
 }
diff --git a/base/files/file_path_watcher_fsevents.h b/base/files/file_path_watcher_fsevents.h
index dcdf2fb..cfbe020 100644
--- a/base/files/file_path_watcher_fsevents.h
+++ b/base/files/file_path_watcher_fsevents.h
@@ -14,7 +14,6 @@
 #include "base/files/file_path_watcher.h"
 #include "base/mac/scoped_dispatch_object.h"
 #include "base/macros.h"
-#include "base/memory/weak_ptr.h"
 
 namespace base {
 
@@ -27,7 +26,6 @@
 class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
  public:
   FilePathWatcherFSEvents();
-  ~FilePathWatcherFSEvents() override;
 
   // FilePathWatcher::PlatformDelegate overrides.
   bool Watch(const FilePath& path,
@@ -43,6 +41,8 @@
                                const FSEventStreamEventFlags flags[],
                                const FSEventStreamEventId event_ids[]);
 
+  ~FilePathWatcherFSEvents() override;
+
   // Called from FSEventsCallback whenever there is a change to the paths.
   void OnFilePathsChanged(const std::vector<FilePath>& paths);
 
@@ -53,6 +53,9 @@
                       const FilePath& target,
                       const FilePath& resolved_target);
 
+  // Cleans up and stops the event stream.
+  void CancelOnMessageLoopThread() override;
+
   // (Re-)Initialize the event stream to start reporting events from
   // |start_event|.
   void UpdateEventStream(FSEventStreamEventId start_event);
@@ -89,8 +92,6 @@
   // (Only accessed from the libdispatch queue.)
   FSEventStreamRef fsevent_stream_;
 
-  WeakPtrFactory<FilePathWatcherFSEvents> weak_factory_;
-
   DISALLOW_COPY_AND_ASSIGN(FilePathWatcherFSEvents);
 };
 
diff --git a/base/files/file_path_watcher_kqueue.cc b/base/files/file_path_watcher_kqueue.cc
index a28726a..6d034cd 100644
--- a/base/files/file_path_watcher_kqueue.cc
+++ b/base/files/file_path_watcher_kqueue.cc
@@ -12,7 +12,7 @@
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/strings/stringprintf.h"
-#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 // On some platforms these are not defined.
 #if !defined(EV_RECEIPT)
@@ -26,9 +26,7 @@
 
 FilePathWatcherKQueue::FilePathWatcherKQueue() : kqueue_(-1) {}
 
-FilePathWatcherKQueue::~FilePathWatcherKQueue() {
-  DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
-}
+FilePathWatcherKQueue::~FilePathWatcherKQueue() {}
 
 void FilePathWatcherKQueue::ReleaseEvent(struct kevent& event) {
   CloseFileDescriptor(&event.ident);
@@ -38,6 +36,7 @@
 }
 
 int FilePathWatcherKQueue::EventsForPath(FilePath path, EventVector* events) {
+  DCHECK(MessageLoopForIO::current());
   // Make sure that we are working with a clean slate.
   DCHECK(events->empty());
 
@@ -231,74 +230,9 @@
   return true;
 }
 
-bool FilePathWatcherKQueue::Watch(const FilePath& path,
-                                  bool recursive,
-                                  const FilePathWatcher::Callback& callback) {
-  DCHECK(target_.value().empty());  // Can only watch one path.
-  DCHECK(!callback.is_null());
-  DCHECK_EQ(kqueue_, -1);
-  // Recursive watch is not supported using kqueue.
-  DCHECK(!recursive);
-
-  callback_ = callback;
-  target_ = path;
-
-  set_task_runner(SequencedTaskRunnerHandle::Get());
-
-  kqueue_ = kqueue();
-  if (kqueue_ == -1) {
-    DPLOG(ERROR) << "kqueue";
-    return false;
-  }
-
-  int last_entry = EventsForPath(target_, &events_);
-  DCHECK_NE(last_entry, 0);
-
-  EventVector responses(last_entry);
-
-  int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], last_entry,
-                                  &responses[0], last_entry, NULL));
-  if (!AreKeventValuesValid(&responses[0], count)) {
-    // Calling Cancel() here to close any file descriptors that were opened.
-    // This would happen in the destructor anyways, but FilePathWatchers tend to
-    // be long lived, and if an error has occurred, there is no reason to waste
-    // the file descriptors.
-    Cancel();
-    return false;
-  }
-
-  // It's safe to use Unretained() because the watch is cancelled and the
-  // callback cannot be invoked after |kqueue_watch_controller_| (which is a
-  // member of |this|) has been deleted.
-  kqueue_watch_controller_ = FileDescriptorWatcher::WatchReadable(
-      kqueue_,
-      Bind(&FilePathWatcherKQueue::OnKQueueReadable, Unretained(this)));
-
-  return true;
-}
-
-void FilePathWatcherKQueue::Cancel() {
-  if (!task_runner()) {
-    set_cancelled();
-    return;
-  }
-
-  DCHECK(task_runner()->RunsTasksOnCurrentThread());
-  if (!is_cancelled()) {
-    set_cancelled();
-    kqueue_watch_controller_.reset();
-    if (IGNORE_EINTR(close(kqueue_)) != 0) {
-      DPLOG(ERROR) << "close kqueue";
-    }
-    kqueue_ = -1;
-    std::for_each(events_.begin(), events_.end(), ReleaseEvent);
-    events_.clear();
-    callback_.Reset();
-  }
-}
-
-void FilePathWatcherKQueue::OnKQueueReadable() {
-  DCHECK(task_runner()->RunsTasksOnCurrentThread());
+void FilePathWatcherKQueue::OnFileCanReadWithoutBlocking(int fd) {
+  DCHECK(MessageLoopForIO::current());
+  DCHECK_EQ(fd, kqueue_);
   DCHECK(events_.size());
 
   // Request the file system update notifications that have occurred and return
@@ -369,4 +303,89 @@
   }
 }
 
+void FilePathWatcherKQueue::OnFileCanWriteWithoutBlocking(int /* fd */) {
+  NOTREACHED();
+}
+
+void FilePathWatcherKQueue::WillDestroyCurrentMessageLoop() {
+  CancelOnMessageLoopThread();
+}
+
+bool FilePathWatcherKQueue::Watch(const FilePath& path,
+                                  bool recursive,
+                                  const FilePathWatcher::Callback& callback) {
+  DCHECK(MessageLoopForIO::current());
+  DCHECK(target_.value().empty());  // Can only watch one path.
+  DCHECK(!callback.is_null());
+  DCHECK_EQ(kqueue_, -1);
+
+  if (recursive) {
+    // Recursive watch is not supported using kqueue.
+    NOTIMPLEMENTED();
+    return false;
+  }
+
+  callback_ = callback;
+  target_ = path;
+
+  MessageLoop::current()->AddDestructionObserver(this);
+  io_task_runner_ = ThreadTaskRunnerHandle::Get();
+
+  kqueue_ = kqueue();
+  if (kqueue_ == -1) {
+    DPLOG(ERROR) << "kqueue";
+    return false;
+  }
+
+  int last_entry = EventsForPath(target_, &events_);
+  DCHECK_NE(last_entry, 0);
+
+  EventVector responses(last_entry);
+
+  int count = HANDLE_EINTR(kevent(kqueue_, &events_[0], last_entry,
+                                  &responses[0], last_entry, NULL));
+  if (!AreKeventValuesValid(&responses[0], count)) {
+    // Calling Cancel() here to close any file descriptors that were opened.
+    // This would happen in the destructor anyways, but FilePathWatchers tend to
+    // be long lived, and if an error has occurred, there is no reason to waste
+    // the file descriptors.
+    Cancel();
+    return false;
+  }
+
+  return MessageLoopForIO::current()->WatchFileDescriptor(
+      kqueue_, true, MessageLoopForIO::WATCH_READ, &kqueue_watcher_, this);
+}
+
+void FilePathWatcherKQueue::Cancel() {
+  SingleThreadTaskRunner* task_runner = io_task_runner_.get();
+  if (!task_runner) {
+    set_cancelled();
+    return;
+  }
+  if (!task_runner->BelongsToCurrentThread()) {
+    task_runner->PostTask(FROM_HERE,
+                          base::Bind(&FilePathWatcherKQueue::Cancel, this));
+    return;
+  }
+  CancelOnMessageLoopThread();
+}
+
+void FilePathWatcherKQueue::CancelOnMessageLoopThread() {
+  DCHECK(MessageLoopForIO::current());
+  if (!is_cancelled()) {
+    set_cancelled();
+    kqueue_watcher_.StopWatchingFileDescriptor();
+    if (IGNORE_EINTR(close(kqueue_)) != 0) {
+      DPLOG(ERROR) << "close kqueue";
+    }
+    kqueue_ = -1;
+    std::for_each(events_.begin(), events_.end(), ReleaseEvent);
+    events_.clear();
+    io_task_runner_ = NULL;
+    MessageLoop::current()->RemoveDestructionObserver(this);
+    callback_.Reset();
+  }
+}
+
 }  // namespace base
diff --git a/base/files/file_path_watcher_kqueue.h b/base/files/file_path_watcher_kqueue.h
index ef79be5..d9db8c2 100644
--- a/base/files/file_path_watcher_kqueue.h
+++ b/base/files/file_path_watcher_kqueue.h
@@ -6,14 +6,13 @@
 #define BASE_FILES_FILE_PATH_WATCHER_KQUEUE_H_
 
 #include <sys/event.h>
-
-#include <memory>
 #include <vector>
 
-#include "base/files/file_descriptor_watcher_posix.h"
 #include "base/files/file_path.h"
 #include "base/files/file_path_watcher.h"
 #include "base/macros.h"
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
 
 namespace base {
 
@@ -28,10 +27,18 @@
 // detect the creation and deletion of files, just not the modification of
 // files. It does however detect the attribute changes that the FSEvents impl
 // would miss.
-class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate {
+class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
+                              public MessageLoopForIO::Watcher,
+                              public MessageLoop::DestructionObserver {
  public:
   FilePathWatcherKQueue();
-  ~FilePathWatcherKQueue() override;
+
+  // MessageLoopForIO::Watcher overrides.
+  void OnFileCanReadWithoutBlocking(int fd) override;
+  void OnFileCanWriteWithoutBlocking(int fd) override;
+
+  // MessageLoop::DestructionObserver overrides.
+  void WillDestroyCurrentMessageLoop() override;
 
   // FilePathWatcher::PlatformDelegate overrides.
   bool Watch(const FilePath& path,
@@ -39,6 +46,9 @@
              const FilePathWatcher::Callback& callback) override;
   void Cancel() override;
 
+ protected:
+  ~FilePathWatcherKQueue() override;
+
  private:
   class EventData {
    public:
@@ -50,8 +60,8 @@
 
   typedef std::vector<struct kevent> EventVector;
 
-  // Called when data is available in |kqueue_|.
-  void OnKQueueReadable();
+  // Can only be called on |io_task_runner_|'s thread.
+  void CancelOnMessageLoopThread() override;
 
   // Returns true if the kevent values are error free.
   bool AreKeventValuesValid(struct kevent* kevents, int count);
@@ -109,14 +119,12 @@
   }
 
   EventVector events_;
+  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
+  MessageLoopForIO::FileDescriptorWatcher kqueue_watcher_;
   FilePathWatcher::Callback callback_;
   FilePath target_;
   int kqueue_;
 
-  // Throughout the lifetime of this, OnKQueueReadable() will be called when
-  // data is available in |kqueue_|.
-  std::unique_ptr<FileDescriptorWatcher::Controller> kqueue_watch_controller_;
-
   DISALLOW_COPY_AND_ASSIGN(FilePathWatcherKQueue);
 };
 
diff --git a/base/files/file_path_watcher_linux.cc b/base/files/file_path_watcher_linux.cc
index 1dc833d..87bddd3 100644
--- a/base/files/file_path_watcher_linux.cc
+++ b/base/files/file_path_watcher_linux.cc
@@ -28,14 +28,12 @@
 #include "base/location.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/memory/weak_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/single_thread_task_runner.h"
 #include "base/stl_util.h"
 #include "base/synchronization/lock.h"
-#include "base/threading/sequenced_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
 
 namespace base {
@@ -63,14 +61,12 @@
   void OnInotifyEvent(const inotify_event* event);
 
  private:
-  friend struct LazyInstanceTraitsBase<InotifyReader>;
+  friend struct DefaultLazyInstanceTraits<InotifyReader>;
 
   typedef std::set<FilePathWatcherImpl*> WatcherSet;
 
   InotifyReader();
-  // There is no destructor because |g_inotify_reader| is a
-  // base::LazyInstace::Leaky object. Having a destructor causes build
-  // issues with GCC 6 (http://crbug.com/636346).
+  ~InotifyReader();
 
   // We keep track of which delegates want to be notified on which watches.
   hash_map<Watch, WatcherSet> watchers_;
@@ -84,16 +80,19 @@
   // File descriptor returned by inotify_init.
   const int inotify_fd_;
 
+  // Use self-pipe trick to unblock select during shutdown.
+  int shutdown_pipe_[2];
+
   // Flag set to true when startup was successful.
   bool valid_;
 
   DISALLOW_COPY_AND_ASSIGN(InotifyReader);
 };
 
-class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
+                            public MessageLoop::DestructionObserver {
  public:
   FilePathWatcherImpl();
-  ~FilePathWatcherImpl() override;
 
   // Called for each event coming from the watch. |fired_watch| identifies the
   // watch that fired, |child| indicates what has changed, and is relative to
@@ -108,13 +107,10 @@
                          bool deleted,
                          bool is_dir);
 
- private:
-  void OnFilePathChangedOnOriginSequence(InotifyReader::Watch fired_watch,
-                                         const FilePath::StringType& child,
-                                         bool created,
-                                         bool deleted,
-                                         bool is_dir);
+ protected:
+  ~FilePathWatcherImpl() override {}
 
+ private:
   // Start watching |path| for changes and notify |delegate| on each change.
   // Returns true if watch for |path| has been added successfully.
   bool Watch(const FilePath& path,
@@ -124,6 +120,14 @@
   // Cancel the watch. This unregisters the instance with InotifyReader.
   void Cancel() override;
 
+  // Cleans up and stops observing the message_loop() thread.
+  void CancelOnMessageLoopThread() override;
+
+  // Deletion of the FilePathWatcher will call Cancel() to dispose of this
+  // object in the right thread. This also observes destruction of the required
+  // cleanup thread, in case it quits before Cancel() is called.
+  void WillDestroyCurrentMessageLoop() override;
+
   // Inotify watches are installed for all directory components of |target_|.
   // A WatchEntry instance holds:
   // - |watch|: the watch descriptor for a component.
@@ -187,15 +191,16 @@
   hash_map<InotifyReader::Watch, FilePath> recursive_paths_by_watch_;
   std::map<FilePath, InotifyReader::Watch> recursive_watches_by_path_;
 
-  WeakPtrFactory<FilePathWatcherImpl> weak_factory_;
-
   DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
 };
 
-void InotifyReaderCallback(InotifyReader* reader, int inotify_fd) {
+void InotifyReaderCallback(InotifyReader* reader, int inotify_fd,
+                           int shutdown_fd) {
   // Make sure the file descriptors are good for use with select().
   CHECK_LE(0, inotify_fd);
   CHECK_GT(FD_SETSIZE, inotify_fd);
+  CHECK_LE(0, shutdown_fd);
+  CHECK_GT(FD_SETSIZE, shutdown_fd);
 
   trace_event::TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop();
 
@@ -203,15 +208,20 @@
     fd_set rfds;
     FD_ZERO(&rfds);
     FD_SET(inotify_fd, &rfds);
+    FD_SET(shutdown_fd, &rfds);
 
     // Wait until some inotify events are available.
     int select_result =
-      HANDLE_EINTR(select(inotify_fd + 1, &rfds, NULL, NULL, NULL));
+      HANDLE_EINTR(select(std::max(inotify_fd, shutdown_fd) + 1,
+                          &rfds, NULL, NULL, NULL));
     if (select_result < 0) {
       DPLOG(WARNING) << "select failed";
       return;
     }
 
+    if (FD_ISSET(shutdown_fd, &rfds))
+      return;
+
     // Adjust buffer size to current event queue size.
     int buffer_size;
     int ioctl_result = HANDLE_EINTR(ioctl(inotify_fd, FIONREAD,
@@ -253,14 +263,33 @@
   if (inotify_fd_ < 0)
     PLOG(ERROR) << "inotify_init() failed";
 
-  if (inotify_fd_ >= 0 && thread_.Start()) {
+  shutdown_pipe_[0] = -1;
+  shutdown_pipe_[1] = -1;
+  if (inotify_fd_ >= 0 && pipe(shutdown_pipe_) == 0 && thread_.Start()) {
     thread_.task_runner()->PostTask(
         FROM_HERE,
-        Bind(&InotifyReaderCallback, this, inotify_fd_));
+        Bind(&InotifyReaderCallback, this, inotify_fd_, shutdown_pipe_[0]));
     valid_ = true;
   }
 }
 
+InotifyReader::~InotifyReader() {
+  if (valid_) {
+    // Write to the self-pipe so that the select call in InotifyReaderTask
+    // returns.
+    ssize_t ret = HANDLE_EINTR(write(shutdown_pipe_[1], "", 1));
+    DPCHECK(ret > 0);
+    DCHECK_EQ(ret, 1);
+    thread_.Stop();
+  }
+  if (inotify_fd_ >= 0)
+    close(inotify_fd_);
+  if (shutdown_pipe_[0] >= 0)
+    close(shutdown_pipe_[0]);
+  if (shutdown_pipe_[1] >= 0)
+    close(shutdown_pipe_[1]);
+}
+
 InotifyReader::Watch InotifyReader::AddWatch(
     const FilePath& path, FilePathWatcherImpl* watcher) {
   if (!valid_)
@@ -314,10 +343,7 @@
 }
 
 FilePathWatcherImpl::FilePathWatcherImpl()
-    : recursive_(false), weak_factory_(this) {}
-
-FilePathWatcherImpl::~FilePathWatcherImpl() {
-  DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
+    : recursive_(false) {
 }
 
 void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
@@ -325,25 +351,22 @@
                                             bool created,
                                             bool deleted,
                                             bool is_dir) {
-  DCHECK(!task_runner()->RunsTasksOnCurrentThread());
+  if (!task_runner()->BelongsToCurrentThread()) {
+    // Switch to task_runner() to access |watches_| safely.
+    task_runner()->PostTask(FROM_HERE,
+                            Bind(&FilePathWatcherImpl::OnFilePathChanged, this,
+                                 fired_watch, child, created, deleted, is_dir));
+    return;
+  }
 
-  // This method is invoked on the Inotify thread. Switch to task_runner() to
-  // access |watches_| safely. Use a WeakPtr to prevent the callback from
-  // running after |this| is destroyed (i.e. after the watch is cancelled).
-  task_runner()->PostTask(
-      FROM_HERE, Bind(&FilePathWatcherImpl::OnFilePathChangedOnOriginSequence,
-                      weak_factory_.GetWeakPtr(), fired_watch, child, created,
-                      deleted, is_dir));
-}
+  // Check to see if CancelOnMessageLoopThread() has already been called.
+  // May happen when code flow reaches here from the PostTask() above.
+  if (watches_.empty()) {
+    DCHECK(target_.empty());
+    return;
+  }
 
-void FilePathWatcherImpl::OnFilePathChangedOnOriginSequence(
-    InotifyReader::Watch fired_watch,
-    const FilePath::StringType& child,
-    bool created,
-    bool deleted,
-    bool is_dir) {
-  DCHECK(task_runner()->RunsTasksOnCurrentThread());
-  DCHECK(!watches_.empty());
+  DCHECK(MessageLoopForIO::current());
   DCHECK(HasValidWatchVector());
 
   // Used below to avoid multiple recursive updates.
@@ -428,11 +451,13 @@
                                 bool recursive,
                                 const FilePathWatcher::Callback& callback) {
   DCHECK(target_.empty());
+  DCHECK(MessageLoopForIO::current());
 
-  set_task_runner(SequencedTaskRunnerHandle::Get());
+  set_task_runner(ThreadTaskRunnerHandle::Get());
   callback_ = callback;
   target_ = path;
   recursive_ = recursive;
+  MessageLoop::current()->AddDestructionObserver(this);
 
   std::vector<FilePath::StringType> comps;
   target_.GetComponents(&comps);
@@ -445,29 +470,47 @@
 }
 
 void FilePathWatcherImpl::Cancel() {
-  if (!callback_) {
-    // Watch() was never called.
+  if (callback_.is_null()) {
+    // Watch was never called, or the message_loop() thread is already gone.
     set_cancelled();
     return;
   }
 
-  DCHECK(task_runner()->RunsTasksOnCurrentThread());
-  DCHECK(!is_cancelled());
+  // Switch to the message_loop() if necessary so we can access |watches_|.
+  if (!task_runner()->BelongsToCurrentThread()) {
+    task_runner()->PostTask(FROM_HERE, Bind(&FilePathWatcher::CancelWatch,
+                                            make_scoped_refptr(this)));
+  } else {
+    CancelOnMessageLoopThread();
+  }
+}
 
+void FilePathWatcherImpl::CancelOnMessageLoopThread() {
+  DCHECK(task_runner()->BelongsToCurrentThread());
   set_cancelled();
-  callback_.Reset();
+
+  if (!callback_.is_null()) {
+    MessageLoop::current()->RemoveDestructionObserver(this);
+    callback_.Reset();
+  }
 
   for (size_t i = 0; i < watches_.size(); ++i)
     g_inotify_reader.Get().RemoveWatch(watches_[i].watch, this);
   watches_.clear();
   target_.clear();
-  RemoveRecursiveWatches();
+
+  if (recursive_)
+    RemoveRecursiveWatches();
+}
+
+void FilePathWatcherImpl::WillDestroyCurrentMessageLoop() {
+  CancelOnMessageLoopThread();
 }
 
 void FilePathWatcherImpl::UpdateWatches() {
-  // Ensure this runs on the task_runner() exclusively in order to avoid
+  // Ensure this runs on the message_loop() exclusively in order to avoid
   // concurrency issues.
-  DCHECK(task_runner()->RunsTasksOnCurrentThread());
+  DCHECK(task_runner()->BelongsToCurrentThread());
   DCHECK(HasValidWatchVector());
 
   // Walk the list of watches and update them as we go.
@@ -498,8 +541,6 @@
 void FilePathWatcherImpl::UpdateRecursiveWatches(
     InotifyReader::Watch fired_watch,
     bool is_dir) {
-  DCHECK(HasValidWatchVector());
-
   if (!recursive_)
     return;
 
@@ -510,8 +551,7 @@
 
   // Check to see if this is a forced update or if some component of |target_|
   // has changed. For these cases, redo the watches for |target_| and below.
-  if (!ContainsKey(recursive_paths_by_watch_, fired_watch) &&
-      fired_watch != watches_.back().watch) {
+  if (!ContainsKey(recursive_paths_by_watch_, fired_watch)) {
     UpdateRecursiveWatchesForPath(target_);
     return;
   }
@@ -520,10 +560,7 @@
   if (!is_dir)
     return;
 
-  const FilePath& changed_dir =
-      ContainsKey(recursive_paths_by_watch_, fired_watch) ?
-      recursive_paths_by_watch_[fired_watch] :
-      target_;
+  const FilePath& changed_dir = recursive_paths_by_watch_[fired_watch];
 
   std::map<FilePath, InotifyReader::Watch>::iterator start_it =
       recursive_watches_by_path_.lower_bound(changed_dir);
@@ -646,8 +683,7 @@
 }  // namespace
 
 FilePathWatcher::FilePathWatcher() {
-  sequence_checker_.DetachFromSequence();
-  impl_ = MakeUnique<FilePathWatcherImpl>();
+  impl_ = new FilePathWatcherImpl();
 }
 
 }  // namespace base
diff --git a/base/files/file_path_watcher_mac.cc b/base/files/file_path_watcher_mac.cc
new file mode 100644
index 0000000..7338eaf
--- /dev/null
+++ b/base/files/file_path_watcher_mac.cc
@@ -0,0 +1,61 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/file_path_watcher.h"
+#include "base/files/file_path_watcher_kqueue.h"
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include "base/files/file_path_watcher_fsevents.h"
+#endif
+
+namespace base {
+
+namespace {
+
+class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
+ public:
+  bool Watch(const FilePath& path,
+             bool recursive,
+             const FilePathWatcher::Callback& callback) override {
+    // Use kqueue for non-recursive watches and FSEvents for recursive ones.
+    DCHECK(!impl_.get());
+    if (recursive) {
+      if (!FilePathWatcher::RecursiveWatchAvailable())
+        return false;
+#if !defined(OS_IOS)
+      impl_ = new FilePathWatcherFSEvents();
+#endif  // OS_IOS
+    } else {
+      impl_ = new FilePathWatcherKQueue();
+    }
+    DCHECK(impl_.get());
+    return impl_->Watch(path, recursive, callback);
+  }
+
+  void Cancel() override {
+    if (impl_.get())
+      impl_->Cancel();
+    set_cancelled();
+  }
+
+  void CancelOnMessageLoopThread() override {
+    if (impl_.get())
+      impl_->Cancel();
+    set_cancelled();
+  }
+
+ protected:
+  ~FilePathWatcherImpl() override {}
+
+  scoped_refptr<PlatformDelegate> impl_;
+};
+
+}  // namespace
+
+FilePathWatcher::FilePathWatcher() {
+  impl_ = new FilePathWatcherImpl();
+}
+
+}  // namespace base
diff --git a/base/files/file_path_watcher_unittest.cc b/base/files/file_path_watcher_unittest.cc
index d2ec37b..a40e485 100644
--- a/base/files/file_path_watcher_unittest.cc
+++ b/base/files/file_path_watcher_unittest.cc
@@ -28,6 +28,7 @@
 #include "base/synchronization/waitable_event.h"
 #include "base/test/test_file_util.h"
 #include "base/test/test_timeouts.h"
+#include "base/threading/thread.h"
 #include "base/threading/thread_task_runner_handle.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -36,10 +37,6 @@
 #include "base/android/path_utils.h"
 #endif  // defined(OS_ANDROID)
 
-#if defined(OS_POSIX)
-#include "base/files/file_descriptor_watcher_posix.h"
-#endif  // defined(OS_POSIX)
-
 namespace base {
 
 namespace {
@@ -134,19 +131,30 @@
   DISALLOW_COPY_AND_ASSIGN(TestDelegate);
 };
 
+void SetupWatchCallback(const FilePath& target,
+                        FilePathWatcher* watcher,
+                        TestDelegateBase* delegate,
+                        bool recursive_watch,
+                        bool* result,
+                        base::WaitableEvent* completion) {
+  *result = watcher->Watch(target, recursive_watch,
+                           base::Bind(&TestDelegateBase::OnFileChanged,
+                                      delegate->AsWeakPtr()));
+  completion->Signal();
+}
+
 class FilePathWatcherTest : public testing::Test {
  public:
   FilePathWatcherTest()
-#if defined(OS_POSIX)
-      : file_descriptor_watcher_(&loop_)
-#endif
-  {
-  }
+      : file_thread_("FilePathWatcherTest") {}
 
   ~FilePathWatcherTest() override {}
 
  protected:
   void SetUp() override {
+    // Create a separate file thread in order to test proper thread usage.
+    base::Thread::Options options(MessageLoop::TYPE_IO, 0);
+    ASSERT_TRUE(file_thread_.StartWithOptions(options));
 #if defined(OS_ANDROID)
     // Watching files is only permitted when all parent directories are
     // accessible, which is not the case for the default temp directory
@@ -163,12 +171,16 @@
 
   void TearDown() override { RunLoop().RunUntilIdle(); }
 
+  void DeleteDelegateOnFileThread(TestDelegate* delegate) {
+    file_thread_.task_runner()->DeleteSoon(FROM_HERE, delegate);
+  }
+
   FilePath test_file() {
-    return temp_dir_.GetPath().AppendASCII("FilePathWatcherTest");
+    return temp_dir_.path().AppendASCII("FilePathWatcherTest");
   }
 
   FilePath test_link() {
-    return temp_dir_.GetPath().AppendASCII("FilePathWatcherTest.lnk");
+    return temp_dir_.path().AppendASCII("FilePathWatcherTest.lnk");
   }
 
   // Write |content| to |file|. Returns true on success.
@@ -184,23 +196,18 @@
 
   bool WaitForEvents() WARN_UNUSED_RESULT {
     collector_->Reset();
-
-    RunLoop run_loop;
     // Make sure we timeout if we don't get notified.
-    ThreadTaskRunnerHandle::Get()->PostDelayedTask(
-        FROM_HERE, run_loop.QuitWhenIdleClosure(),
-        TestTimeouts::action_timeout());
-    run_loop.Run();
+    loop_.PostDelayedTask(FROM_HERE,
+                          MessageLoop::QuitWhenIdleClosure(),
+                          TestTimeouts::action_timeout());
+    RunLoop().Run();
     return collector_->Success();
   }
 
   NotificationCollector* collector() { return collector_.get(); }
 
-  MessageLoopForIO loop_;
-#if defined(OS_POSIX)
-  FileDescriptorWatcher file_descriptor_watcher_;
-#endif
-
+  MessageLoop loop_;
+  base::Thread file_thread_;
   ScopedTempDir temp_dir_;
   scoped_refptr<NotificationCollector> collector_;
 
@@ -212,9 +219,14 @@
                                      FilePathWatcher* watcher,
                                      TestDelegateBase* delegate,
                                      bool recursive_watch) {
-  return watcher->Watch(
-      target, recursive_watch,
-      base::Bind(&TestDelegateBase::OnFileChanged, delegate->AsWeakPtr()));
+  base::WaitableEvent completion(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  bool result;
+  file_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(SetupWatchCallback, target, watcher, delegate,
+                            recursive_watch, &result, &completion));
+  completion.Wait();
+  return result;
 }
 
 // Basic test: Create the file and verify that we notice.
@@ -225,6 +237,7 @@
 
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that modifying the file is caught.
@@ -238,11 +251,12 @@
   // Now make sure we get notified if the file is modified.
   ASSERT_TRUE(WriteFile(test_file(), "new content"));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that moving the file into place is caught.
 TEST_F(FilePathWatcherTest, MovedFile) {
-  FilePath source_file(temp_dir_.GetPath().AppendASCII("source"));
+  FilePath source_file(temp_dir_.path().AppendASCII("source"));
   ASSERT_TRUE(WriteFile(source_file, "content"));
 
   FilePathWatcher watcher;
@@ -252,6 +266,7 @@
   // Now make sure we get notified if the file is modified.
   ASSERT_TRUE(base::Move(source_file, test_file()));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 TEST_F(FilePathWatcherTest, DeletedFile) {
@@ -264,6 +279,7 @@
   // Now make sure we get notified if the file is deleted.
   base::DeleteFile(test_file(), false);
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Used by the DeleteDuringNotify test below.
@@ -311,9 +327,11 @@
 // Flaky on MacOS (and ARM linux): http://crbug.com/85930
 TEST_F(FilePathWatcherTest, DISABLED_DestroyWithPendingNotification) {
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
-  FilePathWatcher watcher;
-  ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
+  FilePathWatcher* watcher = new FilePathWatcher;
+  ASSERT_TRUE(SetupWatch(test_file(), watcher, delegate.get(), false));
   ASSERT_TRUE(WriteFile(test_file(), "content"));
+  file_thread_.task_runner()->DeleteSoon(FROM_HERE, watcher);
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
@@ -325,13 +343,15 @@
 
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate1.release());
+  DeleteDelegateOnFileThread(delegate2.release());
 }
 
 // Verify that watching a file whose parent directory doesn't exist yet works if
 // the directory and file are created eventually.
 TEST_F(FilePathWatcherTest, NonExistentDirectory) {
   FilePathWatcher watcher;
-  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
   FilePath file(dir.AppendASCII("file"));
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
@@ -350,12 +370,13 @@
   ASSERT_TRUE(base::DeleteFile(file, false));
   VLOG(1) << "Waiting for file deletion";
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Exercises watch reconfiguration for the case that directories on the path
 // are rapidly created.
 TEST_F(FilePathWatcherTest, DirectoryChain) {
-  FilePath path(temp_dir_.GetPath());
+  FilePath path(temp_dir_.path());
   std::vector<std::string> dir_names;
   for (int i = 0; i < 20; i++) {
     std::string dir(base::StringPrintf("d%d", i));
@@ -368,7 +389,7 @@
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
 
-  FilePath sub_path(temp_dir_.GetPath());
+  FilePath sub_path(temp_dir_.path());
   for (std::vector<std::string>::const_iterator d(dir_names.begin());
        d != dir_names.end(); ++d) {
     sub_path = sub_path.AppendASCII(*d);
@@ -382,6 +403,7 @@
   ASSERT_TRUE(WriteFile(file, "content v2"));
   VLOG(1) << "Waiting for file modification";
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 #if defined(OS_MACOSX)
@@ -390,7 +412,7 @@
 #endif
 TEST_F(FilePathWatcherTest, DisappearingDirectory) {
   FilePathWatcher watcher;
-  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
   FilePath file(dir.AppendASCII("file"));
   ASSERT_TRUE(base::CreateDirectory(dir));
   ASSERT_TRUE(WriteFile(file, "content"));
@@ -399,6 +421,7 @@
 
   ASSERT_TRUE(base::DeleteFile(dir, true));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Tests that a file that is deleted and reappears is tracked correctly.
@@ -415,11 +438,12 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   VLOG(1) << "Waiting for file creation";
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 TEST_F(FilePathWatcherTest, WatchDirectory) {
   FilePathWatcher watcher;
-  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
   FilePath file1(dir.AppendASCII("file1"));
   FilePath file2(dir.AppendASCII("file2"));
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
@@ -447,13 +471,14 @@
   ASSERT_TRUE(WriteFile(file2, "content"));
   VLOG(1) << "Waiting for file2 creation";
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 TEST_F(FilePathWatcherTest, MoveParent) {
   FilePathWatcher file_watcher;
   FilePathWatcher subdir_watcher;
-  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
-  FilePath dest(temp_dir_.GetPath().AppendASCII("dest"));
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath dest(temp_dir_.path().AppendASCII("dest"));
   FilePath subdir(dir.AppendASCII("subdir"));
   FilePath file(subdir.AppendASCII("file"));
   std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
@@ -472,15 +497,18 @@
   base::Move(dir, dest);
   VLOG(1) << "Waiting for directory move";
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(file_delegate.release());
+  DeleteDelegateOnFileThread(subdir_delegate.release());
 }
 
 TEST_F(FilePathWatcherTest, RecursiveWatch) {
   FilePathWatcher watcher;
-  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   bool setup_result = SetupWatch(dir, &watcher, delegate.get(), true);
   if (!FilePathWatcher::RecursiveWatchAvailable()) {
     ASSERT_FALSE(setup_result);
+    DeleteDelegateOnFileThread(delegate.release());
     return;
   }
   ASSERT_TRUE(setup_result);
@@ -536,6 +564,7 @@
   // Delete "$dir/subdir/subdir_child_dir/child_dir_file1".
   ASSERT_TRUE(base::DeleteFile(child_dir_file1, false));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 #if defined(OS_POSIX)
@@ -552,14 +581,14 @@
     return;
 
   FilePathWatcher watcher;
-  FilePath test_dir(temp_dir_.GetPath().AppendASCII("test_dir"));
+  FilePath test_dir(temp_dir_.path().AppendASCII("test_dir"));
   ASSERT_TRUE(base::CreateDirectory(test_dir));
   FilePath symlink(test_dir.AppendASCII("symlink"));
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(symlink, &watcher, delegate.get(), true));
 
   // Link creation.
-  FilePath target1(temp_dir_.GetPath().AppendASCII("target1"));
+  FilePath target1(temp_dir_.path().AppendASCII("target1"));
   ASSERT_TRUE(base::CreateSymbolicLink(target1, symlink));
   ASSERT_TRUE(WaitForEvents());
 
@@ -573,7 +602,7 @@
   ASSERT_TRUE(WaitForEvents());
 
   // Link change.
-  FilePath target2(temp_dir_.GetPath().AppendASCII("target2"));
+  FilePath target2(temp_dir_.path().AppendASCII("target2"));
   ASSERT_TRUE(base::CreateDirectory(target2));
   ASSERT_TRUE(base::DeleteFile(symlink, false));
   ASSERT_TRUE(base::CreateSymbolicLink(target2, symlink));
@@ -583,16 +612,18 @@
   FilePath target2_file(target2.AppendASCII("file"));
   ASSERT_TRUE(WriteFile(target2_file, "content"));
   ASSERT_TRUE(WaitForEvents());
+
+  DeleteDelegateOnFileThread(delegate.release());
 }
 #endif  // OS_POSIX
 
 TEST_F(FilePathWatcherTest, MoveChild) {
   FilePathWatcher file_watcher;
   FilePathWatcher subdir_watcher;
-  FilePath source_dir(temp_dir_.GetPath().AppendASCII("source"));
+  FilePath source_dir(temp_dir_.path().AppendASCII("source"));
   FilePath source_subdir(source_dir.AppendASCII("subdir"));
   FilePath source_file(source_subdir.AppendASCII("file"));
-  FilePath dest_dir(temp_dir_.GetPath().AppendASCII("dest"));
+  FilePath dest_dir(temp_dir_.path().AppendASCII("dest"));
   FilePath dest_subdir(dest_dir.AppendASCII("subdir"));
   FilePath dest_file(dest_subdir.AppendASCII("file"));
 
@@ -609,6 +640,8 @@
   // Move the directory into place, s.t. the watched file appears.
   ASSERT_TRUE(base::Move(source_dir, dest_dir));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(file_delegate.release());
+  DeleteDelegateOnFileThread(subdir_delegate.release());
 }
 
 // Verify that changing attributes on a file is caught
@@ -629,6 +662,7 @@
   // Now make sure we get notified if the file is modified.
   ASSERT_TRUE(base::MakeFileUnreadable(test_file()));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 #if defined(OS_LINUX)
@@ -644,6 +678,7 @@
   // Note that test_file() doesn't have to exist.
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that deleting a symlink is caught.
@@ -659,6 +694,7 @@
   // Now make sure we get notified if the link is deleted.
   ASSERT_TRUE(base::DeleteFile(test_link(), false));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that modifying a target file that a link is pointing to
@@ -674,6 +710,7 @@
   // Now make sure we get notified if the file is modified.
   ASSERT_TRUE(WriteFile(test_file(), "new content"));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that creating a target file that a link is pointing to
@@ -688,6 +725,7 @@
   // Now make sure we get notified if the target file is created.
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that deleting a target file that a link is pointing to
@@ -703,14 +741,15 @@
   // Now make sure we get notified if the target file is deleted.
   ASSERT_TRUE(base::DeleteFile(test_file(), false));
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that watching a file whose parent directory is a link that
 // doesn't exist yet works if the symlink is created eventually.
 TEST_F(FilePathWatcherTest, LinkedDirectoryPart1) {
   FilePathWatcher watcher;
-  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
-  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
@@ -731,14 +770,15 @@
   ASSERT_TRUE(base::DeleteFile(file, false));
   VLOG(1) << "Waiting for file deletion";
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that watching a file whose parent directory is a
 // dangling symlink works if the directory is created eventually.
 TEST_F(FilePathWatcherTest, LinkedDirectoryPart2) {
   FilePathWatcher watcher;
-  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
-  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
@@ -760,14 +800,15 @@
   ASSERT_TRUE(base::DeleteFile(file, false));
   VLOG(1) << "Waiting for file deletion";
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 // Verify that watching a file with a symlink on the path
 // to the file works.
 TEST_F(FilePathWatcherTest, LinkedDirectoryPart3) {
   FilePathWatcher watcher;
-  FilePath dir(temp_dir_.GetPath().AppendASCII("dir"));
-  FilePath link_dir(temp_dir_.GetPath().AppendASCII("dir.lnk"));
+  FilePath dir(temp_dir_.path().AppendASCII("dir"));
+  FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
   std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
@@ -787,6 +828,7 @@
   ASSERT_TRUE(base::DeleteFile(file, false));
   VLOG(1) << "Waiting for file deletion";
   ASSERT_TRUE(WaitForEvents());
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 #endif  // OS_LINUX
@@ -837,8 +879,7 @@
 
 // Verify that changing attributes on a directory works.
 TEST_F(FilePathWatcherTest, DirAttributesChanged) {
-  FilePath test_dir1(
-      temp_dir_.GetPath().AppendASCII("DirAttributesChangedDir1"));
+  FilePath test_dir1(temp_dir_.path().AppendASCII("DirAttributesChangedDir1"));
   FilePath test_dir2(test_dir1.AppendASCII("DirAttributesChangedDir2"));
   FilePath test_file(test_dir2.AppendASCII("DirAttributesChangedFile"));
   // Setup a directory hierarchy.
@@ -864,6 +905,7 @@
   ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, false));
   ASSERT_TRUE(WaitForEvents());
   ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, true));
+  DeleteDelegateOnFileThread(delegate.release());
 }
 
 #endif  // OS_MACOSX
diff --git a/base/files/file_posix.cc b/base/files/file_posix.cc
index 2738d6c..12f80c4 100644
--- a/base/files/file_posix.cc
+++ b/base/files/file_posix.cc
@@ -11,7 +11,7 @@
 #include <unistd.h>
 
 #include "base/logging.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/sparse_histogram.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/threading/thread_restrictions.h"
@@ -323,7 +323,7 @@
 
   stat_wrapper_t file_info;
   if (CallFstat(file_.get(), &file_info))
-    return -1;
+    return false;
 
   return file_info.st_size;
 }
@@ -372,7 +372,7 @@
   return CallFcntlFlock(file_.get(), false);
 }
 
-File File::Duplicate() const {
+File File::Duplicate() {
   if (!IsValid())
     return File();
 
@@ -513,10 +513,9 @@
 }
 #endif  // !defined(OS_NACL)
 
-bool File::Flush() {
+bool File::DoFlush() {
   ThreadRestrictions::AssertIOAllowed();
   DCHECK(IsValid());
-  SCOPED_FILE_TRACE("Flush");
 
 #if defined(OS_NACL)
   NOTIMPLEMENTED();  // NaCl doesn't implement fsync.
diff --git a/base/files/file_tracing.cc b/base/files/file_tracing.cc
index 48f5741..6d11cbc 100644
--- a/base/files/file_tracing.cc
+++ b/base/files/file_tracing.cc
@@ -4,62 +4,47 @@
 
 #include "base/files/file_tracing.h"
 
-#include "base/atomicops.h"
 #include "base/files/file.h"
 
-using base::subtle::AtomicWord;
-
 namespace base {
 
 namespace {
-AtomicWord g_provider;
-}
-
-FileTracing::Provider* GetProvider() {
-  AtomicWord provider = base::subtle::Acquire_Load(&g_provider);
-  return reinterpret_cast<FileTracing::Provider*>(provider);
+FileTracing::Provider* g_provider = nullptr;
 }
 
 // static
 bool FileTracing::IsCategoryEnabled() {
-  FileTracing::Provider* provider = GetProvider();
-  return provider && provider->FileTracingCategoryIsEnabled();
+  return g_provider && g_provider->FileTracingCategoryIsEnabled();
 }
 
 // static
 void FileTracing::SetProvider(FileTracing::Provider* provider) {
-  base::subtle::Release_Store(&g_provider,
-                              reinterpret_cast<AtomicWord>(provider));
+  g_provider = provider;
 }
 
 FileTracing::ScopedEnabler::ScopedEnabler() {
-  FileTracing::Provider* provider = GetProvider();
-  if (provider)
-    provider->FileTracingEnable(this);
+  if (g_provider)
+    g_provider->FileTracingEnable(this);
 }
 
 FileTracing::ScopedEnabler::~ScopedEnabler() {
-  FileTracing::Provider* provider = GetProvider();
-  if (provider)
-    provider->FileTracingDisable(this);
+  if (g_provider)
+    g_provider->FileTracingDisable(this);
 }
 
 FileTracing::ScopedTrace::ScopedTrace() : id_(nullptr) {}
 
 FileTracing::ScopedTrace::~ScopedTrace() {
-  if (id_) {
-    FileTracing::Provider* provider = GetProvider();
-    if (provider)
-      provider->FileTracingEventEnd(name_, id_);
-  }
+  if (id_ && g_provider)
+    g_provider->FileTracingEventEnd(name_, id_);
 }
 
 void FileTracing::ScopedTrace::Initialize(const char* name,
-                                          const File* file,
+                                          File* file,
                                           int64_t size) {
   id_ = &file->trace_enabler_;
   name_ = name;
-  GetProvider()->FileTracingEventBegin(name_, id_, file->tracing_path_, size);
+  g_provider->FileTracingEventBegin(name_, id_, file->tracing_path_, size);
 }
 
 }  // namespace base
diff --git a/base/files/file_tracing.h b/base/files/file_tracing.h
index 1fbfcd4..bedd7be 100644
--- a/base/files/file_tracing.h
+++ b/base/files/file_tracing.h
@@ -37,21 +37,21 @@
     virtual bool FileTracingCategoryIsEnabled() const = 0;
 
     // Enables file tracing for |id|. Must be called before recording events.
-    virtual void FileTracingEnable(const void* id) = 0;
+    virtual void FileTracingEnable(void* id) = 0;
 
     // Disables file tracing for |id|.
-    virtual void FileTracingDisable(const void* id) = 0;
+    virtual void FileTracingDisable(void* id) = 0;
 
     // Begins an event for |id| with |name|. |path| tells where in the directory
     // structure the event is happening (and may be blank). |size| is the number
     // of bytes involved in the event.
     virtual void FileTracingEventBegin(const char* name,
-                                       const void* id,
+                                       void* id,
                                        const FilePath& path,
                                        int64_t size) = 0;
 
     // Ends an event for |id| with |name|.
-    virtual void FileTracingEventEnd(const char* name, const void* id) = 0;
+    virtual void FileTracingEventEnd(const char* name, void* id) = 0;
   };
 
   // Sets a global file tracing provider to query categories and record events.
@@ -73,12 +73,12 @@
     // event to trace (e.g. "Read", "Write") and must have an application
     // lifetime (e.g. static or literal). |file| is the file being traced; must
     // outlive this class. |size| is the size (in bytes) of this event.
-    void Initialize(const char* name, const File* file, int64_t size);
+    void Initialize(const char* name, File* file, int64_t size);
 
    private:
     // The ID of this trace. Based on the |file| passed to |Initialize()|. Must
     // outlive this class.
-    const void* id_;
+    void* id_;
 
     // The name of the event to trace (e.g. "Read", "Write"). Prefixed with
     // "File".
diff --git a/base/files/file_unittest.cc b/base/files/file_unittest.cc
index 66c312b..2445f7e 100644
--- a/base/files/file_unittest.cc
+++ b/base/files/file_unittest.cc
@@ -9,7 +9,6 @@
 #include <utility>
 
 #include "base/files/file_util.h"
-#include "base/files/memory_mapped_file.h"
 #include "base/files/scoped_temp_dir.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
@@ -21,7 +20,7 @@
 TEST(FileTest, Create) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
+  FilePath file_path = temp_dir.path().AppendASCII("create_file_1");
 
   {
     // Don't create a File at all.
@@ -93,7 +92,7 @@
 
   {
     // Create a delete-on-close file.
-    file_path = temp_dir.GetPath().AppendASCII("create_file_2");
+    file_path = temp_dir.path().AppendASCII("create_file_2");
     File file(file_path,
               base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_READ |
                   base::File::FLAG_DELETE_ON_CLOSE);
@@ -108,7 +107,7 @@
 TEST(FileTest, Async) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("create_file");
+  FilePath file_path = temp_dir.path().AppendASCII("create_file");
 
   {
     File file(file_path, base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_ASYNC);
@@ -126,7 +125,7 @@
 TEST(FileTest, DeleteOpenFile) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("create_file_1");
+  FilePath file_path = temp_dir.path().AppendASCII("create_file_1");
 
   // Create a file.
   File file(file_path,
@@ -153,7 +152,7 @@
 TEST(FileTest, ReadWrite) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("read_write_file");
+  FilePath file_path = temp_dir.path().AppendASCII("read_write_file");
   File file(file_path,
             base::File::FLAG_CREATE | base::File::FLAG_READ |
                 base::File::FLAG_WRITE);
@@ -225,7 +224,7 @@
 TEST(FileTest, Append) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("append_file");
+  FilePath file_path = temp_dir.path().AppendASCII("append_file");
   File file(file_path, base::File::FLAG_CREATE | base::File::FLAG_APPEND);
   ASSERT_TRUE(file.IsValid());
 
@@ -273,7 +272,7 @@
 TEST(FileTest, Length) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("truncate_file");
+  FilePath file_path = temp_dir.path().AppendASCII("truncate_file");
   File file(file_path,
             base::File::FLAG_CREATE | base::File::FLAG_READ |
                 base::File::FLAG_WRITE);
@@ -325,7 +324,7 @@
 #endif
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  File file(temp_dir.GetPath().AppendASCII("touch_get_info_file"),
+  File file(temp_dir.path().AppendASCII("touch_get_info_file"),
             base::File::FLAG_CREATE | base::File::FLAG_WRITE |
                 base::File::FLAG_WRITE_ATTRIBUTES);
   ASSERT_TRUE(file.IsValid());
@@ -388,8 +387,7 @@
 TEST(FileTest, ReadAtCurrentPosition) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path =
-      temp_dir.GetPath().AppendASCII("read_at_current_position");
+  FilePath file_path = temp_dir.path().AppendASCII("read_at_current_position");
   File file(file_path,
             base::File::FLAG_CREATE | base::File::FLAG_READ |
                 base::File::FLAG_WRITE);
@@ -413,8 +411,7 @@
 TEST(FileTest, WriteAtCurrentPosition) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path =
-      temp_dir.GetPath().AppendASCII("write_at_current_position");
+  FilePath file_path = temp_dir.path().AppendASCII("write_at_current_position");
   File file(file_path,
             base::File::FLAG_CREATE | base::File::FLAG_READ |
                 base::File::FLAG_WRITE);
@@ -437,7 +434,7 @@
 TEST(FileTest, Seek) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("seek_file");
+  FilePath file_path = temp_dir.path().AppendASCII("seek_file");
   File file(file_path,
             base::File::FLAG_CREATE | base::File::FLAG_READ |
                 base::File::FLAG_WRITE);
@@ -454,7 +451,7 @@
 TEST(FileTest, Duplicate) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+  FilePath file_path = temp_dir.path().AppendASCII("file");
   File file(file_path,(base::File::FLAG_CREATE |
                        base::File::FLAG_READ |
                        base::File::FLAG_WRITE));
@@ -481,7 +478,7 @@
 TEST(FileTest, DuplicateDeleteOnClose) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+  FilePath file_path = temp_dir.path().AppendASCII("file");
   File file(file_path,(base::File::FLAG_CREATE |
                        base::File::FLAG_READ |
                        base::File::FLAG_WRITE |
@@ -498,8 +495,7 @@
 TEST(FileTest, GetInfoForDirectory) {
   base::ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath empty_dir =
-      temp_dir.GetPath().Append(FILE_PATH_LITERAL("gpfi_test"));
+  FilePath empty_dir = temp_dir.path().Append(FILE_PATH_LITERAL("gpfi_test"));
   ASSERT_TRUE(CreateDirectory(empty_dir));
 
   base::File dir(
@@ -518,158 +514,4 @@
   EXPECT_FALSE(info.is_symbolic_link);
   EXPECT_EQ(0, info.size);
 }
-
-TEST(FileTest, DeleteNoop) {
-  base::ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
-
-  // Creating and closing a file with DELETE perms should do nothing special.
-  File file(file_path,
-            (base::File::FLAG_CREATE | base::File::FLAG_READ |
-             base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
-  ASSERT_TRUE(file.IsValid());
-  file.Close();
-  ASSERT_TRUE(base::PathExists(file_path));
-}
-
-TEST(FileTest, Delete) {
-  base::ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
-
-  // Creating a file with DELETE and then marking for delete on close should
-  // delete it.
-  File file(file_path,
-            (base::File::FLAG_CREATE | base::File::FLAG_READ |
-             base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
-  ASSERT_TRUE(file.IsValid());
-  ASSERT_TRUE(file.DeleteOnClose(true));
-  file.Close();
-  ASSERT_FALSE(base::PathExists(file_path));
-}
-
-TEST(FileTest, DeleteThenRevoke) {
-  base::ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
-
-  // Creating a file with DELETE, marking it for delete, then clearing delete on
-  // close should not delete it.
-  File file(file_path,
-            (base::File::FLAG_CREATE | base::File::FLAG_READ |
-             base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
-  ASSERT_TRUE(file.IsValid());
-  ASSERT_TRUE(file.DeleteOnClose(true));
-  ASSERT_TRUE(file.DeleteOnClose(false));
-  file.Close();
-  ASSERT_TRUE(base::PathExists(file_path));
-}
-
-TEST(FileTest, IrrevokableDeleteOnClose) {
-  base::ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
-
-  // DELETE_ON_CLOSE cannot be revoked by this opener.
-  File file(
-      file_path,
-      (base::File::FLAG_CREATE | base::File::FLAG_READ |
-       base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE |
-       base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
-  ASSERT_TRUE(file.IsValid());
-  // https://msdn.microsoft.com/library/windows/desktop/aa364221.aspx says that
-  // setting the dispositon has no effect if the handle was opened with
-  // FLAG_DELETE_ON_CLOSE. Do not make the test's success dependent on whether
-  // or not SetFileInformationByHandle indicates success or failure. (It happens
-  // to indicate success on Windows 10.)
-  file.DeleteOnClose(false);
-  file.Close();
-  ASSERT_FALSE(base::PathExists(file_path));
-}
-
-TEST(FileTest, IrrevokableDeleteOnCloseOther) {
-  base::ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
-
-  // DELETE_ON_CLOSE cannot be revoked by another opener.
-  File file(
-      file_path,
-      (base::File::FLAG_CREATE | base::File::FLAG_READ |
-       base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE |
-       base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
-  ASSERT_TRUE(file.IsValid());
-
-  File file2(
-      file_path,
-      (base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE |
-       base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
-  ASSERT_TRUE(file2.IsValid());
-
-  file2.DeleteOnClose(false);
-  file2.Close();
-  ASSERT_TRUE(base::PathExists(file_path));
-  file.Close();
-  ASSERT_FALSE(base::PathExists(file_path));
-}
-
-TEST(FileTest, DeleteWithoutPermission) {
-  base::ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
-
-  // It should not be possible to mark a file for deletion when it was not
-  // created/opened with DELETE.
-  File file(file_path, (base::File::FLAG_CREATE | base::File::FLAG_READ |
-                        base::File::FLAG_WRITE));
-  ASSERT_TRUE(file.IsValid());
-  ASSERT_FALSE(file.DeleteOnClose(true));
-  file.Close();
-  ASSERT_TRUE(base::PathExists(file_path));
-}
-
-TEST(FileTest, UnsharedDeleteOnClose) {
-  base::ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
-
-  // Opening with DELETE_ON_CLOSE when a previous opener hasn't enabled sharing
-  // will fail.
-  File file(file_path, (base::File::FLAG_CREATE | base::File::FLAG_READ |
-                        base::File::FLAG_WRITE));
-  ASSERT_TRUE(file.IsValid());
-  File file2(
-      file_path,
-      (base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE |
-       base::File::FLAG_DELETE_ON_CLOSE | base::File::FLAG_SHARE_DELETE));
-  ASSERT_FALSE(file2.IsValid());
-
-  file.Close();
-  ASSERT_TRUE(base::PathExists(file_path));
-}
-
-TEST(FileTest, NoDeleteOnCloseWithMappedFile) {
-  base::ScopedTempDir temp_dir;
-  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("file");
-
-  // Mapping a file into memory blocks DeleteOnClose.
-  File file(file_path,
-            (base::File::FLAG_CREATE | base::File::FLAG_READ |
-             base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
-  ASSERT_TRUE(file.IsValid());
-  ASSERT_EQ(5, file.WriteAtCurrentPos("12345", 5));
-
-  {
-    base::MemoryMappedFile mapping;
-    ASSERT_TRUE(mapping.Initialize(file.Duplicate()));
-    ASSERT_EQ(5U, mapping.length());
-
-    EXPECT_FALSE(file.DeleteOnClose(true));
-  }
-
-  file.Close();
-  ASSERT_TRUE(base::PathExists(file_path));
-}
 #endif  // defined(OS_WIN)
diff --git a/base/files/file_util.h b/base/files/file_util.h
index 5ada35f..420dcae 100644
--- a/base/files/file_util.h
+++ b/base/files/file_util.h
@@ -294,6 +294,10 @@
 // be resolved with this function.
 BASE_EXPORT bool NormalizeToNativeFilePath(const FilePath& path,
                                            FilePath* nt_path);
+
+// Given an existing file in |path|, returns whether this file is on a network
+// drive or not. If |path| does not exist, this function returns false.
+BASE_EXPORT bool IsOnNetworkDrive(const base::FilePath& path);
 #endif
 
 // This function will return if the given file is a symlink or not.
@@ -307,9 +311,7 @@
                            const Time& last_accessed,
                            const Time& last_modified);
 
-// Wrapper for fopen-like calls. Returns non-NULL FILE* on success. The
-// underlying file descriptor (POSIX) or handle (Windows) is unconditionally
-// configured to not be propagated to child processes.
+// Wrapper for fopen-like calls. Returns non-NULL FILE* on success.
 BASE_EXPORT FILE* OpenFile(const FilePath& filename, const char* mode);
 
 // Closes file opened by OpenFile. Returns true on success.
@@ -363,17 +365,6 @@
 BASE_EXPORT bool SetNonBlocking(int fd);
 
 #if defined(OS_POSIX)
-// Creates a non-blocking, close-on-exec pipe.
-// This creates a non-blocking pipe that is not intended to be shared with any
-// child process. This will be done atomically if the operating system supports
-// it. Returns true if it was able to create the pipe, otherwise false.
-BASE_EXPORT bool CreateLocalNonBlockingPipe(int fds[2]);
-
-// Sets the given |fd| to close-on-exec mode.
-// Returns true if it was able to set it in the close-on-exec mode, otherwise
-// false.
-BASE_EXPORT bool SetCloseOnExec(int fd);
-
 // Test that |path| can only be changed by a given user and members of
 // a given set of groups.
 // Specifically, test that all parts of |path| under (and including) |base|:
diff --git a/base/files/file_util_mac.mm b/base/files/file_util_mac.mm
index 5a99aa0..e9c6c65 100644
--- a/base/files/file_util_mac.mm
+++ b/base/files/file_util_mac.mm
@@ -4,9 +4,8 @@
 
 #include "base/files/file_util.h"
 
-#import <Foundation/Foundation.h>
 #include <copyfile.h>
-#include <stdlib.h>
+#import <Foundation/Foundation.h>
 
 #include "base/files/file_path.h"
 #include "base/mac/foundation_util.h"
@@ -24,15 +23,6 @@
 }
 
 bool GetTempDir(base::FilePath* path) {
-  // In order to facilitate hermetic runs on macOS, first check $TMPDIR.
-  // NOTE: $TMPDIR is ALMOST ALWAYS set on macOS (unless the user un-set it).
-  const char* env_tmpdir = getenv("TMPDIR");
-  if (env_tmpdir) {
-    *path = base::FilePath(env_tmpdir);
-    return true;
-  }
-
-  // If we didn't find it, fall back to the native function.
   NSString* tmp = NSTemporaryDirectory();
   if (tmp == nil)
     return false;
diff --git a/base/files/file_util_posix.cc b/base/files/file_util_posix.cc
index a03ca8d..85a1b41 100644
--- a/base/files/file_util_posix.cc
+++ b/base/files/file_util_posix.cc
@@ -185,19 +185,6 @@
 #endif  // defined(OS_LINUX)
 #endif  // !defined(OS_NACL_NONSFI)
 
-#if !defined(OS_MACOSX)
-// Appends |mode_char| to |mode| before the optional character set encoding; see
-// https://www.gnu.org/software/libc/manual/html_node/Opening-Streams.html for
-// details.
-std::string AppendModeCharacter(StringPiece mode, char mode_char) {
-  std::string result(mode.as_string());
-  size_t comma_pos = result.find(',');
-  result.insert(comma_pos == std::string::npos ? result.length() : comma_pos, 1,
-                mode_char);
-  return result;
-}
-#endif
-
 }  // namespace
 
 #if !defined(OS_NACL_NONSFI)
@@ -289,8 +276,11 @@
   FilePath real_from_path = MakeAbsoluteFilePath(from_path);
   if (real_from_path.empty())
     return false;
-  if (real_to_path == real_from_path || real_from_path.IsParent(real_to_path))
+  if (real_to_path.value().size() >= real_from_path.value().size() &&
+      real_to_path.value().compare(0, real_from_path.value().size(),
+                                   real_from_path.value()) == 0) {
     return false;
+  }
 
   int traverse_type = FileEnumerator::FILES | FileEnumerator::SHOW_SYM_LINKS;
   if (recursive)
@@ -361,29 +351,6 @@
 }
 #endif  // !defined(OS_NACL_NONSFI)
 
-bool CreateLocalNonBlockingPipe(int fds[2]) {
-#if defined(OS_LINUX)
-  return pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0;
-#else
-  int raw_fds[2];
-  if (pipe(raw_fds) != 0)
-    return false;
-  ScopedFD fd_out(raw_fds[0]);
-  ScopedFD fd_in(raw_fds[1]);
-  if (!SetCloseOnExec(fd_out.get()))
-    return false;
-  if (!SetCloseOnExec(fd_in.get()))
-    return false;
-  if (!SetNonBlocking(fd_out.get()))
-    return false;
-  if (!SetNonBlocking(fd_in.get()))
-    return false;
-  fds[0] = fd_out.release();
-  fds[1] = fd_in.release();
-  return true;
-#endif
-}
-
 bool SetNonBlocking(int fd) {
   const int flags = fcntl(fd, F_GETFL);
   if (flags == -1)
@@ -395,21 +362,6 @@
   return true;
 }
 
-bool SetCloseOnExec(int fd) {
-#if defined(OS_NACL_NONSFI)
-  const int flags = 0;
-#else
-  const int flags = fcntl(fd, F_GETFD);
-  if (flags == -1)
-    return false;
-  if (flags & FD_CLOEXEC)
-    return true;
-#endif  // defined(OS_NACL_NONSFI)
-  if (HANDLE_EINTR(fcntl(fd, F_SETFD, flags | FD_CLOEXEC)) == -1)
-    return false;
-  return true;
-}
-
 bool PathExists(const FilePath& path) {
   ThreadRestrictions::AssertIOAllowed();
 #if defined(OS_ANDROID)
@@ -725,29 +677,11 @@
 #endif  // !defined(OS_NACL_NONSFI)
 
 FILE* OpenFile(const FilePath& filename, const char* mode) {
-  // 'e' is unconditionally added below, so be sure there is not one already
-  // present before a comma in |mode|.
-  DCHECK(
-      strchr(mode, 'e') == nullptr ||
-      (strchr(mode, ',') != nullptr && strchr(mode, 'e') > strchr(mode, ',')));
   ThreadRestrictions::AssertIOAllowed();
   FILE* result = NULL;
-#if defined(OS_MACOSX)
-  // macOS does not provide a mode character to set O_CLOEXEC; see
-  // https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man3/fopen.3.html.
-  const char* the_mode = mode;
-#else
-  std::string mode_with_e(AppendModeCharacter(mode, 'e'));
-  const char* the_mode = mode_with_e.c_str();
-#endif
   do {
-    result = fopen(filename.value().c_str(), the_mode);
+    result = fopen(filename.value().c_str(), mode);
   } while (!result && errno == EINTR);
-#if defined(OS_MACOSX)
-  // Mark the descriptor as close-on-exec.
-  if (result)
-    SetCloseOnExec(fileno(result));
-#endif
   return result;
 }
 
diff --git a/base/files/important_file_writer.cc b/base/files/important_file_writer.cc
index b468462..28550ad 100644
--- a/base/files/important_file_writer.cc
+++ b/base/files/important_file_writer.cc
@@ -18,7 +18,7 @@
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
@@ -57,18 +57,9 @@
 
 // Helper function to call WriteFileAtomically() with a
 // std::unique_ptr<std::string>.
-void WriteScopedStringToFileAtomically(
-    const FilePath& path,
-    std::unique_ptr<std::string> data,
-    Closure before_write_callback,
-    Callback<void(bool success)> after_write_callback) {
-  if (!before_write_callback.is_null())
-    before_write_callback.Run();
-
-  bool result = ImportantFileWriter::WriteFileAtomically(path, *data);
-
-  if (!after_write_callback.is_null())
-    after_write_callback.Run(result);
+bool WriteScopedStringToFileAtomically(const FilePath& path,
+                                       std::unique_ptr<std::string> data) {
+  return ImportantFileWriter::WriteFileAtomically(path, *data);
 }
 
 }  // namespace
@@ -103,7 +94,6 @@
   File tmp_file(tmp_file_path, File::FLAG_OPEN | File::FLAG_WRITE);
   if (!tmp_file.IsValid()) {
     LogFailure(path, FAILED_OPENING, "could not open temporary file");
-    DeleteFile(tmp_file_path, false);
     return false;
   }
 
@@ -178,11 +168,8 @@
   if (HasPendingWrite())
     timer_.Stop();
 
-  Closure task = Bind(&WriteScopedStringToFileAtomically, path_, Passed(&data),
-                      Passed(&before_next_write_callback_),
-                      Passed(&after_next_write_callback_));
-
-  if (!task_runner_->PostTask(FROM_HERE, MakeCriticalClosure(task))) {
+  auto task = Bind(&WriteScopedStringToFileAtomically, path_, Passed(&data));
+  if (!PostWriteTask(task)) {
     // Posting the task to background message loop is not expected
     // to fail, but if it does, avoid losing data and just hit the disk
     // on the current thread.
@@ -216,11 +203,37 @@
   serializer_ = nullptr;
 }
 
-void ImportantFileWriter::RegisterOnNextWriteCallbacks(
-    const Closure& before_next_write_callback,
-    const Callback<void(bool success)>& after_next_write_callback) {
-  before_next_write_callback_ = before_next_write_callback;
-  after_next_write_callback_ = after_next_write_callback;
+void ImportantFileWriter::RegisterOnNextSuccessfulWriteCallback(
+    const Closure& on_next_successful_write) {
+  DCHECK(on_next_successful_write_.is_null());
+  on_next_successful_write_ = on_next_successful_write;
+}
+
+bool ImportantFileWriter::PostWriteTask(const Callback<bool()>& task) {
+  // TODO(gab): This code could always use PostTaskAndReplyWithResult and let
+  // ForwardSuccessfulWrite() no-op if |on_next_successful_write_| is null, but
+  // PostTaskAndReply causes memory leaks in tests (crbug.com/371974) and
+  // suppressing all of those is unrealistic hence we avoid most of them by
+  // using PostTask() in the typical scenario below.
+  if (!on_next_successful_write_.is_null()) {
+    return PostTaskAndReplyWithResult(
+        task_runner_.get(),
+        FROM_HERE,
+        MakeCriticalClosure(task),
+        Bind(&ImportantFileWriter::ForwardSuccessfulWrite,
+             weak_factory_.GetWeakPtr()));
+  }
+  return task_runner_->PostTask(
+      FROM_HERE,
+      MakeCriticalClosure(Bind(IgnoreResult(task))));
+}
+
+void ImportantFileWriter::ForwardSuccessfulWrite(bool result) {
+  DCHECK(CalledOnValidThread());
+  if (result && !on_next_successful_write_.is_null()) {
+    on_next_successful_write_.Run();
+    on_next_successful_write_.Reset();
+  }
 }
 
 }  // namespace base
diff --git a/base/files/important_file_writer.h b/base/files/important_file_writer.h
index f154b04..0bd8a7f 100644
--- a/base/files/important_file_writer.h
+++ b/base/files/important_file_writer.h
@@ -20,21 +20,24 @@
 namespace base {
 
 class SequencedTaskRunner;
+class Thread;
 
-// Helper for atomically writing a file to ensure that it won't be corrupted by
-// *application* crash during write (implemented as create, flush, rename).
+// Helper to ensure that a file won't be corrupted by the write (for example on
+// application crash). Consider a naive way to save an important file F:
 //
-// As an added benefit, ImportantFileWriter makes it less likely that the file
-// is corrupted by *system* crash, though even if the ImportantFileWriter call
-// has already returned at the time of the crash it is not specified which
-// version of the file (old or new) is preserved. And depending on system
-// configuration (hardware and software) a significant likelihood of file
-// corruption may remain, thus using ImportantFileWriter is not a valid
-// substitute for file integrity checks and recovery codepaths for malformed
-// files.
+// 1. Open F for writing, truncating it.
+// 2. Write new data to F.
 //
-// Also note that ImportantFileWriter can be *really* slow (cf. File::Flush()
-// for details) and thus please don't block shutdown on ImportantFileWriter.
+// It's good when it works, but it gets very bad if step 2. doesn't complete.
+// It can be caused by a crash, a computer hang, or a weird I/O error. And you
+// end up with a broken file.
+//
+// To be safe, we don't start with writing directly to F. Instead, we write to
+// to a temporary file. Only after that write is successful, we rename the
+// temporary file to target filename.
+//
+// If you want to know more about this approach and ext3/ext4 fsync issues, see
+// http://blog.valerieaurora.org/2009/04/16/dont-panic-fsync-ext34-and-your-data/
 class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
  public:
   // Used by ScheduleSave to lazily provide the data to be saved. Allows us
@@ -50,9 +53,8 @@
     virtual ~DataSerializer() {}
   };
 
-  // Save |data| to |path| in an atomic manner. Blocks and writes data on the
-  // current thread. Does not guarantee file integrity across system crash (see
-  // the class comment above).
+  // Save |data| to |path| in an atomic manner (see the class comment above).
+  // Blocks and writes data on the current thread.
   static bool WriteFileAtomically(const FilePath& path, StringPiece data);
 
   // Initialize the writer.
@@ -93,26 +95,25 @@
   // Serialize data pending to be saved and execute write on backend thread.
   void DoScheduledWrite();
 
-  // Registers |before_next_write_callback| and |after_next_write_callback| to
-  // be synchronously invoked from WriteFileAtomically() before its next write
-  // and after its next write, respectively. The boolean passed to
-  // |after_next_write_callback| indicates whether the write was successful.
-  // Both callbacks must be thread safe as they will be called on |task_runner_|
-  // and may be called during Chrome shutdown.
-  // If called more than once before a write is scheduled on |task_runner|, the
-  // latest callbacks clobber the others.
-  void RegisterOnNextWriteCallbacks(
-      const Closure& before_next_write_callback,
-      const Callback<void(bool success)>& after_next_write_callback);
+  // Registers |on_next_successful_write| to be called once, on the next
+  // successful write event. Only one callback can be set at once.
+  void RegisterOnNextSuccessfulWriteCallback(
+      const Closure& on_next_successful_write);
 
   TimeDelta commit_interval() const {
     return commit_interval_;
   }
 
  private:
-  // Invoked synchronously on the next write event.
-  Closure before_next_write_callback_;
-  Callback<void(bool success)> after_next_write_callback_;
+  // Helper method for WriteNow().
+  bool PostWriteTask(const Callback<bool()>& task);
+
+  // If |result| is true and |on_next_successful_write_| is set, invokes
+  // |on_successful_write_| and then resets it; no-ops otherwise.
+  void ForwardSuccessfulWrite(bool result);
+
+  // Invoked once and then reset on the next successful write event.
+  Closure on_next_successful_write_;
 
   // Path being written to.
   const FilePath path_;
diff --git a/base/files/important_file_writer_unittest.cc b/base/files/important_file_writer_unittest.cc
index 9b8dcfd..43e051e 100644
--- a/base/files/important_file_writer_unittest.cc
+++ b/base/files/important_file_writer_unittest.cc
@@ -46,61 +46,39 @@
   const std::string data_;
 };
 
-enum WriteCallbackObservationState {
-  NOT_CALLED,
-  CALLED_WITH_ERROR,
-  CALLED_WITH_SUCCESS,
-};
-
-class WriteCallbacksObserver {
+class SuccessfulWriteObserver {
  public:
-  WriteCallbacksObserver() = default;
+  SuccessfulWriteObserver() : successful_write_observed_(false) {}
 
-  // Register OnBeforeWrite() and OnAfterWrite() to be called on the next write
+  // Register on_successful_write() to be called on the next successful write
   // of |writer|.
-  void ObserveNextWriteCallbacks(ImportantFileWriter* writer);
+  void ObserveNextSuccessfulWrite(ImportantFileWriter* writer);
 
-  // Returns the |WriteCallbackObservationState| which was observed, then resets
-  // it to |NOT_CALLED|.
-  WriteCallbackObservationState GetAndResetObservationState();
+  // Returns true if a successful write was observed via on_successful_write()
+  // and resets the observation state to false regardless.
+  bool GetAndResetObservationState();
 
  private:
-  void OnBeforeWrite() {
-    EXPECT_FALSE(before_write_called_);
-    before_write_called_ = true;
+  void on_successful_write() {
+    EXPECT_FALSE(successful_write_observed_);
+    successful_write_observed_ = true;
   }
 
-  void OnAfterWrite(bool success) {
-    EXPECT_EQ(NOT_CALLED, after_write_observation_state_);
-    after_write_observation_state_ =
-        success ? CALLED_WITH_SUCCESS : CALLED_WITH_ERROR;
-  }
+  bool successful_write_observed_;
 
-  bool before_write_called_ = false;
-  WriteCallbackObservationState after_write_observation_state_ = NOT_CALLED;
-
-  DISALLOW_COPY_AND_ASSIGN(WriteCallbacksObserver);
+  DISALLOW_COPY_AND_ASSIGN(SuccessfulWriteObserver);
 };
 
-void WriteCallbacksObserver::ObserveNextWriteCallbacks(
+void SuccessfulWriteObserver::ObserveNextSuccessfulWrite(
     ImportantFileWriter* writer) {
-  writer->RegisterOnNextWriteCallbacks(
-      base::Bind(&WriteCallbacksObserver::OnBeforeWrite,
-                 base::Unretained(this)),
-      base::Bind(&WriteCallbacksObserver::OnAfterWrite,
-                 base::Unretained(this)));
+  writer->RegisterOnNextSuccessfulWriteCallback(base::Bind(
+      &SuccessfulWriteObserver::on_successful_write, base::Unretained(this)));
 }
 
-WriteCallbackObservationState
-WriteCallbacksObserver::GetAndResetObservationState() {
-  EXPECT_EQ(after_write_observation_state_ != NOT_CALLED, before_write_called_)
-      << "The before-write callback should always be called before the "
-         "after-write callback";
-
-  WriteCallbackObservationState state = after_write_observation_state_;
-  before_write_called_ = false;
-  after_write_observation_state_ = NOT_CALLED;
-  return state;
+bool SuccessfulWriteObserver::GetAndResetObservationState() {
+  bool was_successful_write_observed = successful_write_observed_;
+  successful_write_observed_ = false;
+  return was_successful_write_observed;
 }
 
 }  // namespace
@@ -110,11 +88,11 @@
   ImportantFileWriterTest() { }
   void SetUp() override {
     ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
-    file_ = temp_dir_.GetPath().AppendASCII("test-file");
+    file_ = temp_dir_.path().AppendASCII("test-file");
   }
 
  protected:
-  WriteCallbacksObserver write_callback_observer_;
+  SuccessfulWriteObserver successful_write_observer_;
   FilePath file_;
   MessageLoop loop_;
 
@@ -125,102 +103,49 @@
 TEST_F(ImportantFileWriterTest, Basic) {
   ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
   EXPECT_FALSE(PathExists(writer.path()));
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
-  writer.WriteNow(MakeUnique<std::string>("foo"));
+  EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+  writer.WriteNow(WrapUnique(new std::string("foo")));
   RunLoop().RunUntilIdle();
 
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+  EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
   ASSERT_TRUE(PathExists(writer.path()));
   EXPECT_EQ("foo", GetFileContent(writer.path()));
 }
 
-TEST_F(ImportantFileWriterTest, WriteWithObserver) {
+TEST_F(ImportantFileWriterTest, BasicWithSuccessfulWriteObserver) {
   ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
   EXPECT_FALSE(PathExists(writer.path()));
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
-
-  // Confirm that the observer is invoked.
-  write_callback_observer_.ObserveNextWriteCallbacks(&writer);
-  writer.WriteNow(MakeUnique<std::string>("foo"));
+  EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+  successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
+  writer.WriteNow(WrapUnique(new std::string("foo")));
   RunLoop().RunUntilIdle();
 
-  EXPECT_EQ(CALLED_WITH_SUCCESS,
-            write_callback_observer_.GetAndResetObservationState());
+  // Confirm that the observer is invoked.
+  EXPECT_TRUE(successful_write_observer_.GetAndResetObservationState());
   ASSERT_TRUE(PathExists(writer.path()));
   EXPECT_EQ("foo", GetFileContent(writer.path()));
 
   // Confirm that re-installing the observer works for another write.
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
-  write_callback_observer_.ObserveNextWriteCallbacks(&writer);
-  writer.WriteNow(MakeUnique<std::string>("bar"));
+  EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+  successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
+  writer.WriteNow(WrapUnique(new std::string("bar")));
   RunLoop().RunUntilIdle();
 
-  EXPECT_EQ(CALLED_WITH_SUCCESS,
-            write_callback_observer_.GetAndResetObservationState());
+  EXPECT_TRUE(successful_write_observer_.GetAndResetObservationState());
   ASSERT_TRUE(PathExists(writer.path()));
   EXPECT_EQ("bar", GetFileContent(writer.path()));
 
   // Confirm that writing again without re-installing the observer doesn't
   // result in a notification.
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
-  writer.WriteNow(MakeUnique<std::string>("baz"));
+  EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
+  writer.WriteNow(WrapUnique(new std::string("baz")));
   RunLoop().RunUntilIdle();
 
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
+  EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
   ASSERT_TRUE(PathExists(writer.path()));
   EXPECT_EQ("baz", GetFileContent(writer.path()));
 }
 
-TEST_F(ImportantFileWriterTest, FailedWriteWithObserver) {
-  // Use an invalid file path (relative paths are invalid) to get a
-  // FILE_ERROR_ACCESS_DENIED error when trying to write the file.
-  ImportantFileWriter writer(FilePath().AppendASCII("bad/../path"),
-                             ThreadTaskRunnerHandle::Get());
-  EXPECT_FALSE(PathExists(writer.path()));
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
-  write_callback_observer_.ObserveNextWriteCallbacks(&writer);
-  writer.WriteNow(MakeUnique<std::string>("foo"));
-  RunLoop().RunUntilIdle();
-
-  // Confirm that the write observer was invoked with its boolean parameter set
-  // to false.
-  EXPECT_EQ(CALLED_WITH_ERROR,
-            write_callback_observer_.GetAndResetObservationState());
-  EXPECT_FALSE(PathExists(writer.path()));
-}
-
-TEST_F(ImportantFileWriterTest, CallbackRunsOnWriterThread) {
-  base::Thread file_writer_thread("ImportantFileWriter test thread");
-  file_writer_thread.Start();
-  ImportantFileWriter writer(file_, file_writer_thread.task_runner());
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
-
-  // Block execution on |file_writer_thread| to verify that callbacks are
-  // executed on it.
-  base::WaitableEvent wait_helper(
-      base::WaitableEvent::ResetPolicy::MANUAL,
-      base::WaitableEvent::InitialState::NOT_SIGNALED);
-  file_writer_thread.task_runner()->PostTask(
-      FROM_HERE,
-      base::Bind(&base::WaitableEvent::Wait, base::Unretained(&wait_helper)));
-
-  write_callback_observer_.ObserveNextWriteCallbacks(&writer);
-  writer.WriteNow(MakeUnique<std::string>("foo"));
-  RunLoop().RunUntilIdle();
-
-  // Expect the callback to not have been executed before the
-  // |file_writer_thread| is unblocked.
-  EXPECT_EQ(NOT_CALLED, write_callback_observer_.GetAndResetObservationState());
-
-  wait_helper.Signal();
-  file_writer_thread.FlushForTesting();
-
-  EXPECT_EQ(CALLED_WITH_SUCCESS,
-            write_callback_observer_.GetAndResetObservationState());
-  ASSERT_TRUE(PathExists(writer.path()));
-  EXPECT_EQ("foo", GetFileContent(writer.path()));
-}
-
 TEST_F(ImportantFileWriterTest, ScheduleWrite) {
   ImportantFileWriter writer(file_,
                              ThreadTaskRunnerHandle::Get(),
diff --git a/base/files/memory_mapped_file_posix.cc b/base/files/memory_mapped_file_posix.cc
index 90ba6f4..4899cf0 100644
--- a/base/files/memory_mapped_file_posix.cc
+++ b/base/files/memory_mapped_file_posix.cc
@@ -31,7 +31,7 @@
 
   if (region == MemoryMappedFile::Region::kWholeFile) {
     int64_t file_len = file_.GetLength();
-    if (file_len < 0) {
+    if (file_len == -1) {
       DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
       return false;
     }
@@ -78,12 +78,7 @@
       // POSIX won't auto-extend the file when it is written so it must first
       // be explicitly extended to the maximum size. Zeros will fill the new
       // space.
-      auto file_len = file_.GetLength();
-      if (file_len < 0) {
-        DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
-        return false;
-      }
-      file_.SetLength(std::max(file_len, region.offset + region.size));
+      file_.SetLength(std::max(file_.GetLength(), region.offset + region.size));
       flags |= PROT_READ | PROT_WRITE;
       break;
   }
diff --git a/base/files/scoped_file.cc b/base/files/scoped_file.cc
index 78d4ca5..8ce45b8 100644
--- a/base/files/scoped_file.cc
+++ b/base/files/scoped_file.cc
@@ -37,14 +37,6 @@
   int close_errno = errno;
   base::debug::Alias(&close_errno);
 
-#if defined(OS_LINUX)
-  // NB: Some file descriptors can return errors from close() e.g. network
-  // filesystems such as NFS and Linux input devices. On Linux, errors from
-  // close other than EBADF do not indicate failure to actually close the fd.
-  if (ret != 0 && errno != EBADF)
-    ret = 0;
-#endif
-
   PCHECK(0 == ret);
 }
 
diff --git a/base/files/scoped_temp_dir.cc b/base/files/scoped_temp_dir.cc
index 2681521..27b758e 100644
--- a/base/files/scoped_temp_dir.cc
+++ b/base/files/scoped_temp_dir.cc
@@ -76,11 +76,6 @@
   return ret;
 }
 
-const FilePath& ScopedTempDir::GetPath() const {
-  DCHECK(!path_.empty()) << "Did you call CreateUniqueTempDir* before?";
-  return path_;
-}
-
 bool ScopedTempDir::IsValid() const {
   return !path_.empty() && DirectoryExists(path_);
 }
diff --git a/base/files/scoped_temp_dir.h b/base/files/scoped_temp_dir.h
index a5aaf84..b1f2f5b 100644
--- a/base/files/scoped_temp_dir.h
+++ b/base/files/scoped_temp_dir.h
@@ -47,9 +47,7 @@
   // when this object goes out of scope.
   FilePath Take();
 
-  // Returns the path to the created directory. Call one of the
-  // CreateUniqueTempDir* methods before getting the path.
-  const FilePath& GetPath() const;
+  const FilePath& path() const { return path_; }
 
   // Returns true if path_ is non-empty and exists.
   bool IsValid() const;
diff --git a/base/files/scoped_temp_dir_unittest.cc b/base/files/scoped_temp_dir_unittest.cc
index 024b438..3b2f28e 100644
--- a/base/files/scoped_temp_dir_unittest.cc
+++ b/base/files/scoped_temp_dir_unittest.cc
@@ -53,7 +53,7 @@
   {
     ScopedTempDir dir;
     EXPECT_TRUE(dir.CreateUniqueTempDir());
-    test_path = dir.GetPath();
+    test_path = dir.path();
     EXPECT_TRUE(DirectoryExists(test_path));
     FilePath tmp_dir;
     EXPECT_TRUE(base::GetTempDir(&tmp_dir));
@@ -72,7 +72,7 @@
   {
     ScopedTempDir dir;
     EXPECT_TRUE(dir.CreateUniqueTempDirUnderPath(base_path));
-    test_path = dir.GetPath();
+    test_path = dir.path();
     EXPECT_TRUE(DirectoryExists(test_path));
     EXPECT_TRUE(base_path.IsParent(test_path));
     EXPECT_TRUE(test_path.value().find(base_path.value()) != std::string::npos);
@@ -99,12 +99,12 @@
 TEST(ScopedTempDir, LockedTempDir) {
   ScopedTempDir dir;
   EXPECT_TRUE(dir.CreateUniqueTempDir());
-  base::File file(dir.GetPath().Append(FILE_PATH_LITERAL("temp")),
+  base::File file(dir.path().Append(FILE_PATH_LITERAL("temp")),
                   base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
   EXPECT_TRUE(file.IsValid());
   EXPECT_EQ(base::File::FILE_OK, file.error_details());
   EXPECT_FALSE(dir.Delete());  // We should not be able to delete.
-  EXPECT_FALSE(dir.GetPath().empty());  // We should still have a valid path.
+  EXPECT_FALSE(dir.path().empty());  // We should still have a valid path.
   file.Close();
   // Now, we should be able to delete.
   EXPECT_TRUE(dir.Delete());
diff --git a/base/id_map.h b/base/id_map.h
index d171fb1..ef6b156 100644
--- a/base/id_map.h
+++ b/base/id_map.h
@@ -7,16 +7,20 @@
 
 #include <stddef.h>
 #include <stdint.h>
-#include <memory>
 #include <set>
-#include <type_traits>
-#include <utility>
 
 #include "base/containers/hash_tables.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/sequence_checker.h"
 
+// Ownership semantics - own pointer means the pointer is deleted in Remove()
+// & during destruction
+enum IDMapOwnershipSemantics {
+  IDMapExternalPointer,
+  IDMapOwnPointer
+};
+
 // This object maintains a list of IDs that can be quickly converted to
 // pointers to objects. It is implemented as a hash table, optimized for
 // relatively small data sets (in the common case, there will be exactly one
@@ -25,24 +29,25 @@
 // Items can be inserted into the container with arbitrary ID, but the caller
 // must ensure they are unique. Inserting IDs and relying on automatically
 // generated ones is not allowed because they can collide.
-
-// The map's value type (the V param) can be any dereferenceable type, such as a
-// raw pointer or smart pointer
-template <typename V, typename K = int32_t>
-class IDMap final {
+//
+// This class does not have a virtual destructor, do not inherit from it when
+// ownership semantics are set to own because pointers will leak.
+template <typename T,
+          IDMapOwnershipSemantics OS = IDMapExternalPointer,
+          typename K = int32_t>
+class IDMap {
  public:
   using KeyType = K;
 
  private:
-  using T = typename std::remove_reference<decltype(*V())>::type;
-  using HashTable = base::hash_map<KeyType, V>;
+  typedef base::hash_map<KeyType, T*> HashTable;
 
  public:
   IDMap() : iteration_depth_(0), next_id_(1), check_on_null_data_(false) {
     // A number of consumers of IDMap create it on one thread but always
     // access it from a different, but consistent, thread (or sequence)
-    // post-construction. The first call to CalledOnValidSequence() will re-bind
-    // it.
+    // post-construction. The first call to CalledOnValidSequencedThread()
+    // will re-bind it.
     sequence_checker_.DetachFromSequence();
   }
 
@@ -51,6 +56,7 @@
     // thread. However, all the accesses may take place on another thread (or
     // sequence), such as the IO thread. Detaching again to clean this up.
     sequence_checker_.DetachFromSequence();
+    Releaser<OS, 0>::release_all(&data_);
   }
 
   // Sets whether Add and Replace should DCHECK if passed in NULL data.
@@ -58,16 +64,29 @@
   void set_check_on_null_data(bool value) { check_on_null_data_ = value; }
 
   // Adds a view with an automatically generated unique ID. See AddWithID.
-  KeyType Add(V data) { return AddInternal(std::move(data)); }
+  KeyType Add(T* data) {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    DCHECK(!check_on_null_data_ || data);
+    KeyType this_id = next_id_;
+    DCHECK(data_.find(this_id) == data_.end()) << "Inserting duplicate item";
+    data_[this_id] = data;
+    next_id_++;
+    return this_id;
+  }
 
   // Adds a new data member with the specified ID. The ID must not be in
   // the list. The caller either must generate all unique IDs itself and use
   // this function, or allow this object to generate IDs and call Add. These
-  // two methods may not be mixed, or duplicate IDs may be generated.
-  void AddWithID(V data, KeyType id) { AddWithIDInternal(std::move(data), id); }
+  // two methods may not be mixed, or duplicate IDs may be generated
+  void AddWithID(T* data, KeyType id) {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+    DCHECK(!check_on_null_data_ || data);
+    DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
+    data_[id] = data;
+  }
 
   void Remove(KeyType id) {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
     typename HashTable::iterator i = data_.find(id);
     if (i == data_.end()) {
       NOTREACHED() << "Attempting to remove an item not in the list";
@@ -75,28 +94,36 @@
     }
 
     if (iteration_depth_ == 0) {
+      Releaser<OS, 0>::release(i->second);
       data_.erase(i);
     } else {
       removed_ids_.insert(id);
     }
   }
 
-  // Replaces the value for |id| with |new_data| and returns the existing value.
-  // Should only be called with an already added id.
-  V Replace(KeyType id, V new_data) {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
+  // Replaces the value for |id| with |new_data| and returns a pointer to the
+  // existing value. If there is no entry for |id|, the map is not altered and
+  // nullptr is returned. The OwnershipSemantics of the map have no effect on
+  // how the existing value is treated, the IDMap does not delete the existing
+  // value being replaced.
+  T* Replace(KeyType id, T* new_data) {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
     DCHECK(!check_on_null_data_ || new_data);
     typename HashTable::iterator i = data_.find(id);
-    DCHECK(i != data_.end());
+    if (i == data_.end()) {
+      NOTREACHED() << "Attempting to replace an item not in the list";
+      return nullptr;
+    }
 
-    std::swap(i->second, new_data);
-    return new_data;
+    T* temp = i->second;
+    i->second = new_data;
+    return temp;
   }
 
   void Clear() {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
     if (iteration_depth_ == 0) {
-      data_.clear();
+      Releaser<OS, 0>::release_all(&data_);
     } else {
       for (typename HashTable::iterator i = data_.begin();
            i != data_.end(); ++i)
@@ -105,20 +132,20 @@
   }
 
   bool IsEmpty() const {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
     return size() == 0u;
   }
 
   T* Lookup(KeyType id) const {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
     typename HashTable::const_iterator i = data_.find(id);
     if (i == data_.end())
-      return nullptr;
-    return &*i->second;
+      return NULL;
+    return i->second;
   }
 
   size_t size() const {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
     return data_.size() - removed_ids_.size();
   }
 
@@ -133,7 +160,9 @@
   template<class ReturnType>
   class Iterator {
    public:
-    Iterator(IDMap<V, K>* map) : map_(map), iter_(map_->data_.begin()) {
+    Iterator(IDMap<T, OS, K>* map)
+        : map_(map),
+          iter_(map_->data_.begin()) {
       Init();
     }
 
@@ -151,7 +180,7 @@
     }
 
     ~Iterator() {
-      DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
 
       // We're going to decrement iteration depth. Make sure it's greater than
       // zero so that it doesn't become negative.
@@ -162,29 +191,29 @@
     }
 
     bool IsAtEnd() const {
-      DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
       return iter_ == map_->data_.end();
     }
 
     KeyType GetCurrentKey() const {
-      DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
       return iter_->first;
     }
 
     ReturnType* GetCurrentValue() const {
-      DCHECK(map_->sequence_checker_.CalledOnValidSequence());
-      return &*iter_->second;
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
+      return iter_->second;
     }
 
     void Advance() {
-      DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
       ++iter_;
       SkipRemovedEntries();
     }
 
    private:
     void Init() {
-      DCHECK(map_->sequence_checker_.CalledOnValidSequence());
+      DCHECK(map_->sequence_checker_.CalledOnValidSequencedThread());
       ++map_->iteration_depth_;
       SkipRemovedEntries();
     }
@@ -197,7 +226,7 @@
       }
     }
 
-    IDMap<V, K>* map_;
+    IDMap<T, OS, K>* map_;
     typename HashTable::const_iterator iter_;
   };
 
@@ -205,22 +234,24 @@
   typedef Iterator<const T> const_iterator;
 
  private:
-  KeyType AddInternal(V data) {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
-    DCHECK(!check_on_null_data_ || data);
-    KeyType this_id = next_id_;
-    DCHECK(data_.find(this_id) == data_.end()) << "Inserting duplicate item";
-    data_[this_id] = std::move(data);
-    next_id_++;
-    return this_id;
-  }
 
-  void AddWithIDInternal(V data, KeyType id) {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
-    DCHECK(!check_on_null_data_ || data);
-    DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
-    data_[id] = std::move(data);
-  }
+  // The dummy parameter is there because C++ standard does not allow
+  // explicitly specialized templates inside classes
+  template<IDMapOwnershipSemantics OI, int dummy> struct Releaser {
+    static inline void release(T* ptr) {}
+    static inline void release_all(HashTable* table) {}
+  };
+
+  template<int dummy> struct Releaser<IDMapOwnPointer, dummy> {
+    static inline void release(T* ptr) { delete ptr;}
+    static inline void release_all(HashTable* table) {
+      for (typename HashTable::iterator i = table->begin();
+           i != table->end(); ++i) {
+        delete i->second;
+      }
+      table->clear();
+    }
+  };
 
   void Compact() {
     DCHECK_EQ(0, iteration_depth_);
diff --git a/base/id_map_unittest.cc b/base/id_map_unittest.cc
index 42949bb..a3f0808 100644
--- a/base/id_map_unittest.cc
+++ b/base/id_map_unittest.cc
@@ -6,9 +6,6 @@
 
 #include <stdint.h>
 
-#include <memory>
-
-#include "base/memory/ptr_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace {
@@ -26,7 +23,7 @@
 };
 
 TEST(IDMapTest, Basic) {
-  IDMap<TestObject*> map;
+  IDMap<TestObject> map;
   EXPECT_TRUE(map.IsEmpty());
   EXPECT_EQ(0U, map.size());
 
@@ -65,7 +62,7 @@
 }
 
 TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
-  IDMap<TestObject*> map;
+  IDMap<TestObject> map;
 
   TestObject obj1;
   TestObject obj2;
@@ -76,7 +73,7 @@
   map.Add(&obj3);
 
   {
-    IDMap<TestObject*>::const_iterator iter(&map);
+    IDMap<TestObject>::const_iterator iter(&map);
 
     EXPECT_EQ(1, map.iteration_depth());
 
@@ -98,7 +95,7 @@
 }
 
 TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
-  IDMap<TestObject*> map;
+  IDMap<TestObject> map;
 
   const int kCount = 5;
   TestObject obj[kCount];
@@ -110,16 +107,16 @@
   int32_t ids_in_iteration_order[kCount];
   const TestObject* objs_in_iteration_order[kCount];
   int counter = 0;
-  for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
-       iter.Advance()) {
+  for (IDMap<TestObject>::const_iterator iter(&map);
+       !iter.IsAtEnd(); iter.Advance()) {
     ids_in_iteration_order[counter] = iter.GetCurrentKey();
     objs_in_iteration_order[counter] = iter.GetCurrentValue();
     counter++;
   }
 
   counter = 0;
-  for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
-       iter.Advance()) {
+  for (IDMap<TestObject>::const_iterator iter(&map);
+       !iter.IsAtEnd(); iter.Advance()) {
     EXPECT_EQ(1, map.iteration_depth());
 
     switch (counter) {
@@ -150,7 +147,7 @@
 }
 
 TEST(IDMapTest, CopyIterator) {
-  IDMap<TestObject*> map;
+  IDMap<TestObject> map;
 
   TestObject obj1;
   TestObject obj2;
@@ -163,12 +160,12 @@
   EXPECT_EQ(0, map.iteration_depth());
 
   {
-    IDMap<TestObject*>::const_iterator iter1(&map);
+    IDMap<TestObject>::const_iterator iter1(&map);
     EXPECT_EQ(1, map.iteration_depth());
 
     // Make sure that copying the iterator correctly increments
     // map's iteration depth.
-    IDMap<TestObject*>::const_iterator iter2(iter1);
+    IDMap<TestObject>::const_iterator iter2(iter1);
     EXPECT_EQ(2, map.iteration_depth());
   }
 
@@ -178,7 +175,7 @@
 }
 
 TEST(IDMapTest, AssignIterator) {
-  IDMap<TestObject*> map;
+  IDMap<TestObject> map;
 
   TestObject obj1;
   TestObject obj2;
@@ -191,10 +188,10 @@
   EXPECT_EQ(0, map.iteration_depth());
 
   {
-    IDMap<TestObject*>::const_iterator iter1(&map);
+    IDMap<TestObject>::const_iterator iter1(&map);
     EXPECT_EQ(1, map.iteration_depth());
 
-    IDMap<TestObject*>::const_iterator iter2(&map);
+    IDMap<TestObject>::const_iterator iter2(&map);
     EXPECT_EQ(2, map.iteration_depth());
 
     // Make sure that assigning the iterator correctly updates
@@ -208,7 +205,7 @@
 }
 
 TEST(IDMapTest, IteratorRemainsValidWhenClearing) {
-  IDMap<TestObject*> map;
+  IDMap<TestObject> map;
 
   const int kCount = 5;
   TestObject obj[kCount];
@@ -220,16 +217,16 @@
   int32_t ids_in_iteration_order[kCount];
   const TestObject* objs_in_iteration_order[kCount];
   int counter = 0;
-  for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
-       iter.Advance()) {
+  for (IDMap<TestObject>::const_iterator iter(&map);
+       !iter.IsAtEnd(); iter.Advance()) {
     ids_in_iteration_order[counter] = iter.GetCurrentKey();
     objs_in_iteration_order[counter] = iter.GetCurrentValue();
     counter++;
   }
 
   counter = 0;
-  for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
-       iter.Advance()) {
+  for (IDMap<TestObject>::const_iterator iter(&map);
+       !iter.IsAtEnd(); iter.Advance()) {
     switch (counter) {
       case 0:
         EXPECT_EQ(ids_in_iteration_order[0], iter.GetCurrentKey());
@@ -261,17 +258,18 @@
   int map_external_ids[kCount];
 
   int owned_del_count = 0;
+  DestructorCounter* owned_obj[kCount];
   int map_owned_ids[kCount];
 
-  IDMap<DestructorCounter*> map_external;
-  IDMap<std::unique_ptr<DestructorCounter>> map_owned;
+  IDMap<DestructorCounter> map_external;
+  IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
 
   for (int i = 0; i < kCount; ++i) {
     external_obj[i] = new DestructorCounter(&external_del_count);
     map_external_ids[i] = map_external.Add(external_obj[i]);
 
-    map_owned_ids[i] =
-        map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
+    owned_obj[i] = new DestructorCounter(&owned_del_count);
+    map_owned_ids[i] = map_owned.Add(owned_obj[i]);
   }
 
   for (int i = 0; i < kCount; ++i) {
@@ -297,15 +295,17 @@
   DestructorCounter* external_obj[kCount];
 
   int owned_del_count = 0;
+  DestructorCounter* owned_obj[kCount];
 
-  IDMap<DestructorCounter*> map_external;
-  IDMap<std::unique_ptr<DestructorCounter>> map_owned;
+  IDMap<DestructorCounter> map_external;
+  IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
 
   for (int i = 0; i < kCount; ++i) {
     external_obj[i] = new DestructorCounter(&external_del_count);
     map_external.Add(external_obj[i]);
 
-    map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
+    owned_obj[i] = new DestructorCounter(&owned_del_count);
+    map_owned.Add(owned_obj[i]);
   }
 
   EXPECT_EQ(external_del_count, 0);
@@ -332,16 +332,18 @@
   DestructorCounter* external_obj[kCount];
 
   int owned_del_count = 0;
+  DestructorCounter* owned_obj[kCount];
 
   {
-    IDMap<DestructorCounter*> map_external;
-    IDMap<std::unique_ptr<DestructorCounter>> map_owned;
+    IDMap<DestructorCounter> map_external;
+    IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
 
     for (int i = 0; i < kCount; ++i) {
       external_obj[i] = new DestructorCounter(&external_del_count);
       map_external.Add(external_obj[i]);
 
-      map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
+      owned_obj[i] = new DestructorCounter(&owned_del_count);
+      map_owned.Add(owned_obj[i]);
     }
   }
 
@@ -356,14 +358,14 @@
 }
 
 TEST(IDMapTest, Int64KeyType) {
-  IDMap<TestObject*, int64_t> map;
+  IDMap<TestObject, IDMapExternalPointer, int64_t> map;
   TestObject obj1;
   const int64_t kId1 = 999999999999999999;
 
   map.AddWithID(&obj1, kId1);
   EXPECT_EQ(&obj1, map.Lookup(kId1));
 
-  IDMap<TestObject*, int64_t>::const_iterator iter(&map);
+  IDMap<TestObject, IDMapExternalPointer, int64_t>::const_iterator iter(&map);
   ASSERT_FALSE(iter.IsAtEnd());
   EXPECT_EQ(kId1, iter.GetCurrentKey());
   EXPECT_EQ(&obj1, iter.GetCurrentValue());
diff --git a/base/json/json_file_value_serializer.cc b/base/json/json_file_value_serializer.cc
index 661d25d..1a9b7a2 100644
--- a/base/json/json_file_value_serializer.cc
+++ b/base/json/json_file_value_serializer.cc
@@ -53,9 +53,11 @@
 }
 
 JSONFileValueDeserializer::JSONFileValueDeserializer(
-    const base::FilePath& json_file_path,
-    int options)
-    : json_file_path_(json_file_path), options_(options), last_read_size_(0U) {}
+    const base::FilePath& json_file_path)
+    : json_file_path_(json_file_path),
+      allow_trailing_comma_(false),
+      last_read_size_(0U) {
+}
 
 JSONFileValueDeserializer::~JSONFileValueDeserializer() {
 }
@@ -112,6 +114,7 @@
     return NULL;
   }
 
-  JSONStringValueDeserializer deserializer(json_string, options_);
+  JSONStringValueDeserializer deserializer(json_string);
+  deserializer.set_allow_trailing_comma(allow_trailing_comma_);
   return deserializer.Deserialize(error_code, error_str);
 }
diff --git a/base/json/json_file_value_serializer.h b/base/json/json_file_value_serializer.h
index a93950a..67d2342 100644
--- a/base/json/json_file_value_serializer.h
+++ b/base/json/json_file_value_serializer.h
@@ -48,9 +48,8 @@
 class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
  public:
   // |json_file_path_| is the path of a file that will be source of the
-  // deserialization. |options| is a bitmask of JSONParserOptions.
-  explicit JSONFileValueDeserializer(const base::FilePath& json_file_path,
-                                     int options = 0);
+  // deserialization.
+  explicit JSONFileValueDeserializer(const base::FilePath& json_file_path);
 
   ~JSONFileValueDeserializer() override;
 
@@ -83,6 +82,10 @@
   // be a JsonFileError.
   static const char* GetErrorMessageForCode(int error_code);
 
+  void set_allow_trailing_comma(bool new_value) {
+    allow_trailing_comma_ = new_value;
+  }
+
   // Returns the size (in bytes) of JSON string read from disk in the last
   // successful |Deserialize()| call.
   size_t get_last_read_size() const { return last_read_size_; }
@@ -93,7 +96,7 @@
   int ReadFileToString(std::string* json_string);
 
   const base::FilePath json_file_path_;
-  const int options_;
+  bool allow_trailing_comma_;
   size_t last_read_size_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSONFileValueDeserializer);
diff --git a/base/json/json_parser.cc b/base/json/json_parser.cc
index c6f6409..d97eccc 100644
--- a/base/json/json_parser.cc
+++ b/base/json/json_parser.cc
@@ -24,12 +24,149 @@
 
 namespace {
 
-// Chosen to support 99.9% of documents found in the wild late 2016.
-// http://crbug.com/673263
-const int kStackMaxDepth = 200;
+const int kStackMaxDepth = 100;
 
 const int32_t kExtendedASCIIStart = 0x80;
 
+// DictionaryHiddenRootValue and ListHiddenRootValue are used in conjunction
+// with JSONStringValue as an optimization for reducing the number of string
+// copies. When this optimization is active, the parser uses a hidden root to
+// keep the original JSON input string live and creates JSONStringValue children
+// holding StringPiece references to the input string, avoiding about 2/3rds of
+// string memory copies. The real root value is Swap()ed into the new instance.
+class DictionaryHiddenRootValue : public DictionaryValue {
+ public:
+  DictionaryHiddenRootValue(std::unique_ptr<std::string> json,
+                            std::unique_ptr<Value> root)
+      : json_(std::move(json)) {
+    DCHECK(root->IsType(Value::TYPE_DICTIONARY));
+    DictionaryValue::Swap(static_cast<DictionaryValue*>(root.get()));
+  }
+
+  void Swap(DictionaryValue* other) override {
+    DVLOG(1) << "Swap()ing a DictionaryValue inefficiently.";
+
+    // First deep copy to convert JSONStringValue to std::string and swap that
+    // copy with |other|, which contains the new contents of |this|.
+    std::unique_ptr<DictionaryValue> copy(CreateDeepCopy());
+    copy->Swap(other);
+
+    // Then erase the contents of the current dictionary and swap in the
+    // new contents, originally from |other|.
+    Clear();
+    json_.reset();
+    DictionaryValue::Swap(copy.get());
+  }
+
+  // Not overriding DictionaryValue::Remove because it just calls through to
+  // the method below.
+
+  bool RemoveWithoutPathExpansion(const std::string& key,
+                                  std::unique_ptr<Value>* out) override {
+    // If the caller won't take ownership of the removed value, just call up.
+    if (!out)
+      return DictionaryValue::RemoveWithoutPathExpansion(key, out);
+
+    DVLOG(1) << "Remove()ing from a DictionaryValue inefficiently.";
+
+    // Otherwise, remove the value while its still "owned" by this and copy it
+    // to convert any JSONStringValues to std::string.
+    std::unique_ptr<Value> out_owned;
+    if (!DictionaryValue::RemoveWithoutPathExpansion(key, &out_owned))
+      return false;
+
+    *out = out_owned->CreateDeepCopy();
+
+    return true;
+  }
+
+ private:
+  std::unique_ptr<std::string> json_;
+
+  DISALLOW_COPY_AND_ASSIGN(DictionaryHiddenRootValue);
+};
+
+class ListHiddenRootValue : public ListValue {
+ public:
+  ListHiddenRootValue(std::unique_ptr<std::string> json,
+                      std::unique_ptr<Value> root)
+      : json_(std::move(json)) {
+    DCHECK(root->IsType(Value::TYPE_LIST));
+    ListValue::Swap(static_cast<ListValue*>(root.get()));
+  }
+
+  void Swap(ListValue* other) override {
+    DVLOG(1) << "Swap()ing a ListValue inefficiently.";
+
+    // First deep copy to convert JSONStringValue to std::string and swap that
+    // copy with |other|, which contains the new contents of |this|.
+    std::unique_ptr<ListValue> copy(CreateDeepCopy());
+    copy->Swap(other);
+
+    // Then erase the contents of the current list and swap in the new contents,
+    // originally from |other|.
+    Clear();
+    json_.reset();
+    ListValue::Swap(copy.get());
+  }
+
+  bool Remove(size_t index, std::unique_ptr<Value>* out) override {
+    // If the caller won't take ownership of the removed value, just call up.
+    if (!out)
+      return ListValue::Remove(index, out);
+
+    DVLOG(1) << "Remove()ing from a ListValue inefficiently.";
+
+    // Otherwise, remove the value while its still "owned" by this and copy it
+    // to convert any JSONStringValues to std::string.
+    std::unique_ptr<Value> out_owned;
+    if (!ListValue::Remove(index, &out_owned))
+      return false;
+
+    *out = out_owned->CreateDeepCopy();
+
+    return true;
+  }
+
+ private:
+  std::unique_ptr<std::string> json_;
+
+  DISALLOW_COPY_AND_ASSIGN(ListHiddenRootValue);
+};
+
+// A variant on StringValue that uses StringPiece instead of copying the string
+// into the Value. This can only be stored in a child of hidden root (above),
+// otherwise the referenced string will not be guaranteed to outlive it.
+class JSONStringValue : public Value {
+ public:
+  explicit JSONStringValue(StringPiece piece)
+      : Value(TYPE_STRING), string_piece_(piece) {}
+
+  // Overridden from Value:
+  bool GetAsString(std::string* out_value) const override {
+    string_piece_.CopyToString(out_value);
+    return true;
+  }
+  bool GetAsString(string16* out_value) const override {
+    *out_value = UTF8ToUTF16(string_piece_);
+    return true;
+  }
+  Value* DeepCopy() const override {
+    return new StringValue(string_piece_.as_string());
+  }
+  bool Equals(const Value* other) const override {
+    std::string other_string;
+    return other->IsType(TYPE_STRING) && other->GetAsString(&other_string) &&
+        StringPiece(other_string) == string_piece_;
+  }
+
+ private:
+  // The location in the original input stream.
+  StringPiece string_piece_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSONStringValue);
+};
+
 // Simple class that checks for maximum recursion/"stack overflow."
 class StackMarker {
  public:
@@ -53,9 +190,6 @@
 
 }  // namespace
 
-// This is U+FFFD.
-const char kUnicodeReplacementString[] = "\xEF\xBF\xBD";
-
 JSONParser::JSONParser(int options)
     : options_(options),
       start_pos_(nullptr),
@@ -74,7 +208,16 @@
 }
 
 std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
-  start_pos_ = input.data();
+  std::unique_ptr<std::string> input_copy;
+  // If the children of a JSON root can be detached, then hidden roots cannot
+  // be used, so do not bother copying the input because StringPiece will not
+  // be used anywhere.
+  if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
+    input_copy = MakeUnique<std::string>(input.as_string());
+    start_pos_ = input_copy->data();
+  } else {
+    start_pos_ = input.data();
+  }
   pos_ = start_pos_;
   end_pos_ = start_pos_ + input.length();
   index_ = 0;
@@ -108,6 +251,26 @@
     }
   }
 
+  // Dictionaries and lists can contain JSONStringValues, so wrap them in a
+  // hidden root.
+  if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
+    if (root->IsType(Value::TYPE_DICTIONARY)) {
+      return MakeUnique<DictionaryHiddenRootValue>(std::move(input_copy),
+                                                   std::move(root));
+    }
+    if (root->IsType(Value::TYPE_LIST)) {
+      return MakeUnique<ListHiddenRootValue>(std::move(input_copy),
+                                             std::move(root));
+    }
+    if (root->IsType(Value::TYPE_STRING)) {
+      // A string type could be a JSONStringValue, but because there's no
+      // corresponding HiddenRootValue, the memory will be lost. Deep copy to
+      // preserve it.
+      return root->CreateDeepCopy();
+    }
+  }
+
+  // All other values can be returned directly.
   return root;
 }
 
@@ -133,62 +296,58 @@
 JSONParser::StringBuilder::StringBuilder() : StringBuilder(nullptr) {}
 
 JSONParser::StringBuilder::StringBuilder(const char* pos)
-    : pos_(pos), length_(0), has_string_(false) {}
-
-JSONParser::StringBuilder::~StringBuilder() {
-  if (has_string_)
-    string_.Destroy();
+    : pos_(pos),
+      length_(0),
+      string_(nullptr) {
 }
 
-void JSONParser::StringBuilder::operator=(StringBuilder&& other) {
-  pos_ = other.pos_;
-  length_ = other.length_;
-  has_string_ = other.has_string_;
-  if (has_string_)
-    string_.InitFromMove(std::move(other.string_));
+void JSONParser::StringBuilder::Swap(StringBuilder* other) {
+  std::swap(other->string_, string_);
+  std::swap(other->pos_, pos_);
+  std::swap(other->length_, length_);
+}
+
+JSONParser::StringBuilder::~StringBuilder() {
+  delete string_;
 }
 
 void JSONParser::StringBuilder::Append(const char& c) {
   DCHECK_GE(c, 0);
   DCHECK_LT(static_cast<unsigned char>(c), 128);
 
-  if (has_string_)
+  if (string_)
     string_->push_back(c);
   else
     ++length_;
 }
 
-void JSONParser::StringBuilder::AppendString(const char* str, size_t len) {
-  DCHECK(has_string_);
-  string_->append(str, len);
+void JSONParser::StringBuilder::AppendString(const std::string& str) {
+  DCHECK(string_);
+  string_->append(str);
 }
 
 void JSONParser::StringBuilder::Convert() {
-  if (has_string_)
+  if (string_)
     return;
+  string_  = new std::string(pos_, length_);
+}
 
-  has_string_ = true;
-  string_.Init(pos_, length_);
+bool JSONParser::StringBuilder::CanBeStringPiece() const {
+  return !string_;
 }
 
 StringPiece JSONParser::StringBuilder::AsStringPiece() {
-  if (has_string_)
-    return StringPiece(*string_);
+  if (string_)
+    return StringPiece();
   return StringPiece(pos_, length_);
 }
 
 const std::string& JSONParser::StringBuilder::AsString() {
-  if (!has_string_)
+  if (!string_)
     Convert();
   return *string_;
 }
 
-std::string JSONParser::StringBuilder::DestructiveAsString() {
-  if (has_string_)
-    return std::move(*string_);
-  return std::string(pos_, length_);
-}
-
 // JSONParser private //////////////////////////////////////////////////////////
 
 inline bool JSONParser::CanConsume(int length) {
@@ -308,11 +467,11 @@
   return false;
 }
 
-std::unique_ptr<Value> JSONParser::ParseNextToken() {
+Value* JSONParser::ParseNextToken() {
   return ParseToken(GetNextToken());
 }
 
-std::unique_ptr<Value> JSONParser::ParseToken(Token token) {
+Value* JSONParser::ParseToken(Token token) {
   switch (token) {
     case T_OBJECT_BEGIN:
       return ConsumeDictionary();
@@ -332,7 +491,7 @@
   }
 }
 
-std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
+Value* JSONParser::ConsumeDictionary() {
   if (*pos_ != '{') {
     ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
     return nullptr;
@@ -370,13 +529,13 @@
 
     // The next token is the value. Ownership transfers to |dict|.
     NextChar();
-    std::unique_ptr<Value> value = ParseNextToken();
+    Value* value = ParseNextToken();
     if (!value) {
       // ReportError from deeper level.
       return nullptr;
     }
 
-    dict->SetWithoutPathExpansion(key.AsStringPiece(), std::move(value));
+    dict->SetWithoutPathExpansion(key.AsString(), value);
 
     NextChar();
     token = GetNextToken();
@@ -393,10 +552,10 @@
     }
   }
 
-  return std::move(dict);
+  return dict.release();
 }
 
-std::unique_ptr<Value> JSONParser::ConsumeList() {
+Value* JSONParser::ConsumeList() {
   if (*pos_ != '[') {
     ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
     return nullptr;
@@ -413,13 +572,13 @@
   NextChar();
   Token token = GetNextToken();
   while (token != T_ARRAY_END) {
-    std::unique_ptr<Value> item = ParseToken(token);
+    Value* item = ParseToken(token);
     if (!item) {
       // ReportError from deeper level.
       return nullptr;
     }
 
-    list->Append(std::move(item));
+    list->Append(item);
 
     NextChar();
     token = GetNextToken();
@@ -436,15 +595,22 @@
     }
   }
 
-  return std::move(list);
+  return list.release();
 }
 
-std::unique_ptr<Value> JSONParser::ConsumeString() {
+Value* JSONParser::ConsumeString() {
   StringBuilder string;
   if (!ConsumeStringRaw(&string))
     return nullptr;
 
-  return base::MakeUnique<Value>(string.DestructiveAsString());
+  // Create the Value representation, using a hidden root, if configured
+  // to do so, and if the string can be represented by StringPiece.
+  if (string.CanBeStringPiece() && !(options_ & JSON_DETACHABLE_CHILDREN))
+    return new JSONStringValue(string.AsStringPiece());
+
+  if (string.CanBeStringPiece())
+    string.Convert();
+  return new StringValue(string.AsString());
 }
 
 bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
@@ -462,24 +628,16 @@
   int32_t next_char = 0;
 
   while (CanConsume(1)) {
-    int start_index = index_;
     pos_ = start_pos_ + index_;  // CBU8_NEXT is postcrement.
     CBU8_NEXT(start_pos_, index_, length, next_char);
     if (next_char < 0 || !IsValidCharacter(next_char)) {
-      if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) {
-        ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
-        return false;
-      }
-      CBU8_NEXT(start_pos_, start_index, length, next_char);
-      string.Convert();
-      string.AppendString(kUnicodeReplacementString,
-                          arraysize(kUnicodeReplacementString) - 1);
-      continue;
+      ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
+      return false;
     }
 
     if (next_char == '"') {
       --index_;  // Rewind by one because of CBU8_NEXT.
-      *out = std::move(string);
+      out->Swap(&string);
       return true;
     }
 
@@ -512,8 +670,7 @@
           }
 
           int hex_digit = 0;
-          if (!HexStringToInt(StringPiece(NextChar(), 2), &hex_digit) ||
-              !IsValidCharacter(hex_digit)) {
+          if (!HexStringToInt(StringPiece(NextChar(), 2), &hex_digit)) {
             ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
             return false;
           }
@@ -541,7 +698,7 @@
             return false;
           }
 
-          string.AppendString(utf8_units.data(), utf8_units.length());
+          string.AppendString(utf8_units);
           break;
         }
         case '"':
@@ -666,11 +823,11 @@
     dest->Convert();
     // CBU8_APPEND_UNSAFE can overwrite up to 4 bytes, so utf8_units may not be
     // zero terminated at this point.  |offset| contains the correct length.
-    dest->AppendString(utf8_units, offset);
+    dest->AppendString(std::string(utf8_units, offset));
   }
 }
 
-std::unique_ptr<Value> JSONParser::ConsumeNumber() {
+Value* JSONParser::ConsumeNumber() {
   const char* num_start = pos_;
   const int start_index = index_;
   int end_index = start_index;
@@ -685,7 +842,11 @@
   end_index = index_;
 
   // The optional fraction part.
-  if (CanConsume(1) && *pos_ == '.') {
+  if (*pos_ == '.') {
+    if (!CanConsume(1)) {
+      ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
+      return nullptr;
+    }
     NextChar();
     if (!ReadInt(true)) {
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
@@ -695,15 +856,10 @@
   }
 
   // Optional exponent part.
-  if (CanConsume(1) && (*pos_ == 'e' || *pos_ == 'E')) {
+  if (*pos_ == 'e' || *pos_ == 'E') {
     NextChar();
-    if (!CanConsume(1)) {
-      ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-      return nullptr;
-    }
-    if (*pos_ == '-' || *pos_ == '+') {
+    if (*pos_ == '-' || *pos_ == '+')
       NextChar();
-    }
     if (!ReadInt(true)) {
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
       return nullptr;
@@ -736,30 +892,25 @@
 
   int num_int;
   if (StringToInt(num_string, &num_int))
-    return base::MakeUnique<Value>(num_int);
+    return new FundamentalValue(num_int);
 
   double num_double;
   if (StringToDouble(num_string.as_string(), &num_double) &&
       std::isfinite(num_double)) {
-    return base::MakeUnique<Value>(num_double);
+    return new FundamentalValue(num_double);
   }
 
   return nullptr;
 }
 
 bool JSONParser::ReadInt(bool allow_leading_zeros) {
-  size_t len = 0;
-  char first = 0;
+  char first = *pos_;
+  int len = 0;
 
-  while (CanConsume(1)) {
-    if (!IsAsciiDigit(*pos_))
-      break;
-
-    if (len == 0)
-      first = *pos_;
-
+  char c = first;
+  while (CanConsume(1) && IsAsciiDigit(c)) {
+    c = *NextChar();
     ++len;
-    NextChar();
   }
 
   if (len == 0)
@@ -771,7 +922,7 @@
   return true;
 }
 
-std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
+Value* JSONParser::ConsumeLiteral() {
   switch (*pos_) {
     case 't': {
       const char kTrueLiteral[] = "true";
@@ -782,7 +933,7 @@
         return nullptr;
       }
       NextNChars(kTrueLen - 1);
-      return base::MakeUnique<Value>(true);
+      return new FundamentalValue(true);
     }
     case 'f': {
       const char kFalseLiteral[] = "false";
@@ -793,7 +944,7 @@
         return nullptr;
       }
       NextNChars(kFalseLen - 1);
-      return base::MakeUnique<Value>(false);
+      return new FundamentalValue(false);
     }
     case 'n': {
       const char kNullLiteral[] = "null";
@@ -804,7 +955,7 @@
         return nullptr;
       }
       NextNChars(kNullLen - 1);
-      return Value::CreateNullValue();
+      return Value::CreateNullValue().release();
     }
     default:
       ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
diff --git a/base/json/json_parser.h b/base/json/json_parser.h
index 4f26458..7539fa9 100644
--- a/base/json/json_parser.h
+++ b/base/json/json_parser.h
@@ -16,7 +16,6 @@
 #include "base/gtest_prod_util.h"
 #include "base/json/json_reader.h"
 #include "base/macros.h"
-#include "base/memory/manual_constructor.h"
 #include "base/strings/string_piece.h"
 
 namespace base {
@@ -31,7 +30,7 @@
 // to be used directly; it encapsulates logic that need not be exposed publicly.
 //
 // This parser guarantees O(n) time through the input string. It also optimizes
-// base::Value by using StringPiece where possible when returning Value
+// base::StringValue by using StringPiece where possible when returning Value
 // objects by using "hidden roots," discussed in the implementation.
 //
 // Iteration happens on the byte level, with the functions CanConsume and
@@ -94,7 +93,7 @@
   // This class centralizes that logic.
   class StringBuilder {
    public:
-    // Empty constructor. Used for creating a builder with which to assign to.
+    // Empty constructor. Used for creating a builder with which to Swap().
     StringBuilder();
 
     // |pos| is the beginning of an input string, excluding the |"|.
@@ -102,7 +101,8 @@
 
     ~StringBuilder();
 
-    void operator=(StringBuilder&& other);
+    // Swaps the contents of |other| with this.
+    void Swap(StringBuilder* other);
 
     // Either increases the |length_| of the string or copies the character if
     // the StringBuilder has been converted. |c| must be in the basic ASCII
@@ -111,24 +111,23 @@
     void Append(const char& c);
 
     // Appends a string to the std::string. Must be Convert()ed to use.
-    void AppendString(const char* str, size_t len);
+    void AppendString(const std::string& str);
 
     // Converts the builder from its default StringPiece to a full std::string,
     // performing a copy. Once a builder is converted, it cannot be made a
     // StringPiece again.
     void Convert();
 
-    // Returns the builder as a StringPiece.
+    // Returns whether the builder can be converted to a StringPiece.
+    bool CanBeStringPiece() const;
+
+    // Returns the StringPiece representation. Returns an empty piece if it
+    // cannot be converted.
     StringPiece AsStringPiece();
 
     // Returns the builder as a std::string.
     const std::string& AsString();
 
-    // Returns the builder as a string, invalidating all state. This allows
-    // the internal string buffer representation to be destructively moved
-    // in cases where the builder will not be needed any more.
-    std::string DestructiveAsString();
-
    private:
     // The beginning of the input string.
     const char* pos_;
@@ -136,10 +135,9 @@
     // Number of bytes in |pos_| that make up the string being built.
     size_t length_;
 
-    // The copied string representation. Will be uninitialized until Convert()
-    // is called, which will set has_string_ to true.
-    bool has_string_;
-    base::ManualConstructor<std::string> string_;
+    // The copied string representation. NULL until Convert() is called.
+    // Strong. std::unique_ptr<T> has too much of an overhead here.
+    std::string* string_;
   };
 
   // Quick check that the stream has capacity to consume |length| more bytes.
@@ -163,27 +161,28 @@
   // currently wound to a '/'.
   bool EatComment();
 
-  // Calls GetNextToken() and then ParseToken().
-  std::unique_ptr<Value> ParseNextToken();
+  // Calls GetNextToken() and then ParseToken(). Caller owns the result.
+  Value* ParseNextToken();
 
   // Takes a token that represents the start of a Value ("a structural token"
-  // in RFC terms) and consumes it, returning the result as a Value.
-  std::unique_ptr<Value> ParseToken(Token token);
+  // in RFC terms) and consumes it, returning the result as an object the
+  // caller owns.
+  Value* ParseToken(Token token);
 
   // Assuming that the parser is currently wound to '{', this parses a JSON
   // object into a DictionaryValue.
-  std::unique_ptr<Value> ConsumeDictionary();
+  Value* ConsumeDictionary();
 
   // Assuming that the parser is wound to '[', this parses a JSON list into a
-  // std::unique_ptr<ListValue>.
-  std::unique_ptr<Value> ConsumeList();
+  // ListValue.
+  Value* ConsumeList();
 
   // Calls through ConsumeStringRaw and wraps it in a value.
-  std::unique_ptr<Value> ConsumeString();
+  Value* ConsumeString();
 
   // Assuming that the parser is wound to a double quote, this parses a string,
   // decoding any escape sequences and converts UTF-16 to UTF-8. Returns true on
-  // success and places result into |out|. Returns false on failure with
+  // success and Swap()s the result into |out|. Returns false on failure with
   // error information set.
   bool ConsumeStringRaw(StringBuilder* out);
   // Helper function for ConsumeStringRaw() that consumes the next four or 10
@@ -199,14 +198,14 @@
 
   // Assuming that the parser is wound to the start of a valid JSON number,
   // this parses and converts it to either an int or double value.
-  std::unique_ptr<Value> ConsumeNumber();
+  Value* ConsumeNumber();
   // Helper that reads characters that are ints. Returns true if a number was
   // read and false on error.
   bool ReadInt(bool allow_leading_zeros);
 
   // Consumes the literal values of |true|, |false|, and |null|, assuming the
   // parser is wound to the first character of any of those.
-  std::unique_ptr<Value> ConsumeLiteral();
+  Value* ConsumeLiteral();
 
   // Compares two string buffers of a given length.
   static bool StringsAreEqual(const char* left, const char* right, size_t len);
@@ -259,14 +258,10 @@
   FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeLiterals);
   FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeNumbers);
   FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ErrorMessages);
-  FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ReplaceInvalidCharacters);
 
   DISALLOW_COPY_AND_ASSIGN(JSONParser);
 };
 
-// Used when decoding and an invalid utf-8 sequence is encountered.
-BASE_EXPORT extern const char kUnicodeReplacementString[];
-
 }  // namespace internal
 }  // namespace base
 
diff --git a/base/json/json_parser_unittest.cc b/base/json/json_parser_unittest.cc
index e3f635b..30255ca 100644
--- a/base/json/json_parser_unittest.cc
+++ b/base/json/json_parser_unittest.cc
@@ -9,8 +9,6 @@
 #include <memory>
 
 #include "base/json/json_reader.h"
-#include "base/memory/ptr_util.h"
-#include "base/strings/stringprintf.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -19,9 +17,8 @@
 
 class JSONParserTest : public testing::Test {
  public:
-  JSONParser* NewTestParser(const std::string& input,
-                            int options = JSON_PARSE_RFC) {
-    JSONParser* parser = new JSONParser(options);
+  JSONParser* NewTestParser(const std::string& input) {
+    JSONParser* parser = new JSONParser(JSON_PARSE_RFC);
     parser->start_pos_ = input.data();
     parser->pos_ = parser->start_pos_;
     parser->end_pos_ = parser->start_pos_ + input.length();
@@ -108,7 +105,7 @@
   // Literal |false|.
   input = "false,|";
   parser.reset(NewTestParser(input));
-  value = parser->ConsumeLiteral();
+  value.reset(parser->ConsumeLiteral());
   EXPECT_EQ('e', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -120,13 +117,13 @@
   // Literal |null|.
   input = "null,|";
   parser.reset(NewTestParser(input));
-  value = parser->ConsumeLiteral();
+  value.reset(parser->ConsumeLiteral());
   EXPECT_EQ('l', *parser->pos_);
 
   TestLastThree(parser.get());
 
   ASSERT_TRUE(value.get());
-  EXPECT_TRUE(value->IsType(Value::Type::NONE));
+  EXPECT_TRUE(value->IsType(Value::TYPE_NULL));
 }
 
 TEST_F(JSONParserTest, ConsumeNumbers) {
@@ -146,7 +143,7 @@
   // Negative integer.
   input = "-1234,|";
   parser.reset(NewTestParser(input));
-  value = parser->ConsumeNumber();
+  value.reset(parser->ConsumeNumber());
   EXPECT_EQ('4', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -158,7 +155,7 @@
   // Double.
   input = "12.34,|";
   parser.reset(NewTestParser(input));
-  value = parser->ConsumeNumber();
+  value.reset(parser->ConsumeNumber());
   EXPECT_EQ('4', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -171,7 +168,7 @@
   // Scientific.
   input = "42e3,|";
   parser.reset(NewTestParser(input));
-  value = parser->ConsumeNumber();
+  value.reset(parser->ConsumeNumber());
   EXPECT_EQ('3', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -183,7 +180,7 @@
   // Negative scientific.
   input = "314159e-5,|";
   parser.reset(NewTestParser(input));
-  value = parser->ConsumeNumber();
+  value.reset(parser->ConsumeNumber());
   EXPECT_EQ('5', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -195,7 +192,7 @@
   // Positive scientific.
   input = "0.42e+3,|";
   parser.reset(NewTestParser(input));
-  value = parser->ConsumeNumber();
+  value.reset(parser->ConsumeNumber());
   EXPECT_EQ('3', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -246,14 +243,14 @@
   EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, error_code);
 
   std::string nested_json;
-  for (int i = 0; i < 201; ++i) {
+  for (int i = 0; i < 101; ++i) {
     nested_json.insert(nested_json.begin(), '[');
     nested_json.append(1, ']');
   }
   root = JSONReader::ReadAndReturnError(nested_json, JSON_PARSE_RFC,
                                         &error_code, &error_message);
   EXPECT_FALSE(root.get());
-  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 200, JSONReader::kTooMuchNesting),
+  EXPECT_EQ(JSONParser::FormatErrorMessage(1, 100, JSONReader::kTooMuchNesting),
             error_message);
   EXPECT_EQ(JSONReader::JSON_TOO_MUCH_NESTING, error_code);
 
@@ -326,69 +323,5 @@
   EXPECT_FALSE(JSONReader::Read("[\"\\ud83f\\udffe\"]"));
 }
 
-TEST_F(JSONParserTest, DecodeNegativeEscapeSequence) {
-  EXPECT_FALSE(JSONReader::Read("[\"\\x-A\"]"));
-  EXPECT_FALSE(JSONReader::Read("[\"\\u-00A\"]"));
-}
-
-// Verifies invalid utf-8 characters are replaced.
-TEST_F(JSONParserTest, ReplaceInvalidCharacters) {
-  const std::string bogus_char = "ó¿¿¿";
-  const std::string quoted_bogus_char = "\"" + bogus_char + "\"";
-  std::unique_ptr<JSONParser> parser(
-      NewTestParser(quoted_bogus_char, JSON_REPLACE_INVALID_CHARACTERS));
-  std::unique_ptr<Value> value(parser->ConsumeString());
-  ASSERT_TRUE(value.get());
-  std::string str;
-  EXPECT_TRUE(value->GetAsString(&str));
-  EXPECT_EQ(kUnicodeReplacementString, str);
-}
-
-TEST_F(JSONParserTest, ParseNumberErrors) {
-  const struct {
-    const char* input;
-    bool parse_success;
-    double value;
-  } kCases[] = {
-      // clang-format off
-      {"1", true, 1},
-      {"2.", false, 0},
-      {"42", true, 42},
-      {"6e", false, 0},
-      {"43e2", true, 4300},
-      {"43e-", false, 0},
-      {"9e-3", true, 0.009},
-      {"2e+", false, 0},
-      {"2e+2", true, 200},
-      // clang-format on
-  };
-
-  for (unsigned int i = 0; i < arraysize(kCases); ++i) {
-    auto test_case = kCases[i];
-    SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case.input));
-
-    // MSan will do a better job detecting over-read errors if the input is
-    // not nul-terminated on the heap.
-    size_t str_len = strlen(test_case.input);
-    auto non_nul_termianted = MakeUnique<char[]>(str_len);
-    memcpy(non_nul_termianted.get(), test_case.input, str_len);
-
-    StringPiece string_piece(non_nul_termianted.get(), str_len);
-    std::unique_ptr<Value> result = JSONReader::Read(string_piece);
-    if (test_case.parse_success) {
-      EXPECT_TRUE(result);
-    } else {
-      EXPECT_FALSE(result);
-    }
-
-    if (!result)
-      continue;
-
-    double double_value = 0;
-    EXPECT_TRUE(result->GetAsDouble(&double_value));
-    EXPECT_EQ(test_case.value, double_value);
-  }
-}
-
 }  // namespace internal
 }  // namespace base
diff --git a/base/json/json_reader.h b/base/json/json_reader.h
index a39b37a..a954821 100644
--- a/base/json/json_reader.h
+++ b/base/json/json_reader.h
@@ -55,11 +55,6 @@
   // if the child is Remove()d from root, it would result in use-after-free
   // unless it is DeepCopy()ed or this option is used.
   JSON_DETACHABLE_CHILDREN = 1 << 1,
-
-  // If set the parser replaces invalid characters with the Unicode replacement
-  // character (U+FFFD). If not set, invalid characters trigger a hard error and
-  // parsing fails.
-  JSON_REPLACE_INVALID_CHARACTERS = 1 << 2,
 };
 
 class BASE_EXPORT JSONReader {
diff --git a/base/json/json_reader_unittest.cc b/base/json/json_reader_unittest.cc
index 1344de6..84732c4 100644
--- a/base/json/json_reader_unittest.cc
+++ b/base/json/json_reader_unittest.cc
@@ -29,7 +29,7 @@
     // some whitespace checking
     std::unique_ptr<Value> root = JSONReader().ReadToValue("   null   ");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::NONE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
   }
 
   {
@@ -41,23 +41,23 @@
     // Simple bool
     std::unique_ptr<Value> root = JSONReader().ReadToValue("true  ");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::BOOLEAN));
+    EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
   }
 
   {
     // Embedded comment
     std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::NONE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
     root = JSONReader().ReadToValue("40 /* comment */");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
     root = JSONReader().ReadToValue("true // comment");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::BOOLEAN));
+    EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
     root = JSONReader().ReadToValue("/* comment */\"sample string\"");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::STRING));
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
     std::string value;
     EXPECT_TRUE(root->GetAsString(&value));
     EXPECT_EQ("sample string", value);
@@ -75,7 +75,7 @@
     EXPECT_EQ(3u, list->GetSize());
     root = JSONReader().ReadToValue("/* comment **/42");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
     EXPECT_TRUE(root->GetAsInteger(&int_val));
     EXPECT_EQ(42, int_val);
     root = JSONReader().ReadToValue(
@@ -83,7 +83,7 @@
         "// */ 43\n"
         "44");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
     EXPECT_TRUE(root->GetAsInteger(&int_val));
     EXPECT_EQ(44, int_val);
   }
@@ -92,7 +92,7 @@
     // Test number formats
     std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
     int int_val = 0;
     EXPECT_TRUE(root->GetAsInteger(&int_val));
     EXPECT_EQ(43, int_val);
@@ -110,7 +110,7 @@
     // clause).
     std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
     int int_val = 1;
     EXPECT_TRUE(root->GetAsInteger(&int_val));
     EXPECT_EQ(0, int_val);
@@ -122,13 +122,13 @@
     std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
     ASSERT_TRUE(root);
     double double_val;
-    EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
     double_val = 0.0;
     EXPECT_TRUE(root->GetAsDouble(&double_val));
     EXPECT_DOUBLE_EQ(2147483648.0, double_val);
     root = JSONReader().ReadToValue("-2147483649");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
     double_val = 0.0;
     EXPECT_TRUE(root->GetAsDouble(&double_val));
     EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
@@ -138,42 +138,42 @@
     // Parse a double
     std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
     double double_val = 0.0;
     EXPECT_TRUE(root->GetAsDouble(&double_val));
     EXPECT_DOUBLE_EQ(43.1, double_val);
 
     root = JSONReader().ReadToValue("4.3e-1");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
     double_val = 0.0;
     EXPECT_TRUE(root->GetAsDouble(&double_val));
     EXPECT_DOUBLE_EQ(.43, double_val);
 
     root = JSONReader().ReadToValue("2.1e0");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
     double_val = 0.0;
     EXPECT_TRUE(root->GetAsDouble(&double_val));
     EXPECT_DOUBLE_EQ(2.1, double_val);
 
     root = JSONReader().ReadToValue("2.1e+0001");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
     double_val = 0.0;
     EXPECT_TRUE(root->GetAsDouble(&double_val));
     EXPECT_DOUBLE_EQ(21.0, double_val);
 
     root = JSONReader().ReadToValue("0.01");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
     double_val = 0.0;
     EXPECT_TRUE(root->GetAsDouble(&double_val));
     EXPECT_DOUBLE_EQ(0.01, double_val);
 
     root = JSONReader().ReadToValue("1.00");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
     double_val = 0.0;
     EXPECT_TRUE(root->GetAsDouble(&double_val));
     EXPECT_DOUBLE_EQ(1.0, double_val);
@@ -213,7 +213,7 @@
     // Test string parser
     std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::STRING));
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
     std::string str_val;
     EXPECT_TRUE(root->GetAsString(&str_val));
     EXPECT_EQ("hello world", str_val);
@@ -223,7 +223,7 @@
     // Empty string
     std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::STRING));
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
     std::string str_val;
     EXPECT_TRUE(root->GetAsString(&str_val));
     EXPECT_EQ("", str_val);
@@ -234,7 +234,7 @@
     std::unique_ptr<Value> root =
         JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::STRING));
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
     std::string str_val;
     EXPECT_TRUE(root->GetAsString(&str_val));
     EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
@@ -245,7 +245,7 @@
     std::unique_ptr<Value> root =
         JSONReader().ReadToValue("\"\\x41\\x00\\u1234\"");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::STRING));
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
     std::string str_val;
     EXPECT_TRUE(root->GetAsString(&str_val));
     EXPECT_EQ(std::wstring(L"A\0\x1234", 3), UTF8ToWide(str_val));
@@ -319,7 +319,7 @@
     EXPECT_EQ(1U, list->GetSize());
     Value* tmp_value = nullptr;
     ASSERT_TRUE(list->Get(0, &tmp_value));
-    EXPECT_TRUE(tmp_value->IsType(Value::Type::BOOLEAN));
+    EXPECT_TRUE(tmp_value->IsType(Value::TYPE_BOOLEAN));
     bool bool_value = false;
     EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
     EXPECT_TRUE(bool_value);
@@ -348,7 +348,7 @@
     EXPECT_DOUBLE_EQ(9.87654321, double_val);
     Value* null_val = nullptr;
     ASSERT_TRUE(dict_val->Get("null", &null_val));
-    EXPECT_TRUE(null_val->IsType(Value::Type::NONE));
+    EXPECT_TRUE(null_val->IsType(Value::TYPE_NULL));
     std::string str_val;
     EXPECT_TRUE(dict_val->GetString("S", &str_val));
     EXPECT_EQ("str", str_val);
@@ -486,7 +486,7 @@
     std::unique_ptr<Value> root =
         JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::STRING));
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
     std::string str_val;
     EXPECT_TRUE(root->GetAsString(&str_val));
     EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
@@ -510,7 +510,7 @@
     // Test utf16 encoded strings.
     std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::STRING));
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
     std::string str_val;
     EXPECT_TRUE(root->GetAsString(&str_val));
     EXPECT_EQ(
@@ -520,7 +520,7 @@
 
     root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
     ASSERT_TRUE(root);
-    EXPECT_TRUE(root->IsType(Value::Type::STRING));
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
     str_val.clear();
     EXPECT_TRUE(root->GetAsString(&str_val));
     EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
@@ -548,7 +548,7 @@
   {
     // Test literal root objects.
     std::unique_ptr<Value> root = JSONReader::Read("null");
-    EXPECT_TRUE(root->IsType(Value::Type::NONE));
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
 
     root = JSONReader::Read("true");
     ASSERT_TRUE(root);
@@ -583,7 +583,7 @@
   JSONReader reader;
   std::unique_ptr<Value> root(reader.ReadToValue(input));
   ASSERT_TRUE(root) << reader.GetErrorMessage();
-  EXPECT_TRUE(root->IsType(Value::Type::DICTIONARY));
+  EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
 }
 #endif  // !__ANDROID__ && !__ANDROID_HOST__
 
diff --git a/base/json/json_string_value_serializer.cc b/base/json/json_string_value_serializer.cc
index 2e46ab3..cd786db 100644
--- a/base/json/json_string_value_serializer.cc
+++ b/base/json/json_string_value_serializer.cc
@@ -41,15 +41,18 @@
 }
 
 JSONStringValueDeserializer::JSONStringValueDeserializer(
-    const base::StringPiece& json_string,
-    int options)
-    : json_string_(json_string), options_(options) {}
+    const base::StringPiece& json_string)
+    : json_string_(json_string),
+      allow_trailing_comma_(false) {
+}
 
 JSONStringValueDeserializer::~JSONStringValueDeserializer() {}
 
 std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
     int* error_code,
     std::string* error_str) {
-  return base::JSONReader::ReadAndReturnError(json_string_, options_,
-                                              error_code, error_str);
+  return base::JSONReader::ReadAndReturnError(
+      json_string_, allow_trailing_comma_ ? base::JSON_ALLOW_TRAILING_COMMAS
+                                          : base::JSON_PARSE_RFC,
+      error_code, error_str);
 }
diff --git a/base/json/json_string_value_serializer.h b/base/json/json_string_value_serializer.h
index 55a53e2..a97da23 100644
--- a/base/json/json_string_value_serializer.h
+++ b/base/json/json_string_value_serializer.h
@@ -47,10 +47,8 @@
 class BASE_EXPORT JSONStringValueDeserializer : public base::ValueDeserializer {
  public:
   // This retains a reference to the contents of |json_string|, so the data
-  // must outlive the JSONStringValueDeserializer. |options| is a bitmask of
-  // JSONParserOptions.
-  explicit JSONStringValueDeserializer(const base::StringPiece& json_string,
-                                       int options = 0);
+  // must outlive the JSONStringValueDeserializer.
+  explicit JSONStringValueDeserializer(const base::StringPiece& json_string);
 
   ~JSONStringValueDeserializer() override;
 
@@ -64,10 +62,15 @@
   std::unique_ptr<base::Value> Deserialize(int* error_code,
                                            std::string* error_message) override;
 
+  void set_allow_trailing_comma(bool new_value) {
+    allow_trailing_comma_ = new_value;
+  }
+
  private:
   // Data is owned by the caller of the constructor.
   base::StringPiece json_string_;
-  const int options_;
+  // If true, deserialization will allow trailing commas.
+  bool allow_trailing_comma_;
 
   DISALLOW_COPY_AND_ASSIGN(JSONStringValueDeserializer);
 };
diff --git a/base/json/json_value_converter.h b/base/json/json_value_converter.h
index 68ebfa2..4cca034 100644
--- a/base/json/json_value_converter.h
+++ b/base/json/json_value_converter.h
@@ -14,7 +14,8 @@
 #include "base/base_export.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
+#include "base/memory/scoped_vector.h"
+#include "base/stl_util.h"
 #include "base/strings/string16.h"
 #include "base/strings/string_piece.h"
 #include "base/values.h"
@@ -65,9 +66,9 @@
 //     }
 //   };
 //
-// For repeated field, we just assume std::vector<std::unique_ptr<ElementType>>
-// for its container and you can put RegisterRepeatedInt or some other types.
-// Use RegisterRepeatedMessage for nested repeated fields.
+// For repeated field, we just assume ScopedVector for its container
+// and you can put RegisterRepeatedInt or some other types.  Use
+// RegisterRepeatedMessage for nested repeated fields.
 //
 // Sometimes JSON format uses string representations for other types such
 // like enum, timestamp, or URL.  You can use RegisterCustomField method
@@ -199,7 +200,7 @@
  public:
   typedef bool(*ConvertFunc)(const base::Value* value, FieldType* field);
 
-  explicit ValueFieldConverter(ConvertFunc convert_func)
+  ValueFieldConverter(ConvertFunc convert_func)
       : convert_func_(convert_func) {}
 
   bool Convert(const base::Value& value, FieldType* field) const override {
@@ -217,7 +218,7 @@
  public:
   typedef bool(*ConvertFunc)(const StringPiece& value, FieldType* field);
 
-  explicit CustomFieldConverter(ConvertFunc convert_func)
+  CustomFieldConverter(ConvertFunc convert_func)
       : convert_func_(convert_func) {}
 
   bool Convert(const base::Value& value, FieldType* field) const override {
@@ -247,13 +248,12 @@
 };
 
 template <typename Element>
-class RepeatedValueConverter
-    : public ValueConverter<std::vector<std::unique_ptr<Element>>> {
+class RepeatedValueConverter : public ValueConverter<ScopedVector<Element> > {
  public:
   RepeatedValueConverter() {}
 
   bool Convert(const base::Value& value,
-               std::vector<std::unique_ptr<Element>>* field) const override {
+               ScopedVector<Element>* field) const override {
     const base::ListValue* list = NULL;
     if (!value.GetAsList(&list)) {
       // The field is not a list.
@@ -268,7 +268,7 @@
 
       std::unique_ptr<Element> e(new Element);
       if (basic_converter_.Convert(*element, e.get())) {
-        field->push_back(std::move(e));
+        field->push_back(e.release());
       } else {
         DVLOG(1) << "failure at " << i << "-th element";
         return false;
@@ -284,12 +284,12 @@
 
 template <typename NestedType>
 class RepeatedMessageConverter
-    : public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
+    : public ValueConverter<ScopedVector<NestedType> > {
  public:
   RepeatedMessageConverter() {}
 
   bool Convert(const base::Value& value,
-               std::vector<std::unique_ptr<NestedType>>* field) const override {
+               ScopedVector<NestedType>* field) const override {
     const base::ListValue* list = NULL;
     if (!value.GetAsList(&list))
       return false;
@@ -302,7 +302,7 @@
 
       std::unique_ptr<NestedType> nested(new NestedType);
       if (converter_.Convert(*element, nested.get())) {
-        field->push_back(std::move(nested));
+        field->push_back(nested.release());
       } else {
         DVLOG(1) << "failure at " << i << "-th element";
         return false;
@@ -318,15 +318,15 @@
 
 template <typename NestedType>
 class RepeatedCustomValueConverter
-    : public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
+    : public ValueConverter<ScopedVector<NestedType> > {
  public:
   typedef bool(*ConvertFunc)(const base::Value* value, NestedType* field);
 
-  explicit RepeatedCustomValueConverter(ConvertFunc convert_func)
+  RepeatedCustomValueConverter(ConvertFunc convert_func)
       : convert_func_(convert_func) {}
 
   bool Convert(const base::Value& value,
-               std::vector<std::unique_ptr<NestedType>>* field) const override {
+               ScopedVector<NestedType>* field) const override {
     const base::ListValue* list = NULL;
     if (!value.GetAsList(&list))
       return false;
@@ -339,7 +339,7 @@
 
       std::unique_ptr<NestedType> nested(new NestedType);
       if ((*convert_func_)(element, nested.get())) {
-        field->push_back(std::move(nested));
+        field->push_back(nested.release());
       } else {
         DVLOG(1) << "failure at " << i << "-th element";
         return false;
@@ -365,42 +365,41 @@
 
   void RegisterIntField(const std::string& field_name,
                         int StructType::* field) {
-    fields_.push_back(MakeUnique<internal::FieldConverter<StructType, int>>(
+    fields_.push_back(new internal::FieldConverter<StructType, int>(
         field_name, field, new internal::BasicValueConverter<int>));
   }
 
   void RegisterStringField(const std::string& field_name,
                            std::string StructType::* field) {
-    fields_.push_back(
-        MakeUnique<internal::FieldConverter<StructType, std::string>>(
-            field_name, field, new internal::BasicValueConverter<std::string>));
+    fields_.push_back(new internal::FieldConverter<StructType, std::string>(
+        field_name, field, new internal::BasicValueConverter<std::string>));
   }
 
   void RegisterStringField(const std::string& field_name,
                            string16 StructType::* field) {
-    fields_.push_back(
-        MakeUnique<internal::FieldConverter<StructType, string16>>(
-            field_name, field, new internal::BasicValueConverter<string16>));
+    fields_.push_back(new internal::FieldConverter<StructType, string16>(
+        field_name, field, new internal::BasicValueConverter<string16>));
   }
 
   void RegisterBoolField(const std::string& field_name,
                          bool StructType::* field) {
-    fields_.push_back(MakeUnique<internal::FieldConverter<StructType, bool>>(
+    fields_.push_back(new internal::FieldConverter<StructType, bool>(
         field_name, field, new internal::BasicValueConverter<bool>));
   }
 
   void RegisterDoubleField(const std::string& field_name,
                            double StructType::* field) {
-    fields_.push_back(MakeUnique<internal::FieldConverter<StructType, double>>(
+    fields_.push_back(new internal::FieldConverter<StructType, double>(
         field_name, field, new internal::BasicValueConverter<double>));
   }
 
   template <class NestedType>
   void RegisterNestedField(
       const std::string& field_name, NestedType StructType::* field) {
-    fields_.push_back(
-        MakeUnique<internal::FieldConverter<StructType, NestedType>>(
-            field_name, field, new internal::NestedValueConverter<NestedType>));
+    fields_.push_back(new internal::FieldConverter<StructType, NestedType>(
+            field_name,
+            field,
+            new internal::NestedValueConverter<NestedType>));
   }
 
   template <typename FieldType>
@@ -408,10 +407,10 @@
       const std::string& field_name,
       FieldType StructType::* field,
       bool (*convert_func)(const StringPiece&, FieldType*)) {
-    fields_.push_back(
-        MakeUnique<internal::FieldConverter<StructType, FieldType>>(
-            field_name, field,
-            new internal::CustomFieldConverter<FieldType>(convert_func)));
+    fields_.push_back(new internal::FieldConverter<StructType, FieldType>(
+        field_name,
+        field,
+        new internal::CustomFieldConverter<FieldType>(convert_func)));
   }
 
   template <typename FieldType>
@@ -419,76 +418,71 @@
       const std::string& field_name,
       FieldType StructType::* field,
       bool (*convert_func)(const base::Value*, FieldType*)) {
-    fields_.push_back(
-        MakeUnique<internal::FieldConverter<StructType, FieldType>>(
-            field_name, field,
-            new internal::ValueFieldConverter<FieldType>(convert_func)));
+    fields_.push_back(new internal::FieldConverter<StructType, FieldType>(
+        field_name,
+        field,
+        new internal::ValueFieldConverter<FieldType>(convert_func)));
   }
 
-  void RegisterRepeatedInt(
-      const std::string& field_name,
-      std::vector<std::unique_ptr<int>> StructType::*field) {
+  void RegisterRepeatedInt(const std::string& field_name,
+                           ScopedVector<int> StructType::* field) {
     fields_.push_back(
-        MakeUnique<internal::FieldConverter<StructType,
-                                            std::vector<std::unique_ptr<int>>>>(
+        new internal::FieldConverter<StructType, ScopedVector<int> >(
             field_name, field, new internal::RepeatedValueConverter<int>));
   }
 
-  void RegisterRepeatedString(
-      const std::string& field_name,
-      std::vector<std::unique_ptr<std::string>> StructType::*field) {
+  void RegisterRepeatedString(const std::string& field_name,
+                              ScopedVector<std::string> StructType::* field) {
     fields_.push_back(
-        MakeUnique<internal::FieldConverter<
-            StructType, std::vector<std::unique_ptr<std::string>>>>(
-            field_name, field,
+        new internal::FieldConverter<StructType, ScopedVector<std::string> >(
+            field_name,
+            field,
             new internal::RepeatedValueConverter<std::string>));
   }
 
-  void RegisterRepeatedString(
-      const std::string& field_name,
-      std::vector<std::unique_ptr<string16>> StructType::*field) {
-    fields_.push_back(MakeUnique<internal::FieldConverter<
-                          StructType, std::vector<std::unique_ptr<string16>>>>(
-        field_name, field, new internal::RepeatedValueConverter<string16>));
+  void RegisterRepeatedString(const std::string& field_name,
+                              ScopedVector<string16> StructType::* field) {
+    fields_.push_back(
+        new internal::FieldConverter<StructType, ScopedVector<string16> >(
+            field_name,
+            field,
+            new internal::RepeatedValueConverter<string16>));
   }
 
-  void RegisterRepeatedDouble(
-      const std::string& field_name,
-      std::vector<std::unique_ptr<double>> StructType::*field) {
-    fields_.push_back(MakeUnique<internal::FieldConverter<
-                          StructType, std::vector<std::unique_ptr<double>>>>(
-        field_name, field, new internal::RepeatedValueConverter<double>));
+  void RegisterRepeatedDouble(const std::string& field_name,
+                              ScopedVector<double> StructType::* field) {
+    fields_.push_back(
+        new internal::FieldConverter<StructType, ScopedVector<double> >(
+            field_name, field, new internal::RepeatedValueConverter<double>));
   }
 
-  void RegisterRepeatedBool(
-      const std::string& field_name,
-      std::vector<std::unique_ptr<bool>> StructType::*field) {
-    fields_.push_back(MakeUnique<internal::FieldConverter<
-                          StructType, std::vector<std::unique_ptr<bool>>>>(
-        field_name, field, new internal::RepeatedValueConverter<bool>));
+  void RegisterRepeatedBool(const std::string& field_name,
+                            ScopedVector<bool> StructType::* field) {
+    fields_.push_back(
+        new internal::FieldConverter<StructType, ScopedVector<bool> >(
+            field_name, field, new internal::RepeatedValueConverter<bool>));
   }
 
   template <class NestedType>
   void RegisterRepeatedCustomValue(
       const std::string& field_name,
-      std::vector<std::unique_ptr<NestedType>> StructType::*field,
+      ScopedVector<NestedType> StructType::* field,
       bool (*convert_func)(const base::Value*, NestedType*)) {
     fields_.push_back(
-        MakeUnique<internal::FieldConverter<
-            StructType, std::vector<std::unique_ptr<NestedType>>>>(
-            field_name, field,
+        new internal::FieldConverter<StructType, ScopedVector<NestedType> >(
+            field_name,
+            field,
             new internal::RepeatedCustomValueConverter<NestedType>(
                 convert_func)));
   }
 
   template <class NestedType>
-  void RegisterRepeatedMessage(
-      const std::string& field_name,
-      std::vector<std::unique_ptr<NestedType>> StructType::*field) {
+  void RegisterRepeatedMessage(const std::string& field_name,
+                               ScopedVector<NestedType> StructType::* field) {
     fields_.push_back(
-        MakeUnique<internal::FieldConverter<
-            StructType, std::vector<std::unique_ptr<NestedType>>>>(
-            field_name, field,
+        new internal::FieldConverter<StructType, ScopedVector<NestedType> >(
+            field_name,
+            field,
             new internal::RepeatedMessageConverter<NestedType>));
   }
 
@@ -497,9 +491,9 @@
     if (!value.GetAsDictionary(&dictionary_value))
       return false;
 
-    for (size_t i = 0; i < fields_.size(); ++i) {
+    for(size_t i = 0; i < fields_.size(); ++i) {
       const internal::FieldConverterBase<StructType>* field_converter =
-          fields_[i].get();
+          fields_[i];
       const base::Value* field = NULL;
       if (dictionary_value->Get(field_converter->field_path(), &field)) {
         if (!field_converter->ConvertField(*field, output)) {
@@ -512,8 +506,7 @@
   }
 
  private:
-  std::vector<std::unique_ptr<internal::FieldConverterBase<StructType>>>
-      fields_;
+  ScopedVector<internal::FieldConverterBase<StructType> > fields_;
 
   DISALLOW_COPY_AND_ASSIGN(JSONValueConverter);
 };
diff --git a/base/json/json_value_converter_unittest.cc b/base/json/json_value_converter_unittest.cc
index 6a603d3..56ade24 100644
--- a/base/json/json_value_converter_unittest.cc
+++ b/base/json/json_value_converter_unittest.cc
@@ -9,6 +9,7 @@
 #include <vector>
 
 #include "base/json/json_reader.h"
+#include "base/memory/scoped_vector.h"
 #include "base/strings/string_piece.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -26,8 +27,8 @@
   bool baz;
   bool bstruct;
   SimpleEnum simple_enum;
-  std::vector<std::unique_ptr<int>> ints;
-  std::vector<std::unique_ptr<std::string>> string_values;
+  ScopedVector<int> ints;
+  ScopedVector<std::string> string_values;
   SimpleMessage() : foo(0), baz(false), bstruct(false), simple_enum(FOO) {}
 
   static bool ParseSimpleEnum(const StringPiece& value, SimpleEnum* field) {
@@ -79,7 +80,7 @@
 struct NestedMessage {
   double foo;
   SimpleMessage child;
-  std::vector<std::unique_ptr<SimpleMessage>> children;
+  ScopedVector<SimpleMessage> children;
 
   NestedMessage() : foo(0) {}
 
@@ -162,7 +163,7 @@
   EXPECT_EQ("value_2", *message.child.string_values[1]);
 
   EXPECT_EQ(2, static_cast<int>(message.children.size()));
-  const SimpleMessage* first_child = message.children[0].get();
+  const SimpleMessage* first_child = message.children[0];
   ASSERT_TRUE(first_child);
   EXPECT_EQ(2, first_child->foo);
   EXPECT_EQ("foobar", first_child->bar);
@@ -171,7 +172,7 @@
   ASSERT_EQ(1U, first_child->string_values.size());
   EXPECT_EQ("value_1", *first_child->string_values[0]);
 
-  const SimpleMessage* second_child = message.children[1].get();
+  const SimpleMessage* second_child = message.children[1];
   ASSERT_TRUE(second_child);
   EXPECT_EQ(3, second_child->foo);
   EXPECT_EQ("barbaz", second_child->bar);
diff --git a/base/json/json_value_serializer_unittest.cc b/base/json/json_value_serializer_unittest.cc
index 1d58c61..0c079b7 100644
--- a/base/json/json_value_serializer_unittest.cc
+++ b/base/json/json_value_serializer_unittest.cc
@@ -78,10 +78,11 @@
 }
 
 void ValidateJsonList(const std::string& json) {
-  std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read(json));
-  ASSERT_TRUE(list);
+  std::unique_ptr<Value> root = JSONReader::Read(json);
+  ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
+  ListValue* list = static_cast<ListValue*>(root.get());
   ASSERT_EQ(1U, list->GetSize());
-  Value* elt = nullptr;
+  Value* elt = NULL;
   ASSERT_TRUE(list->Get(0, &elt));
   int value = 0;
   ASSERT_TRUE(elt && elt->GetAsInteger(&value));
@@ -97,7 +98,7 @@
   std::string error_message;
   std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
-  ASSERT_TRUE(value);
+  ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
   ASSERT_TRUE(error_message.empty());
   // Verify if the same JSON is still there.
@@ -108,7 +109,7 @@
 TEST(JSONValueDeserializerTest, ReadProperJSONFromStringPiece) {
   // Create a StringPiece for the substring of kProperJSONPadded that matches
   // kProperJSON.
-  StringPiece proper_json(kProperJSONPadded);
+  base::StringPiece proper_json(kProperJSONPadded);
   proper_json = proper_json.substr(5, proper_json.length() - 10);
   JSONStringValueDeserializer str_deserializer(proper_json);
 
@@ -116,7 +117,7 @@
   std::string error_message;
   std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
-  ASSERT_TRUE(value);
+  ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
   ASSERT_TRUE(error_message.empty());
   // Verify if the same JSON is still there.
@@ -133,14 +134,13 @@
   std::string error_message;
   std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
-  ASSERT_FALSE(value);
+  ASSERT_FALSE(value.get());
   ASSERT_NE(0, error_code);
   ASSERT_FALSE(error_message.empty());
-  // Repeat with commas allowed.
-  JSONStringValueDeserializer str_deserializer2(kProperJSONWithCommas,
-                                                JSON_ALLOW_TRAILING_COMMAS);
-  value = str_deserializer2.Deserialize(&error_code, &error_message);
-  ASSERT_TRUE(value);
+  // Now the flag is set and it must pass.
+  str_deserializer.set_allow_trailing_comma(true);
+  value = str_deserializer.Deserialize(&error_code, &error_message);
+  ASSERT_TRUE(value.get());
   ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
   // Verify if the same JSON is still there.
   CheckJSONIsStillTheSame(*value);
@@ -151,7 +151,7 @@
   ScopedTempDir tempdir;
   ASSERT_TRUE(tempdir.CreateUniqueTempDir());
   // Write it down in the file.
-  FilePath temp_file(tempdir.GetPath().AppendASCII("test.json"));
+  FilePath temp_file(tempdir.path().AppendASCII("test.json"));
   ASSERT_EQ(static_cast<int>(strlen(kProperJSON)),
             WriteFile(temp_file, kProperJSON, strlen(kProperJSON)));
 
@@ -162,7 +162,7 @@
   std::string error_message;
   std::unique_ptr<Value> value =
       file_deserializer.Deserialize(&error_code, &error_message);
-  ASSERT_TRUE(value);
+  ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
   ASSERT_TRUE(error_message.empty());
   // Verify if the same JSON is still there.
@@ -175,7 +175,7 @@
   ScopedTempDir tempdir;
   ASSERT_TRUE(tempdir.CreateUniqueTempDir());
   // Write it down in the file.
-  FilePath temp_file(tempdir.GetPath().AppendASCII("test.json"));
+  FilePath temp_file(tempdir.path().AppendASCII("test.json"));
   ASSERT_EQ(static_cast<int>(strlen(kProperJSONWithCommas)),
             WriteFile(temp_file, kProperJSONWithCommas,
                       strlen(kProperJSONWithCommas)));
@@ -187,31 +187,31 @@
   std::string error_message;
   std::unique_ptr<Value> value =
       file_deserializer.Deserialize(&error_code, &error_message);
-  ASSERT_FALSE(value);
+  ASSERT_FALSE(value.get());
   ASSERT_NE(0, error_code);
   ASSERT_FALSE(error_message.empty());
-  // Repeat with commas allowed.
-  JSONFileValueDeserializer file_deserializer2(temp_file,
-                                               JSON_ALLOW_TRAILING_COMMAS);
-  value = file_deserializer2.Deserialize(&error_code, &error_message);
-  ASSERT_TRUE(value);
+  // Now the flag is set and it must pass.
+  file_deserializer.set_allow_trailing_comma(true);
+  value = file_deserializer.Deserialize(&error_code, &error_message);
+  ASSERT_TRUE(value.get());
   ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
   // Verify if the same JSON is still there.
   CheckJSONIsStillTheSame(*value);
 }
 
 TEST(JSONValueDeserializerTest, AllowTrailingComma) {
+  std::unique_ptr<Value> root;
+  std::unique_ptr<Value> root_expected;
   static const char kTestWithCommas[] = "{\"key\": [true,],}";
   static const char kTestNoCommas[] = "{\"key\": [true]}";
 
-  JSONStringValueDeserializer deserializer(kTestWithCommas,
-                                           JSON_ALLOW_TRAILING_COMMAS);
+  JSONStringValueDeserializer deserializer(kTestWithCommas);
+  deserializer.set_allow_trailing_comma(true);
   JSONStringValueDeserializer deserializer_expected(kTestNoCommas);
-  std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
-  ASSERT_TRUE(root);
-  std::unique_ptr<Value> root_expected;
-  root_expected = deserializer_expected.Deserialize(nullptr, nullptr);
-  ASSERT_TRUE(root_expected);
+  root = deserializer.Deserialize(NULL, NULL);
+  ASSERT_TRUE(root.get());
+  root_expected = deserializer_expected.Deserialize(NULL, NULL);
+  ASSERT_TRUE(root_expected.get());
   ASSERT_TRUE(root->Equals(root_expected.get()));
 }
 
@@ -219,14 +219,16 @@
   static const char kOriginalSerialization[] =
     "{\"bool\":true,\"double\":3.14,\"int\":42,\"list\":[1,2],\"null\":null}";
   JSONStringValueDeserializer deserializer(kOriginalSerialization);
-  std::unique_ptr<DictionaryValue> root_dict =
-      DictionaryValue::From(deserializer.Deserialize(nullptr, nullptr));
-  ASSERT_TRUE(root_dict);
+  std::unique_ptr<Value> root = deserializer.Deserialize(NULL, NULL);
+  ASSERT_TRUE(root.get());
+  ASSERT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
 
-  Value* null_value = nullptr;
+  DictionaryValue* root_dict = static_cast<DictionaryValue*>(root.get());
+
+  Value* null_value = NULL;
   ASSERT_TRUE(root_dict->Get("null", &null_value));
   ASSERT_TRUE(null_value);
-  ASSERT_TRUE(null_value->IsType(Value::Type::NONE));
+  ASSERT_TRUE(null_value->IsType(Value::TYPE_NULL));
 
   bool bool_value = false;
   ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
@@ -327,9 +329,8 @@
 
   // escaped ascii text -> json
   JSONStringValueDeserializer deserializer(kExpected);
-  std::unique_ptr<Value> deserial_root =
-      deserializer.Deserialize(nullptr, nullptr);
-  ASSERT_TRUE(deserial_root);
+  std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
+  ASSERT_TRUE(deserial_root.get());
   DictionaryValue* dict_root =
       static_cast<DictionaryValue*>(deserial_root.get());
   string16 web_value;
@@ -352,9 +353,8 @@
 
   // escaped ascii text -> json
   JSONStringValueDeserializer deserializer(kExpected);
-  std::unique_ptr<Value> deserial_root =
-      deserializer.Deserialize(nullptr, nullptr);
-  ASSERT_TRUE(deserial_root);
+  std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
+  ASSERT_TRUE(deserial_root.get());
   DictionaryValue* dict_root =
       static_cast<DictionaryValue*>(deserial_root.get());
   string16 test_value;
@@ -364,8 +364,8 @@
   // Test converting escaped regular chars
   static const char kEscapedChars[] = "{\"test\":\"\\u0067\\u006f\"}";
   JSONStringValueDeserializer deserializer2(kEscapedChars);
-  deserial_root = deserializer2.Deserialize(nullptr, nullptr);
-  ASSERT_TRUE(deserial_root);
+  deserial_root = deserializer2.Deserialize(NULL, NULL);
+  ASSERT_TRUE(deserial_root.get());
   dict_root = static_cast<DictionaryValue*>(deserial_root.get());
   ASSERT_TRUE(dict_root->GetString("test", &test_value));
   ASSERT_EQ(ASCIIToUTF16("go"), test_value);
@@ -380,48 +380,54 @@
   ValidateJsonList("[ 1 //// ,2\r\n ]");
 
   // It's ok to have a comment in a string.
-  std::unique_ptr<ListValue> list =
-      ListValue::From(JSONReader::Read("[\"// ok\\n /* foo */ \"]"));
-  ASSERT_TRUE(list);
+  std::unique_ptr<Value> root = JSONReader::Read("[\"// ok\\n /* foo */ \"]");
+  ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
+  ListValue* list = static_cast<ListValue*>(root.get());
   ASSERT_EQ(1U, list->GetSize());
-  Value* elt = nullptr;
+  Value* elt = NULL;
   ASSERT_TRUE(list->Get(0, &elt));
   std::string value;
   ASSERT_TRUE(elt && elt->GetAsString(&value));
   ASSERT_EQ("// ok\n /* foo */ ", value);
 
   // You can't nest comments.
-  ASSERT_FALSE(JSONReader::Read("/* /* inner */ outer */ [ 1 ]"));
+  root = JSONReader::Read("/* /* inner */ outer */ [ 1 ]");
+  ASSERT_FALSE(root.get());
 
   // Not a open comment token.
-  ASSERT_FALSE(JSONReader::Read("/ * * / [1]"));
+  root = JSONReader::Read("/ * * / [1]");
+  ASSERT_FALSE(root.get());
 }
 
 #if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
-
 class JSONFileValueSerializerTest : public testing::Test {
  protected:
   void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); }
 
-  ScopedTempDir temp_dir_;
+  base::ScopedTempDir temp_dir_;
 };
 
 TEST_F(JSONFileValueSerializerTest, Roundtrip) {
-  FilePath original_file_path;
+  base::FilePath original_file_path;
   ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
-  original_file_path = original_file_path.AppendASCII("serializer_test.json");
+  original_file_path =
+      original_file_path.Append(FILE_PATH_LITERAL("serializer_test.json"));
 
   ASSERT_TRUE(PathExists(original_file_path));
 
   JSONFileValueDeserializer deserializer(original_file_path);
-  std::unique_ptr<DictionaryValue> root_dict =
-      DictionaryValue::From(deserializer.Deserialize(nullptr, nullptr));
-  ASSERT_TRUE(root_dict);
+  std::unique_ptr<Value> root;
+  root = deserializer.Deserialize(NULL, NULL);
 
-  Value* null_value = nullptr;
+  ASSERT_TRUE(root.get());
+  ASSERT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
+
+  DictionaryValue* root_dict = static_cast<DictionaryValue*>(root.get());
+
+  Value* null_value = NULL;
   ASSERT_TRUE(root_dict->Get("null", &null_value));
   ASSERT_TRUE(null_value);
-  ASSERT_TRUE(null_value->IsType(Value::Type::NONE));
+  ASSERT_TRUE(null_value->IsType(Value::TYPE_NULL));
 
   bool bool_value = false;
   ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
@@ -436,34 +442,8 @@
   ASSERT_EQ("hello", string_value);
 
   // Now try writing.
-  const FilePath written_file_path =
-      temp_dir_.GetPath().AppendASCII("test_output.js");
-
-  ASSERT_FALSE(PathExists(written_file_path));
-  JSONFileValueSerializer serializer(written_file_path);
-  ASSERT_TRUE(serializer.Serialize(*root_dict));
-  ASSERT_TRUE(PathExists(written_file_path));
-
-  // Now compare file contents.
-  EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
-  EXPECT_TRUE(DeleteFile(written_file_path, false));
-}
-
-TEST_F(JSONFileValueSerializerTest, RoundtripNested) {
-  FilePath original_file_path;
-  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
-  original_file_path =
-      original_file_path.AppendASCII("serializer_nested_test.json");
-
-  ASSERT_TRUE(PathExists(original_file_path));
-
-  JSONFileValueDeserializer deserializer(original_file_path);
-  std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
-  ASSERT_TRUE(root);
-
-  // Now try writing.
-  FilePath written_file_path =
-      temp_dir_.GetPath().AppendASCII("test_output.json");
+  const base::FilePath written_file_path =
+      temp_dir_.path().Append(FILE_PATH_LITERAL("test_output.js"));
 
   ASSERT_FALSE(PathExists(written_file_path));
   JSONFileValueSerializer serializer(written_file_path);
@@ -472,18 +452,46 @@
 
   // Now compare file contents.
   EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
-  EXPECT_TRUE(DeleteFile(written_file_path, false));
+  EXPECT_TRUE(base::DeleteFile(written_file_path, false));
+}
+
+TEST_F(JSONFileValueSerializerTest, RoundtripNested) {
+  base::FilePath original_file_path;
+  ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &original_file_path));
+  original_file_path = original_file_path.Append(
+      FILE_PATH_LITERAL("serializer_nested_test.json"));
+
+  ASSERT_TRUE(PathExists(original_file_path));
+
+  JSONFileValueDeserializer deserializer(original_file_path);
+  std::unique_ptr<Value> root;
+  root = deserializer.Deserialize(NULL, NULL);
+  ASSERT_TRUE(root.get());
+
+  // Now try writing.
+  base::FilePath written_file_path = temp_dir_.path().Append(
+      FILE_PATH_LITERAL("test_output.json"));
+
+  ASSERT_FALSE(PathExists(written_file_path));
+  JSONFileValueSerializer serializer(written_file_path);
+  ASSERT_TRUE(serializer.Serialize(*root));
+  ASSERT_TRUE(PathExists(written_file_path));
+
+  // Now compare file contents.
+  EXPECT_TRUE(TextContentsEqual(original_file_path, written_file_path));
+  EXPECT_TRUE(base::DeleteFile(written_file_path, false));
 }
 
 TEST_F(JSONFileValueSerializerTest, NoWhitespace) {
-  FilePath source_file_path;
+  base::FilePath source_file_path;
   ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &source_file_path));
-  source_file_path =
-      source_file_path.AppendASCII("serializer_test_nowhitespace.json");
+  source_file_path = source_file_path.Append(
+      FILE_PATH_LITERAL("serializer_test_nowhitespace.json"));
   ASSERT_TRUE(PathExists(source_file_path));
   JSONFileValueDeserializer deserializer(source_file_path);
-  std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
-  ASSERT_TRUE(root);
+  std::unique_ptr<Value> root;
+  root = deserializer.Deserialize(NULL, NULL);
+  ASSERT_TRUE(root.get());
 }
 #endif  // !__ANDROID__ && !__ANDROID_HOST__
 
diff --git a/base/json/json_writer.cc b/base/json/json_writer.cc
index 07b9d50..0b658ee 100644
--- a/base/json/json_writer.cc
+++ b/base/json/json_writer.cc
@@ -57,12 +57,12 @@
 
 bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
   switch (node.GetType()) {
-    case Value::Type::NONE: {
+    case Value::TYPE_NULL: {
       json_string_->append("null");
       return true;
     }
 
-    case Value::Type::BOOLEAN: {
+    case Value::TYPE_BOOLEAN: {
       bool value;
       bool result = node.GetAsBoolean(&value);
       DCHECK(result);
@@ -70,7 +70,7 @@
       return result;
     }
 
-    case Value::Type::INTEGER: {
+    case Value::TYPE_INTEGER: {
       int value;
       bool result = node.GetAsInteger(&value);
       DCHECK(result);
@@ -78,7 +78,7 @@
       return result;
     }
 
-    case Value::Type::DOUBLE: {
+    case Value::TYPE_DOUBLE: {
       double value;
       bool result = node.GetAsDouble(&value);
       DCHECK(result);
@@ -110,7 +110,7 @@
       return result;
     }
 
-    case Value::Type::STRING: {
+    case Value::TYPE_STRING: {
       std::string value;
       bool result = node.GetAsString(&value);
       DCHECK(result);
@@ -118,7 +118,7 @@
       return result;
     }
 
-    case Value::Type::LIST: {
+    case Value::TYPE_LIST: {
       json_string_->push_back('[');
       if (pretty_print_)
         json_string_->push_back(' ');
@@ -128,7 +128,7 @@
       bool result = node.GetAsList(&list);
       DCHECK(result);
       for (const auto& value : *list) {
-        if (omit_binary_values_ && value->GetType() == Value::Type::BINARY)
+        if (omit_binary_values_ && value->GetType() == Value::TYPE_BINARY)
           continue;
 
         if (first_value_has_been_output) {
@@ -149,7 +149,7 @@
       return result;
     }
 
-    case Value::Type::DICTIONARY: {
+    case Value::TYPE_DICTIONARY: {
       json_string_->push_back('{');
       if (pretty_print_)
         json_string_->append(kPrettyPrintLineEnding);
@@ -161,7 +161,7 @@
       for (DictionaryValue::Iterator itr(*dict); !itr.IsAtEnd();
            itr.Advance()) {
         if (omit_binary_values_ &&
-            itr.value().GetType() == Value::Type::BINARY) {
+            itr.value().GetType() == Value::TYPE_BINARY) {
           continue;
         }
 
@@ -194,7 +194,7 @@
       return result;
     }
 
-    case Value::Type::BINARY:
+    case Value::TYPE_BINARY:
       // Successful only if we're allowed to omit it.
       DLOG_IF(ERROR, !omit_binary_values_) << "Cannot serialize binary value.";
       return omit_binary_values_;
diff --git a/base/json/json_writer.h b/base/json/json_writer.h
index 57cb8c1..ef43341 100644
--- a/base/json/json_writer.h
+++ b/base/json/json_writer.h
@@ -37,8 +37,6 @@
   };
 
   // Given a root node, generates a JSON string and puts it into |json|.
-  // The output string is overwritten and not appended.
-  //
   // TODO(tc): Should we generate json if it would be invalid json (e.g.,
   // |node| is not a DictionaryValue/ListValue or if there are inf/-inf float
   // values)? Return true on success and false on failure.
diff --git a/base/json/json_writer_unittest.cc b/base/json/json_writer_unittest.cc
index 6cb236f..233ac5e 100644
--- a/base/json/json_writer_unittest.cc
+++ b/base/json/json_writer_unittest.cc
@@ -27,27 +27,27 @@
   EXPECT_EQ("[]", output_js);
 
   // Test integer values.
-  EXPECT_TRUE(JSONWriter::Write(Value(42), &output_js));
+  EXPECT_TRUE(JSONWriter::Write(FundamentalValue(42), &output_js));
   EXPECT_EQ("42", output_js);
 
   // Test boolean values.
-  EXPECT_TRUE(JSONWriter::Write(Value(true), &output_js));
+  EXPECT_TRUE(JSONWriter::Write(FundamentalValue(true), &output_js));
   EXPECT_EQ("true", output_js);
 
   // Test Real values should always have a decimal or an 'e'.
-  EXPECT_TRUE(JSONWriter::Write(Value(1.0), &output_js));
+  EXPECT_TRUE(JSONWriter::Write(FundamentalValue(1.0), &output_js));
   EXPECT_EQ("1.0", output_js);
 
   // Test Real values in the the range (-1, 1) must have leading zeros
-  EXPECT_TRUE(JSONWriter::Write(Value(0.2), &output_js));
+  EXPECT_TRUE(JSONWriter::Write(FundamentalValue(0.2), &output_js));
   EXPECT_EQ("0.2", output_js);
 
   // Test Real values in the the range (-1, 1) must have leading zeros
-  EXPECT_TRUE(JSONWriter::Write(Value(-0.8), &output_js));
+  EXPECT_TRUE(JSONWriter::Write(FundamentalValue(-0.8), &output_js));
   EXPECT_EQ("-0.8", output_js);
 
   // Test String values.
-  EXPECT_TRUE(JSONWriter::Write(Value("foo"), &output_js));
+  EXPECT_TRUE(JSONWriter::Write(StringValue("foo"), &output_js));
   EXPECT_EQ("\"foo\"", output_js);
 }
 
@@ -61,7 +61,7 @@
   std::unique_ptr<DictionaryValue> inner_dict(new DictionaryValue());
   inner_dict->SetInteger("inner int", 10);
   list->Append(std::move(inner_dict));
-  list->Append(MakeUnique<ListValue>());
+  list->Append(WrapUnique(new ListValue()));
   list->AppendBoolean(true);
   root_dict.Set("list", std::move(list));
 
@@ -119,9 +119,9 @@
 
   ListValue binary_list;
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
-  binary_list.Append(MakeUnique<Value>(5));
+  binary_list.Append(WrapUnique(new FundamentalValue(5)));
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
-  binary_list.Append(MakeUnique<Value>(2));
+  binary_list.Append(WrapUnique(new FundamentalValue(2)));
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
   EXPECT_FALSE(JSONWriter::Write(binary_list, &output_js));
   EXPECT_TRUE(JSONWriter::WriteWithOptions(
@@ -144,7 +144,7 @@
   std::string output_js;
 
   // Test allowing a double with no fractional part to be written as an integer.
-  Value double_value(1e10);
+  FundamentalValue double_value(1e10);
   EXPECT_TRUE(JSONWriter::WriteWithOptions(
       double_value, JSONWriter::OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION,
       &output_js));
diff --git a/base/lazy_instance.h b/base/lazy_instance.h
index 5481f90..ac970c5 100644
--- a/base/lazy_instance.h
+++ b/base/lazy_instance.h
@@ -24,11 +24,11 @@
 // requires that Type be a complete type so we can determine the size.
 //
 // Example usage:
-//   static LazyInstance<MyClass>::Leaky inst = LAZY_INSTANCE_INITIALIZER;
+//   static LazyInstance<MyClass> my_instance = LAZY_INSTANCE_INITIALIZER;
 //   void SomeMethod() {
-//     inst.Get().SomeMethod();  // MyClass::SomeMethod()
+//     my_instance.Get().SomeMethod();  // MyClass::SomeMethod()
 //
-//     MyClass* ptr = inst.Pointer();
+//     MyClass* ptr = my_instance.Pointer();
 //     ptr->DoDoDo();  // MyClass::DoDoDo
 //   }
 
@@ -57,15 +57,22 @@
 namespace base {
 
 template <typename Type>
-struct LazyInstanceTraitsBase {
+struct DefaultLazyInstanceTraits {
+  static const bool kRegisterOnExit = true;
+#ifndef NDEBUG
+  static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+
   static Type* New(void* instance) {
-    DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (ALIGNOF(Type) - 1), 0u);
+    DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (ALIGNOF(Type) - 1), 0u)
+        << ": Bad boy, the buffer passed to placement new is not aligned!\n"
+        "This may break some stuff like SSE-based optimizations assuming the "
+        "<Type> objects are word aligned.";
     // Use placement new to initialize our instance in our preallocated space.
     // The parenthesis is very important here to force POD type initialization.
     return new (instance) Type();
   }
-
-  static void CallDestructor(Type* instance) {
+  static void Delete(Type* instance) {
     // Explicitly call the destructor.
     instance->~Type();
   }
@@ -75,25 +82,6 @@
 // can implement the more complicated pieces out of line in the .cc file.
 namespace internal {
 
-// This traits class causes destruction the contained Type at process exit via
-// AtExitManager. This is probably generally not what you want. Instead, prefer
-// Leaky below.
-template <typename Type>
-struct DestructorAtExitLazyInstanceTraits {
-  static const bool kRegisterOnExit = true;
-#if DCHECK_IS_ON()
-  static const bool kAllowedToAccessOnNonjoinableThread = false;
-#endif
-
-  static Type* New(void* instance) {
-    return LazyInstanceTraitsBase<Type>::New(instance);
-  }
-
-  static void Delete(Type* instance) {
-    LazyInstanceTraitsBase<Type>::CallDestructor(instance);
-  }
-};
-
 // Use LazyInstance<T>::Leaky for a less-verbose call-site typedef; e.g.:
 // base::LazyInstance<T>::Leaky my_leaky_lazy_instance;
 // instead of:
@@ -105,22 +93,19 @@
 template <typename Type>
 struct LeakyLazyInstanceTraits {
   static const bool kRegisterOnExit = false;
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
   static const bool kAllowedToAccessOnNonjoinableThread = true;
 #endif
 
   static Type* New(void* instance) {
     ANNOTATE_SCOPED_MEMORY_LEAK;
-    return LazyInstanceTraitsBase<Type>::New(instance);
+    return DefaultLazyInstanceTraits<Type>::New(instance);
   }
   static void Delete(Type*) {}
 };
 
-template <typename Type>
-struct ErrorMustSelectLazyOrDestructorAtExitForLazyInstance {};
-
 // Our AtomicWord doubles as a spinlock, where a value of
-// kLazyInstanceStateCreating means the spinlock is being held for creation.
+// kBeingCreatedMarker means the spinlock is being held for creation.
 static const subtle::AtomicWord kLazyInstanceStateCreating = 1;
 
 // Check if instance needs to be created. If so return true otherwise
@@ -137,10 +122,7 @@
 
 }  // namespace internal
 
-template <
-    typename Type,
-    typename Traits =
-        internal::ErrorMustSelectLazyOrDestructorAtExitForLazyInstance<Type>>
+template <typename Type, typename Traits = DefaultLazyInstanceTraits<Type> >
 class LazyInstance {
  public:
   // Do not define a destructor, as doing so makes LazyInstance a
@@ -152,16 +134,14 @@
 
   // Convenience typedef to avoid having to repeat Type for leaky lazy
   // instances.
-  typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type>> Leaky;
-  typedef LazyInstance<Type, internal::DestructorAtExitLazyInstanceTraits<Type>>
-      DestructorAtExit;
+  typedef LazyInstance<Type, internal::LeakyLazyInstanceTraits<Type> > Leaky;
 
   Type& Get() {
     return *Pointer();
   }
 
   Type* Pointer() {
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
     // Avoid making TLS lookup on release builds.
     if (!Traits::kAllowedToAccessOnNonjoinableThread)
       ThreadRestrictions::AssertSingletonAllowed();
diff --git a/base/lazy_instance_unittest.cc b/base/lazy_instance_unittest.cc
index 0aa4659..8947b12 100644
--- a/base/lazy_instance_unittest.cc
+++ b/base/lazy_instance_unittest.cc
@@ -45,8 +45,7 @@
 
 class SlowDelegate : public base::DelegateSimpleThread::Delegate {
  public:
-  explicit SlowDelegate(
-      base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy)
+  explicit SlowDelegate(base::LazyInstance<SlowConstructor>* lazy)
       : lazy_(lazy) {}
 
   void Run() override {
@@ -55,13 +54,13 @@
   }
 
  private:
-  base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy_;
+  base::LazyInstance<SlowConstructor>* lazy_;
 };
 
 }  // namespace
 
-static base::LazyInstance<ConstructAndDestructLogger>::DestructorAtExit
-    lazy_logger = LAZY_INSTANCE_INITIALIZER;
+static base::LazyInstance<ConstructAndDestructLogger> lazy_logger =
+    LAZY_INSTANCE_INITIALIZER;
 
 TEST(LazyInstanceTest, Basic) {
   {
@@ -82,7 +81,7 @@
   EXPECT_EQ(4, destructed_seq_.GetNext());
 }
 
-static base::LazyInstance<SlowConstructor>::DestructorAtExit lazy_slow =
+static base::LazyInstance<SlowConstructor> lazy_slow =
     LAZY_INSTANCE_INITIALIZER;
 
 TEST(LazyInstanceTest, ConstructorThreadSafety) {
@@ -127,8 +126,7 @@
   bool deleted1 = false;
   {
     base::ShadowingAtExitManager shadow;
-    static base::LazyInstance<DeleteLogger>::DestructorAtExit test =
-        LAZY_INSTANCE_INITIALIZER;
+    static base::LazyInstance<DeleteLogger> test = LAZY_INSTANCE_INITIALIZER;
     test.Get().SetDeletedPtr(&deleted1);
   }
   EXPECT_TRUE(deleted1);
@@ -166,12 +164,9 @@
   // Create some static instances with increasing sizes and alignment
   // requirements. By ordering this way, the linker will need to do some work to
   // ensure proper alignment of the static data.
-  static LazyInstance<AlignedData<4>>::DestructorAtExit align4 =
-      LAZY_INSTANCE_INITIALIZER;
-  static LazyInstance<AlignedData<32>>::DestructorAtExit align32 =
-      LAZY_INSTANCE_INITIALIZER;
-  static LazyInstance<AlignedData<4096>>::DestructorAtExit align4096 =
-      LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<4> > align4 = LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<32> > align32 = LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<4096> > align4096 = LAZY_INSTANCE_INITIALIZER;
 
   EXPECT_ALIGNED(align4.Pointer(), 4);
   EXPECT_ALIGNED(align32.Pointer(), 32);
diff --git a/base/location.h b/base/location.h
index dd78515..21e270c 100644
--- a/base/location.h
+++ b/base/location.h
@@ -97,7 +97,7 @@
 BASE_EXPORT const void* GetProgramCounter();
 
 // Define a macro to record the current source location.
-#define FROM_HERE FROM_HERE_WITH_EXPLICIT_FUNCTION(__func__)
+#define FROM_HERE FROM_HERE_WITH_EXPLICIT_FUNCTION(__FUNCTION__)
 
 #define FROM_HERE_WITH_EXPLICIT_FUNCTION(function_name)                        \
     ::tracked_objects::Location(function_name,                                 \
diff --git a/base/logging.cc b/base/logging.cc
index a8736ba..381e9ee 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -7,12 +7,12 @@
 #include <limits.h>
 #include <stdint.h>
 
-#include "base/debug/activity_tracker.h"
 #include "base/macros.h"
 #include "build/build_config.h"
 
 #if defined(OS_WIN)
 #include <io.h>
+#include <windows.h>
 typedef HANDLE FileHandle;
 typedef HANDLE MutexHandle;
 // Windows warns on using write().  It prefers _write().
@@ -346,11 +346,6 @@
 
 }  // namespace
 
-// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to have
-// an object of the correct type on the LHS of the unused part of the ternary
-// operator.
-std::ostream* g_swallow_stream;
-
 LoggingSettings::LoggingSettings()
     : logging_dest(LOG_DEFAULT),
       log_file(nullptr),
@@ -742,12 +737,6 @@
   }
 
   if (severity_ == LOG_FATAL) {
-    // Write the log message to the global activity tracker, if running.
-    base::debug::GlobalActivityTracker* tracker =
-        base::debug::GlobalActivityTracker::Get();
-    if (tracker)
-      tracker->RecordLogMessage(str_newline);
-
     // Ensure the first characters of the string are on the stack so they
     // are contained in minidumps for diagnostic purposes.
     char str_stack[1024];
@@ -791,13 +780,18 @@
   if (g_log_thread_id)
     stream_ << base::PlatformThread::CurrentId() << ':';
   if (g_log_timestamp) {
-#if defined(OS_POSIX)
-    timeval tv;
-    gettimeofday(&tv, nullptr);
-    time_t t = tv.tv_sec;
+    time_t t = time(nullptr);
+#if defined(__ANDROID__) || defined(ANDROID)
     struct tm local_time;
-	memset(&local_time, 0, sizeof(local_time));
+    memset(&local_time, 0, sizeof(local_time));
+#else
+    struct tm local_time = {0};
+#endif
+#ifdef _MSC_VER
+    localtime_s(&local_time, &t);
+#else
     localtime_r(&t, &local_time);
+#endif
     struct tm* tm_time = &local_time;
     stream_ << std::setfill('0')
             << std::setw(2) << 1 + tm_time->tm_mon
@@ -806,23 +800,7 @@
             << std::setw(2) << tm_time->tm_hour
             << std::setw(2) << tm_time->tm_min
             << std::setw(2) << tm_time->tm_sec
-            << '.'
-            << std::setw(6) << tv.tv_usec
             << ':';
-#elif defined(OS_WIN)
-    SYSTEMTIME local_time;
-    GetLocalTime(&local_time);
-    stream_ << std::setfill('0')
-            << std::setw(2) << local_time.wMonth
-            << std::setw(2) << local_time.wDay
-            << '/'
-            << std::setw(2) << local_time.wHour
-            << std::setw(2) << local_time.wMinute
-            << std::setw(2) << local_time.wSecond
-            << '.'
-            << std::setw(3) << local_time.wMilliseconds
-            << ':';
-#endif
   }
   if (g_log_tickcount)
     stream_ << TickCount() << ':';
diff --git a/base/logging.h b/base/logging.h
index 7ca018e..2bfc972 100644
--- a/base/logging.h
+++ b/base/logging.h
@@ -15,7 +15,6 @@
 #include <utility>
 
 #include "base/base_export.h"
-#include "base/compiler_specific.h"
 #include "base/debug/debugger.h"
 #include "base/macros.h"
 #include "base/template_util.h"
@@ -141,6 +140,34 @@
 // There is the special severity of DFATAL, which logs FATAL in debug mode,
 // ERROR in normal mode.
 
+// Note that "The behavior of a C++ program is undefined if it adds declarations
+// or definitions to namespace std or to a namespace within namespace std unless
+// otherwise specified." --C++11[namespace.std]
+//
+// We've checked that this particular definition has the intended behavior on
+// our implementations, but it's prone to breaking in the future, and please
+// don't imitate this in your own definitions without checking with some
+// standard library experts.
+namespace std {
+// These functions are provided as a convenience for logging, which is where we
+// use streams (it is against Google style to use streams in other places). It
+// is designed to allow you to emit non-ASCII Unicode strings to the log file,
+// which is normally ASCII. It is relatively slow, so try not to use it for
+// common cases. Non-ASCII characters will be converted to UTF-8 by these
+// operators.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out, const wchar_t* wstr);
+inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
+  return out << wstr.c_str();
+}
+
+template<typename T>
+typename std::enable_if<std::is_enum<T>::value, std::ostream&>::type operator<<(
+    std::ostream& out, T value) {
+  return out << static_cast<typename std::underlying_type<T>::type>(value);
+}
+
+}  // namespace std
+
 namespace logging {
 
 // TODO(avi): do we want to do a unification of character types here?
@@ -310,16 +337,15 @@
 // by LOG() and LOG_IF, etc. Since these are used all over our code, it's
 // better to have compact code for these operations.
 #define COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...) \
-  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_INFO, ##__VA_ARGS__)
-#define COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...)              \
-  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_WARNING, \
-                       ##__VA_ARGS__)
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_INFO , ##__VA_ARGS__)
+#define COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_WARNING , ##__VA_ARGS__)
 #define COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...) \
-  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_ERROR, ##__VA_ARGS__)
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_ERROR , ##__VA_ARGS__)
 #define COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...) \
-  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_FATAL, ##__VA_ARGS__)
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_FATAL , ##__VA_ARGS__)
 #define COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
-  ::logging::ClassName(__FILE__, __LINE__, ::logging::LOG_DFATAL, ##__VA_ARGS__)
+  logging::ClassName(__FILE__, __LINE__, logging::LOG_DFATAL , ##__VA_ARGS__)
 
 #define COMPACT_GOOGLE_LOG_INFO \
   COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
@@ -380,7 +406,7 @@
 
 // The VLOG macros log with negative verbosities.
 #define VLOG_STREAM(verbose_level) \
-  ::logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
+  logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
 
 #define VLOG(verbose_level) \
   LAZY_STREAM(VLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
@@ -391,11 +417,11 @@
 
 #if defined (OS_WIN)
 #define VPLOG_STREAM(verbose_level) \
-  ::logging::Win32ErrorLogMessage(__FILE__, __LINE__, -verbose_level, \
+  logging::Win32ErrorLogMessage(__FILE__, __LINE__, -verbose_level, \
     ::logging::GetLastSystemErrorCode()).stream()
 #elif defined(OS_POSIX)
 #define VPLOG_STREAM(verbose_level) \
-  ::logging::ErrnoLogMessage(__FILE__, __LINE__, -verbose_level, \
+  logging::ErrnoLogMessage(__FILE__, __LINE__, -verbose_level, \
     ::logging::GetLastSystemErrorCode()).stream()
 #endif
 
@@ -408,7 +434,7 @@
 
 // TODO(akalin): Add more VLOG variants, e.g. VPLOG.
 
-#define LOG_ASSERT(condition) \
+#define LOG_ASSERT(condition)  \
   LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
 
 #if defined(OS_WIN)
@@ -427,23 +453,9 @@
 #define PLOG_IF(severity, condition) \
   LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
 
-BASE_EXPORT extern std::ostream* g_swallow_stream;
-
-// Note that g_swallow_stream is used instead of an arbitrary LOG() stream to
-// avoid the creation of an object with a non-trivial destructor (LogMessage).
-// On MSVC x86 (checked on 2015 Update 3), this causes a few additional
-// pointless instructions to be emitted even at full optimization level, even
-// though the : arm of the ternary operator is clearly never executed. Using a
-// simpler object to be &'d with Voidify() avoids these extra instructions.
-// Using a simpler POD object with a templated operator<< also works to avoid
-// these instructions. However, this causes warnings on statically defined
-// implementations of operator<<(std::ostream, ...) in some .cc files, because
-// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an
-// ostream* also is not suitable, because some compilers warn of undefined
-// behavior.
-#define EAT_STREAM_PARAMETERS \
-  true ? (void)0              \
-       : ::logging::LogMessageVoidify() & (*::logging::g_swallow_stream)
+// The actual stream used isn't important.
+#define EAT_STREAM_PARAMETERS                                           \
+  true ? (void) 0 : ::logging::LogMessageVoidify() & LOG_STREAM(FATAL)
 
 // Captures the result of a CHECK_EQ (for example) and facilitates testing as a
 // boolean.
@@ -460,84 +472,6 @@
   std::string* message_;
 };
 
-// Crashes in the fastest possible way with no attempt at logging.
-// There are different constraints to satisfy here, see http://crbug.com/664209
-// for more context:
-// - The trap instructions, and hence the PC value at crash time, have to be
-//   distinct and not get folded into the same opcode by the compiler.
-//   On Linux/Android this is tricky because GCC still folds identical
-//   asm volatile blocks. The workaround is generating distinct opcodes for
-//   each CHECK using the __COUNTER__ macro.
-// - The debug info for the trap instruction has to be attributed to the source
-//   line that has the CHECK(), to make crash reports actionable. This rules
-//   out the ability of using a inline function, at least as long as clang
-//   doesn't support attribute(artificial).
-// - Failed CHECKs should produce a signal that is distinguishable from an
-//   invalid memory access, to improve the actionability of crash reports.
-// - The compiler should treat the CHECK as no-return instructions, so that the
-//   trap code can be efficiently packed in the prologue of the function and
-//   doesn't interfere with the main execution flow.
-// - When debugging, developers shouldn't be able to accidentally step over a
-//   CHECK. This is achieved by putting opcodes that will cause a non
-//   continuable exception after the actual trap instruction.
-// - Don't cause too much binary bloat.
-#if defined(COMPILER_GCC)
-
-#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
-// int 3 will generate a SIGTRAP.
-#define TRAP_SEQUENCE() \
-  asm volatile(         \
-      "int3; ud2; push %0;" ::"i"(static_cast<unsigned char>(__COUNTER__)))
-
-#elif defined(ARCH_CPU_ARMEL) && !defined(OS_NACL)
-// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running
-// as a 32 bit userspace app on arm64. There doesn't seem to be any way to
-// cause a SIGTRAP from userspace without using a syscall (which would be a
-// problem for sandboxing).
-#define TRAP_SEQUENCE() \
-  asm volatile("bkpt #0; udf %0;" ::"i"(__COUNTER__ % 256))
-
-#elif defined(ARCH_CPU_ARM64) && !defined(OS_NACL)
-// This will always generate a SIGTRAP on arm64.
-#define TRAP_SEQUENCE() \
-  asm volatile("brk #0; hlt %0;" ::"i"(__COUNTER__ % 65536))
-
-#else
-// Crash report accuracy will not be guaranteed on other architectures, but at
-// least this will crash as expected.
-#define TRAP_SEQUENCE() __builtin_trap()
-#endif  // ARCH_CPU_*
-
-#define IMMEDIATE_CRASH()    \
-  ({                         \
-    TRAP_SEQUENCE();         \
-    __builtin_unreachable(); \
-  })
-
-#elif defined(COMPILER_MSVC)
-
-// Clang is cleverer about coalescing int3s, so we need to add a unique-ish
-// instruction following the __debugbreak() to have it emit distinct locations
-// for CHECKs rather than collapsing them all together. It would be nice to use
-// a short intrinsic to do this (and perhaps have only one implementation for
-// both clang and MSVC), however clang-cl currently does not support intrinsics.
-// On the flip side, MSVC x64 doesn't support inline asm. So, we have to have
-// two implementations. Normally clang-cl's version will be 5 bytes (1 for
-// `int3`, 2 for `ud2`, 2 for `push byte imm`, however, TODO(scottmg):
-// https://crbug.com/694670 clang-cl doesn't currently support %'ing
-// __COUNTER__, so eventually it will emit the dword form of push.
-// TODO(scottmg): Reinvestigate a short sequence that will work on both
-// compilers once clang supports more intrinsics. See https://crbug.com/693713.
-#if defined(__clang__)
-#define IMMEDIATE_CRASH() ({__asm int 3 __asm ud2 __asm push __COUNTER__})
-#else
-#define IMMEDIATE_CRASH() __debugbreak()
-#endif  // __clang__
-
-#else
-#error Port
-#endif
-
 // CHECK dies with a fatal error if condition is not true.  It is *not*
 // controlled by NDEBUG, so the check will be executed regardless of
 // compilation mode.
@@ -547,14 +481,20 @@
 
 #if defined(OFFICIAL_BUILD) && defined(NDEBUG)
 
-// Make all CHECK functions discard their log strings to reduce code bloat, and
-// improve performance, for official release builds.
-//
+// Make all CHECK functions discard their log strings to reduce code
+// bloat, and improve performance, for official release builds.
+
+#if defined(COMPILER_GCC) || __clang__
+#define LOGGING_CRASH() __builtin_trap()
+#else
+#define LOGGING_CRASH() ((void)(*(volatile char*)0 = 0))
+#endif
+
 // This is not calling BreakDebugger since this is called frequently, and
 // calling an out-of-line function instead of a noreturn inline macro prevents
 // compiler optimizations.
-#define CHECK(condition) \
-  UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_STREAM_PARAMETERS
+#define CHECK(condition)                                                \
+  !(condition) ? LOGGING_CRASH() : EAT_STREAM_PARAMETERS
 
 #define PCHECK(condition) CHECK(condition)
 
@@ -570,26 +510,26 @@
 // __analysis_assume gets confused on some conditions:
 // http://randomascii.wordpress.com/2011/09/13/analyze-for-visual-studio-the-ugly-part-5/
 
-#define CHECK(condition)                    \
-  __analysis_assume(!!(condition)),         \
-      LAZY_STREAM(LOG_STREAM(FATAL), false) \
-          << "Check failed: " #condition ". "
+#define CHECK(condition)                \
+  __analysis_assume(!!(condition)),     \
+  LAZY_STREAM(LOG_STREAM(FATAL), false) \
+  << "Check failed: " #condition ". "
 
-#define PCHECK(condition)                    \
-  __analysis_assume(!!(condition)),          \
-      LAZY_STREAM(PLOG_STREAM(FATAL), false) \
-          << "Check failed: " #condition ". "
+#define PCHECK(condition)                \
+  __analysis_assume(!!(condition)),      \
+  LAZY_STREAM(PLOG_STREAM(FATAL), false) \
+  << "Check failed: " #condition ". "
 
 #else  // _PREFAST_
 
 // Do as much work as possible out of line to reduce inline code size.
-#define CHECK(condition)                                                      \
-  LAZY_STREAM(::logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \
+#define CHECK(condition)                                                    \
+  LAZY_STREAM(logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \
               !(condition))
 
 #define PCHECK(condition)                       \
   LAZY_STREAM(PLOG_STREAM(FATAL), !(condition)) \
-      << "Check failed: " #condition ". "
+  << "Check failed: " #condition ". "
 
 #endif  // _PREFAST_
 
@@ -601,12 +541,12 @@
 //   CHECK_EQ(2, a);
 #define CHECK_OP(name, op, val1, val2)                                         \
   switch (0) case 0: default:                                                  \
-  if (::logging::CheckOpResult true_if_passed =                                \
-      ::logging::Check##name##Impl((val1), (val2),                             \
-                                   #val1 " " #op " " #val2))                   \
+  if (logging::CheckOpResult true_if_passed =                                  \
+      logging::Check##name##Impl((val1), (val2),                               \
+                                 #val1 " " #op " " #val2))                     \
    ;                                                                           \
   else                                                                         \
-    ::logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
+    logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
 
 #endif  // !(OFFICIAL_BUILD && NDEBUG)
 
@@ -614,26 +554,12 @@
 // it uses the definition for operator<<, with a few special cases below.
 template <typename T>
 inline typename std::enable_if<
-    base::internal::SupportsOstreamOperator<const T&>::value &&
-        !std::is_function<typename std::remove_pointer<T>::type>::value,
+    base::internal::SupportsOstreamOperator<const T&>::value,
     void>::type
 MakeCheckOpValueString(std::ostream* os, const T& v) {
   (*os) << v;
 }
 
-// Provide an overload for functions and function pointers. Function pointers
-// don't implicitly convert to void* but do implicitly convert to bool, so
-// without this function pointers are always printed as 1 or 0. (MSVC isn't
-// standards-conforming here and converts function pointers to regular
-// pointers, so this is a no-op for MSVC.)
-template <typename T>
-inline typename std::enable_if<
-    std::is_function<typename std::remove_pointer<T>::type>::value,
-    void>::type
-MakeCheckOpValueString(std::ostream* os, const T& v) {
-  (*os) << reinterpret_cast<const void*>(v);
-}
-
 // We need overloads for enums that don't support operator<<.
 // (i.e. scoped enums where no operator<< overload was declared).
 template <typename T>
@@ -685,20 +611,16 @@
 // The (int, int) specialization works around the issue that the compiler
 // will not instantiate the template version of the function on values of
 // unnamed enum type - see comment below.
-#define DEFINE_CHECK_OP_IMPL(name, op)                                       \
-  template <class t1, class t2>                                              \
-  inline std::string* Check##name##Impl(const t1& v1, const t2& v2,          \
-                                        const char* names) {                 \
-    if (v1 op v2)                                                            \
-      return NULL;                                                           \
-    else                                                                     \
-      return ::logging::MakeCheckOpString(v1, v2, names);                    \
-  }                                                                          \
+#define DEFINE_CHECK_OP_IMPL(name, op) \
+  template <class t1, class t2> \
+  inline std::string* Check##name##Impl(const t1& v1, const t2& v2, \
+                                        const char* names) { \
+    if (v1 op v2) return NULL; \
+    else return MakeCheckOpString(v1, v2, names); \
+  } \
   inline std::string* Check##name##Impl(int v1, int v2, const char* names) { \
-    if (v1 op v2)                                                            \
-      return NULL;                                                           \
-    else                                                                     \
-      return ::logging::MakeCheckOpString(v1, v2, names);                    \
+    if (v1 op v2) return NULL; \
+    else return MakeCheckOpString(v1, v2, names); \
   }
 DEFINE_CHECK_OP_IMPL(EQ, ==)
 DEFINE_CHECK_OP_IMPL(NE, !=)
@@ -716,6 +638,12 @@
 #define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
 
 #if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#define ENABLE_DLOG 0
+#else
+#define ENABLE_DLOG 1
+#endif
+
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
 #define DCHECK_IS_ON() 0
 #else
 #define DCHECK_IS_ON() 1
@@ -723,7 +651,7 @@
 
 // Definitions for DLOG et al.
 
-#if DCHECK_IS_ON()
+#if ENABLE_DLOG
 
 #define DLOG_IS_ON(severity) LOG_IS_ON(severity)
 #define DLOG_IF(severity, condition) LOG_IF(severity, condition)
@@ -732,11 +660,12 @@
 #define DVLOG_IF(verboselevel, condition) VLOG_IF(verboselevel, condition)
 #define DVPLOG_IF(verboselevel, condition) VPLOG_IF(verboselevel, condition)
 
-#else  // DCHECK_IS_ON()
+#else  // ENABLE_DLOG
 
-// If !DCHECK_IS_ON(), we want to avoid emitting any references to |condition|
-// (which may reference a variable defined only if DCHECK_IS_ON()).
-// Contrast this with DCHECK et al., which has different behavior.
+// If ENABLE_DLOG is off, we want to avoid emitting any references to
+// |condition| (which may reference a variable defined only if NDEBUG
+// is not defined).  Contrast this with DCHECK et al., which has
+// different behavior.
 
 #define DLOG_IS_ON(severity) false
 #define DLOG_IF(severity, condition) EAT_STREAM_PARAMETERS
@@ -745,7 +674,19 @@
 #define DVLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
 #define DVPLOG_IF(verboselevel, condition) EAT_STREAM_PARAMETERS
 
-#endif  // DCHECK_IS_ON()
+#endif  // ENABLE_DLOG
+
+// DEBUG_MODE is for uses like
+//   if (DEBUG_MODE) foo.CheckThatFoo();
+// instead of
+//   #ifndef NDEBUG
+//     foo.CheckThatFoo();
+//   #endif
+//
+// We tie its state to ENABLE_DLOG.
+enum { DEBUG_MODE = ENABLE_DLOG };
+
+#undef ENABLE_DLOG
 
 #define DLOG(severity)                                          \
   LAZY_STREAM(LOG_STREAM(severity), DLOG_IS_ON(severity))
@@ -780,63 +721,31 @@
 // whether DCHECKs are enabled; this is so that we don't get unused
 // variable warnings if the only use of a variable is in a DCHECK.
 // This behavior is different from DLOG_IF et al.
-//
-// Note that the definition of the DCHECK macros depends on whether or not
-// DCHECK_IS_ON() is true. When DCHECK_IS_ON() is false, the macros use
-// EAT_STREAM_PARAMETERS to avoid expressions that would create temporaries.
 
 #if defined(_PREFAST_) && defined(OS_WIN)
 // See comments on the previous use of __analysis_assume.
 
-#define DCHECK(condition)                    \
-  __analysis_assume(!!(condition)),          \
-      LAZY_STREAM(LOG_STREAM(DCHECK), false) \
-          << "Check failed: " #condition ". "
+#define DCHECK(condition)                                               \
+  __analysis_assume(!!(condition)),                                     \
+  LAZY_STREAM(LOG_STREAM(DCHECK), false)                                \
+  << "Check failed: " #condition ". "
 
-#define DPCHECK(condition)                    \
-  __analysis_assume(!!(condition)),           \
-      LAZY_STREAM(PLOG_STREAM(DCHECK), false) \
-          << "Check failed: " #condition ". "
+#define DPCHECK(condition)                                              \
+  __analysis_assume(!!(condition)),                                     \
+  LAZY_STREAM(PLOG_STREAM(DCHECK), false)                               \
+  << "Check failed: " #condition ". "
 
-#elif defined(__clang_analyzer__)
+#else  // _PREFAST_
 
-// Keeps the static analyzer from proceeding along the current codepath,
-// otherwise false positive errors may be generated  by null pointer checks.
-inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) {
-  return false;
-}
-
-#define DCHECK(condition)                                                     \
-  LAZY_STREAM(                                                                \
-      LOG_STREAM(DCHECK),                                                     \
-      DCHECK_IS_ON() ? (logging::AnalyzerNoReturn() || !(condition)) : false) \
+#define DCHECK(condition)                                                \
+  LAZY_STREAM(LOG_STREAM(DCHECK), DCHECK_IS_ON() ? !(condition) : false) \
       << "Check failed: " #condition ". "
 
-#define DPCHECK(condition)                                                    \
-  LAZY_STREAM(                                                                \
-      PLOG_STREAM(DCHECK),                                                    \
-      DCHECK_IS_ON() ? (logging::AnalyzerNoReturn() || !(condition)) : false) \
+#define DPCHECK(condition)                                                \
+  LAZY_STREAM(PLOG_STREAM(DCHECK), DCHECK_IS_ON() ? !(condition) : false) \
       << "Check failed: " #condition ". "
 
-#else
-
-#if DCHECK_IS_ON()
-
-#define DCHECK(condition)                       \
-  LAZY_STREAM(LOG_STREAM(DCHECK), !(condition)) \
-      << "Check failed: " #condition ". "
-#define DPCHECK(condition)                       \
-  LAZY_STREAM(PLOG_STREAM(DCHECK), !(condition)) \
-      << "Check failed: " #condition ". "
-
-#else  // DCHECK_IS_ON()
-
-#define DCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
-#define DPCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
-
-#endif  // DCHECK_IS_ON()
-
-#endif
+#endif  // _PREFAST_
 
 // Helper macro for binary operators.
 // Don't use this macro directly in your code, use DCHECK_EQ et al below.
@@ -844,37 +753,16 @@
 // macro is used in an 'if' clause such as:
 // if (a == 1)
 //   DCHECK_EQ(2, a);
-#if DCHECK_IS_ON()
-
-#define DCHECK_OP(name, op, val1, val2)                                \
-  switch (0) case 0: default:                                          \
-  if (::logging::CheckOpResult true_if_passed =                        \
-      DCHECK_IS_ON() ?                                                 \
-      ::logging::Check##name##Impl((val1), (val2),                     \
-                                   #val1 " " #op " " #val2) : nullptr) \
-   ;                                                                   \
-  else                                                                 \
-    ::logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK,   \
-                          true_if_passed.message()).stream()
-
-#else  // DCHECK_IS_ON()
-
-// When DCHECKs aren't enabled, DCHECK_OP still needs to reference operator<<
-// overloads for |val1| and |val2| to avoid potential compiler warnings about
-// unused functions. For the same reason, it also compares |val1| and |val2|
-// using |op|.
-//
-// Note that the contract of DCHECK_EQ, etc is that arguments are only evaluated
-// once. Even though |val1| and |val2| appear twice in this version of the macro
-// expansion, this is OK, since the expression is never actually evaluated.
-#define DCHECK_OP(name, op, val1, val2)                             \
-  EAT_STREAM_PARAMETERS << (::logging::MakeCheckOpValueString(      \
-                                ::logging::g_swallow_stream, val1), \
-                            ::logging::MakeCheckOpValueString(      \
-                                ::logging::g_swallow_stream, val2), \
-                            (val1)op(val2))
-
-#endif  // DCHECK_IS_ON()
+#define DCHECK_OP(name, op, val1, val2)                               \
+  switch (0) case 0: default:                                         \
+  if (logging::CheckOpResult true_if_passed =                         \
+      DCHECK_IS_ON() ?                                                \
+      logging::Check##name##Impl((val1), (val2),                      \
+                                 #val1 " " #op " " #val2) : nullptr)  \
+   ;                                                                  \
+  else                                                                \
+    logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK,    \
+                        true_if_passed.message()).stream()
 
 // Equality/Inequality checks - compare two values, and log a
 // LOG_DCHECK message including the two values when the result is not
@@ -882,7 +770,7 @@
 // defined.
 //
 // You may append to the error message like so:
-//   DCHECK_NE(1, 2) << "The world must be ending!";
+//   DCHECK_NE(1, 2) << ": The world must be ending!";
 //
 // We are very careful to ensure that each argument is evaluated exactly
 // once, and that anything which is legal to pass as a function argument is
@@ -946,9 +834,6 @@
 
   std::ostream& stream() { return stream_; }
 
-  LogSeverity severity() { return severity_; }
-  std::string str() { return stream_.str(); }
-
  private:
   void Init(const char* file, int line);
 
@@ -1056,14 +941,12 @@
 // Async signal safe logging mechanism.
 BASE_EXPORT void RawLog(int level, const char* message);
 
-#define RAW_LOG(level, message) \
-  ::logging::RawLog(::logging::LOG_##level, message)
+#define RAW_LOG(level, message) logging::RawLog(logging::LOG_ ## level, message)
 
-#define RAW_CHECK(condition)                               \
-  do {                                                     \
-    if (!(condition))                                      \
-      ::logging::RawLog(::logging::LOG_FATAL,              \
-                        "Check failed: " #condition "\n"); \
+#define RAW_CHECK(condition)                                                   \
+  do {                                                                         \
+    if (!(condition))                                                          \
+      logging::RawLog(logging::LOG_FATAL, "Check failed: " #condition "\n");   \
   } while (0)
 
 #if defined(OS_WIN)
@@ -1076,27 +959,6 @@
 
 }  // namespace logging
 
-// Note that "The behavior of a C++ program is undefined if it adds declarations
-// or definitions to namespace std or to a namespace within namespace std unless
-// otherwise specified." --C++11[namespace.std]
-//
-// We've checked that this particular definition has the intended behavior on
-// our implementations, but it's prone to breaking in the future, and please
-// don't imitate this in your own definitions without checking with some
-// standard library experts.
-namespace std {
-// These functions are provided as a convenience for logging, which is where we
-// use streams (it is against Google style to use streams in other places). It
-// is designed to allow you to emit non-ASCII Unicode strings to the log file,
-// which is normally ASCII. It is relatively slow, so try not to use it for
-// common cases. Non-ASCII characters will be converted to UTF-8 by these
-// operators.
-BASE_EXPORT std::ostream& operator<<(std::ostream& out, const wchar_t* wstr);
-inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) {
-  return out << wstr.c_str();
-}
-}  // namespace std
-
 // The NOTIMPLEMENTED() macro annotates codepaths which have
 // not been implemented yet.
 //
diff --git a/base/logging_unittest.cc b/base/logging_unittest.cc
index 04f349c..8a20c54 100644
--- a/base/logging_unittest.cc
+++ b/base/logging_unittest.cc
@@ -9,21 +9,6 @@
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-#if defined(OS_POSIX)
-#include <signal.h>
-#include <unistd.h>
-#include "base/posix/eintr_wrapper.h"
-#endif  // OS_POSIX
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-#include <ucontext.h>
-#endif
-
-#if defined(OS_WIN)
-#include <excpt.h>
-#include <windows.h>
-#endif  // OS_WIN
-
 namespace logging {
 
 namespace {
@@ -69,14 +54,16 @@
 
 TEST_F(LoggingTest, BasicLogging) {
   MockLogSource mock_log_source;
-  EXPECT_CALL(mock_log_source, Log())
-      .Times(DCHECK_IS_ON() ? 16 : 8)
-      .WillRepeatedly(Return("log message"));
+  EXPECT_CALL(mock_log_source, Log()).Times(DEBUG_MODE ? 16 : 8).
+      WillRepeatedly(Return("log message"));
 
   SetMinLogLevel(LOG_INFO);
 
   EXPECT_TRUE(LOG_IS_ON(INFO));
-  EXPECT_TRUE((DCHECK_IS_ON() != 0) == DLOG_IS_ON(INFO));
+  // As of g++-4.5, the first argument to EXPECT_EQ cannot be a
+  // constant expression.
+  const bool kIsDebugMode = (DEBUG_MODE != 0);
+  EXPECT_TRUE(kIsDebugMode == DLOG_IS_ON(INFO));
   EXPECT_TRUE(VLOG_IS_ON(0));
 
   LOG(INFO) << mock_log_source.Log();
@@ -203,154 +190,6 @@
 
 #endif
 
-#if defined(OFFICIAL_BUILD) && defined(OS_WIN)
-NOINLINE void CheckContainingFunc(int death_location) {
-  CHECK(death_location != 1);
-  CHECK(death_location != 2);
-  CHECK(death_location != 3);
-}
-
-int GetCheckExceptionData(EXCEPTION_POINTERS* p, DWORD* code, void** addr) {
-  *code = p->ExceptionRecord->ExceptionCode;
-  *addr = p->ExceptionRecord->ExceptionAddress;
-  return EXCEPTION_EXECUTE_HANDLER;
-}
-
-TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
-  DWORD code1 = 0;
-  DWORD code2 = 0;
-  DWORD code3 = 0;
-  void* addr1 = nullptr;
-  void* addr2 = nullptr;
-  void* addr3 = nullptr;
-
-  // Record the exception code and addresses.
-  __try {
-    CheckContainingFunc(1);
-  } __except (
-      GetCheckExceptionData(GetExceptionInformation(), &code1, &addr1)) {
-  }
-
-  __try {
-    CheckContainingFunc(2);
-  } __except (
-      GetCheckExceptionData(GetExceptionInformation(), &code2, &addr2)) {
-  }
-
-  __try {
-    CheckContainingFunc(3);
-  } __except (
-      GetCheckExceptionData(GetExceptionInformation(), &code3, &addr3)) {
-  }
-
-  // Ensure that the exception codes are correct (in particular, breakpoints,
-  // not access violations).
-  EXPECT_EQ(STATUS_BREAKPOINT, code1);
-  EXPECT_EQ(STATUS_BREAKPOINT, code2);
-  EXPECT_EQ(STATUS_BREAKPOINT, code3);
-
-  // Ensure that none of the CHECKs are colocated.
-  EXPECT_NE(addr1, addr2);
-  EXPECT_NE(addr1, addr3);
-  EXPECT_NE(addr2, addr3);
-}
-
-#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_IOS) && \
-    (defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY))
-
-int g_child_crash_pipe;
-
-void CheckCrashTestSighandler(int, siginfo_t* info, void* context_ptr) {
-  // Conversely to what clearly stated in "man 2 sigaction", some Linux kernels
-  // do NOT populate the |info->si_addr| in the case of a SIGTRAP. Hence we
-  // need the arch-specific boilerplate below, which is inspired by breakpad.
-  // At the same time, on OSX, ucontext.h is deprecated but si_addr works fine.
-  uintptr_t crash_addr = 0;
-#if defined(OS_MACOSX)
-  crash_addr = reinterpret_cast<uintptr_t>(info->si_addr);
-#else  // OS_POSIX && !OS_MACOSX
-  struct ucontext* context = reinterpret_cast<struct ucontext*>(context_ptr);
-#if defined(ARCH_CPU_X86)
-  crash_addr = static_cast<uintptr_t>(context->uc_mcontext.gregs[REG_EIP]);
-#elif defined(ARCH_CPU_X86_64)
-  crash_addr = static_cast<uintptr_t>(context->uc_mcontext.gregs[REG_RIP]);
-#elif defined(ARCH_CPU_ARMEL)
-  crash_addr = static_cast<uintptr_t>(context->uc_mcontext.arm_pc);
-#elif defined(ARCH_CPU_ARM64)
-  crash_addr = static_cast<uintptr_t>(context->uc_mcontext.pc);
-#endif  // ARCH_*
-#endif  // OS_POSIX && !OS_MACOSX
-  HANDLE_EINTR(write(g_child_crash_pipe, &crash_addr, sizeof(uintptr_t)));
-  _exit(0);
-}
-
-// CHECK causes a direct crash (without jumping to another function) only in
-// official builds. Unfortunately, continuous test coverage on official builds
-// is lower. DO_CHECK here falls back on a home-brewed implementation in
-// non-official builds, to catch regressions earlier in the CQ.
-#if defined(OFFICIAL_BUILD)
-#define DO_CHECK CHECK
-#else
-#define DO_CHECK(cond) \
-  if (!(cond))         \
-  IMMEDIATE_CRASH()
-#endif
-
-void CrashChildMain(int death_location) {
-  struct sigaction act = {};
-  act.sa_sigaction = CheckCrashTestSighandler;
-  act.sa_flags = SA_SIGINFO;
-  ASSERT_EQ(0, sigaction(SIGTRAP, &act, NULL));
-  ASSERT_EQ(0, sigaction(SIGBUS, &act, NULL));
-  ASSERT_EQ(0, sigaction(SIGILL, &act, NULL));
-  DO_CHECK(death_location != 1);
-  DO_CHECK(death_location != 2);
-  printf("\n");
-  DO_CHECK(death_location != 3);
-
-  // Should never reach this point.
-  const uintptr_t failed = 0;
-  HANDLE_EINTR(write(g_child_crash_pipe, &failed, sizeof(uintptr_t)));
-};
-
-void SpawnChildAndCrash(int death_location, uintptr_t* child_crash_addr) {
-  int pipefd[2];
-  ASSERT_EQ(0, pipe(pipefd));
-
-  int pid = fork();
-  ASSERT_GE(pid, 0);
-
-  if (pid == 0) {      // child process.
-    close(pipefd[0]);  // Close reader (parent) end.
-    g_child_crash_pipe = pipefd[1];
-    CrashChildMain(death_location);
-    FAIL() << "The child process was supposed to crash. It didn't.";
-  }
-
-  close(pipefd[1]);  // Close writer (child) end.
-  DCHECK(child_crash_addr);
-  int res = HANDLE_EINTR(read(pipefd[0], child_crash_addr, sizeof(uintptr_t)));
-  ASSERT_EQ(static_cast<int>(sizeof(uintptr_t)), res);
-}
-
-TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
-  uintptr_t child_crash_addr_1 = 0;
-  uintptr_t child_crash_addr_2 = 0;
-  uintptr_t child_crash_addr_3 = 0;
-
-  SpawnChildAndCrash(1, &child_crash_addr_1);
-  SpawnChildAndCrash(2, &child_crash_addr_2);
-  SpawnChildAndCrash(3, &child_crash_addr_3);
-
-  ASSERT_NE(0u, child_crash_addr_1);
-  ASSERT_NE(0u, child_crash_addr_2);
-  ASSERT_NE(0u, child_crash_addr_3);
-  ASSERT_NE(child_crash_addr_1, child_crash_addr_2);
-  ASSERT_NE(child_crash_addr_1, child_crash_addr_3);
-  ASSERT_NE(child_crash_addr_2, child_crash_addr_3);
-}
-#endif  // OS_POSIX
-
 TEST_F(LoggingTest, DebugLoggingReleaseBehavior) {
 #if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
   int debug_only_variable = 1;
@@ -378,14 +217,6 @@
 #endif
 }
 
-void DcheckEmptyFunction1() {
-  // Provide a body so that Release builds do not cause the compiler to
-  // optimize DcheckEmptyFunction1 and DcheckEmptyFunction2 as a single
-  // function, which breaks the Dcheck tests below.
-  LOG(INFO) << "DcheckEmptyFunction1";
-}
-void DcheckEmptyFunction2() {}
-
 TEST_F(LoggingTest, Dcheck) {
 #if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
   // Release build.
@@ -427,31 +258,6 @@
   EXPECT_EQ(0, log_sink_call_count);
   DCHECK_EQ(Animal::DOG, Animal::CAT);
   EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
-
-  // Test DCHECK on functions and function pointers.
-  log_sink_call_count = 0;
-  struct MemberFunctions {
-    void MemberFunction1() {
-      // See the comment in DcheckEmptyFunction1().
-      LOG(INFO) << "Do not merge with MemberFunction2.";
-    }
-    void MemberFunction2() {}
-  };
-  void (MemberFunctions::*mp1)() = &MemberFunctions::MemberFunction1;
-  void (MemberFunctions::*mp2)() = &MemberFunctions::MemberFunction2;
-  void (*fp1)() = DcheckEmptyFunction1;
-  void (*fp2)() = DcheckEmptyFunction2;
-  void (*fp3)() = DcheckEmptyFunction1;
-  DCHECK_EQ(fp1, fp3);
-  EXPECT_EQ(0, log_sink_call_count);
-  DCHECK_EQ(mp1, &MemberFunctions::MemberFunction1);
-  EXPECT_EQ(0, log_sink_call_count);
-  DCHECK_EQ(mp2, &MemberFunctions::MemberFunction2);
-  EXPECT_EQ(0, log_sink_call_count);
-  DCHECK_EQ(fp1, fp2);
-  EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
-  DCHECK_EQ(mp2, &MemberFunctions::MemberFunction1);
-  EXPECT_EQ(DCHECK_IS_ON() ? 2 : 0, log_sink_call_count);
 }
 
 TEST_F(LoggingTest, DcheckReleaseBehavior) {
diff --git a/base/mac/bind_objc_block.h b/base/mac/bind_objc_block.h
index 9a481ed..2434d44 100644
--- a/base/mac/bind_objc_block.h
+++ b/base/mac/bind_objc_block.h
@@ -9,7 +9,6 @@
 
 #include "base/bind.h"
 #include "base/callback_forward.h"
-#include "base/compiler_specific.h"
 #include "base/mac/scoped_block.h"
 
 // BindBlock builds a callback from an Objective-C block. Example usages:
@@ -28,13 +27,6 @@
 // seven total arguments, and the bound block itself is used as one of these
 // arguments, so functionally the templates are limited to binding blocks with
 // zero through six arguments.
-//
-// For code compiled with ARC (automatic reference counting), use BindBlockArc.
-// This is because the method has a different implementation (to avoid over-
-// retaining the block) and need to have a different name not to break the ODR
-// (one definition rule). Another subtle difference is that the implementation
-// will call a different version of ScopedBlock constructor thus the linker must
-// not merge both functions.
 
 namespace base {
 
@@ -49,8 +41,6 @@
 
 }  // namespace internal
 
-#if !defined(__has_feature) || !__has_feature(objc_arc)
-
 // Construct a callback from an objective-C block with up to six arguments (see
 // note above).
 template<typename R, typename... Args>
@@ -62,18 +52,6 @@
               block)));
 }
 
-#else
-
-// Construct a callback from an objective-C block with up to six arguments (see
-// note above).
-template <typename R, typename... Args>
-base::Callback<R(Args...)> BindBlockArc(R (^block)(Args...)) {
-  return base::Bind(&base::internal::RunBlock<R, Args...>,
-                    base::mac::ScopedBlock<R (^)(Args...)>(block));
-}
-
-#endif
-
 }  // namespace base
 
 #endif  // BASE_MAC_BIND_OBJC_BLOCK_H_
diff --git a/base/mac/bundle_locations.h b/base/mac/bundle_locations.h
index 5cc44ba..276290b 100644
--- a/base/mac/bundle_locations.h
+++ b/base/mac/bundle_locations.h
@@ -12,6 +12,7 @@
 #import <Foundation/Foundation.h>
 #else  // __OBJC__
 class NSBundle;
+class NSString;
 #endif  // __OBJC__
 
 namespace base {
diff --git a/base/mac/foundation_util.h b/base/mac/foundation_util.h
index 69b6128..ee23a17 100644
--- a/base/mac/foundation_util.h
+++ b/base/mac/foundation_util.h
@@ -55,12 +55,6 @@
 typedef struct OpaqueSecTrustRef* SecACLRef;
 typedef struct OpaqueSecTrustedApplicationRef* SecTrustedApplicationRef;
 
-#if defined(OS_IOS)
-typedef struct CF_BRIDGED_TYPE(id) __SecPolicy* SecPolicyRef;
-#else
-typedef struct OpaqueSecPolicyRef* SecPolicyRef;
-#endif
-
 namespace base {
 
 class FilePath;
@@ -147,8 +141,6 @@
 TYPE_NAME_FOR_CF_TYPE_DECL(CTFont);
 TYPE_NAME_FOR_CF_TYPE_DECL(CTRun);
 
-TYPE_NAME_FOR_CF_TYPE_DECL(SecPolicy);
-
 #undef TYPE_NAME_FOR_CF_TYPE_DECL
 
 // Retain/release calls for memory management in C++.
@@ -309,7 +301,6 @@
 CF_CAST_DECL(CTRun);
 
 CF_CAST_DECL(SecACL);
-CF_CAST_DECL(SecPolicy);
 CF_CAST_DECL(SecTrustedApplication);
 
 #undef CF_CAST_DECL
diff --git a/base/mac/foundation_util.mm b/base/mac/foundation_util.mm
index eb8284e..4f6fa60 100644
--- a/base/mac/foundation_util.mm
+++ b/base/mac/foundation_util.mm
@@ -213,10 +213,6 @@
 TYPE_NAME_FOR_CF_TYPE_DEFN(CTFont);
 TYPE_NAME_FOR_CF_TYPE_DEFN(CTRun);
 
-#if !defined(OS_IOS)
-TYPE_NAME_FOR_CF_TYPE_DEFN(SecPolicy);
-#endif
-
 #undef TYPE_NAME_FOR_CF_TYPE_DEFN
 
 void NSObjectRetain(void* obj) {
@@ -412,7 +408,6 @@
 
 #if !defined(OS_IOS)
 CF_CAST_DEFN(SecACL);
-CF_CAST_DEFN(SecPolicy);
 CF_CAST_DEFN(SecTrustedApplication);
 #endif
 
diff --git a/base/mac/mac_util.h b/base/mac/mac_util.h
index 67d1880..84948f7 100644
--- a/base/mac/mac_util.h
+++ b/base/mac/mac_util.h
@@ -5,11 +5,11 @@
 #ifndef BASE_MAC_MAC_UTIL_H_
 #define BASE_MAC_MAC_UTIL_H_
 
+#include <AvailabilityMacros.h>
+#include <Carbon/Carbon.h>
 #include <stdint.h>
 #include <string>
 
-#import <CoreGraphics/CoreGraphics.h>
-
 #include "base/base_export.h"
 
 namespace base {
@@ -31,6 +31,9 @@
   kFullScreenModeNormal = 10,
 };
 
+BASE_EXPORT std::string PathFromFSRef(const FSRef& ref);
+BASE_EXPORT bool FSRefFromPath(const std::string& path, FSRef* ref);
+
 // Returns an sRGB color space.  The return value is a static value; do not
 // release it!
 BASE_EXPORT CGColorSpaceRef GetSRGBColorSpace();
@@ -63,6 +66,11 @@
 BASE_EXPORT void SwitchFullScreenModes(FullScreenMode from_mode,
                                        FullScreenMode to_mode);
 
+// Returns true if this process is in the foreground, meaning that it's the
+// frontmost process, the one whose menu bar is shown at the top of the main
+// display.
+BASE_EXPORT bool AmIForeground();
+
 // Excludes the file given by |file_path| from being backed up by Time Machine.
 BASE_EXPORT bool SetFileBackupExclusion(const FilePath& file_path);
 
@@ -100,63 +108,85 @@
 // an error, or true otherwise.
 BASE_EXPORT bool RemoveQuarantineAttribute(const FilePath& file_path);
 
-namespace internal {
-
-// Returns the system's Mac OS X minor version. This is the |y| value
-// in 10.y or 10.y.z.
-BASE_EXPORT int MacOSXMinorVersion();
-
-}  // namespace internal
-
 // Run-time OS version checks. Use these instead of
-// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "AtLeast" and
-// "AtMost" variants to those that check for a specific version, unless you
+// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "OrEarlier" and
+// "OrLater" variants to those that check for a specific version, unless you
 // know for sure that you need to check for a specific version.
 
-#define DEFINE_IS_OS_FUNCS(V, TEST_DEPLOYMENT_TARGET) \
-  inline bool IsOS10_##V() {                          \
-    TEST_DEPLOYMENT_TARGET(>, V, false)               \
-    return internal::MacOSXMinorVersion() == V;       \
-  }                                                   \
-  inline bool IsAtLeastOS10_##V() {                   \
-    TEST_DEPLOYMENT_TARGET(>=, V, true)               \
-    return internal::MacOSXMinorVersion() >= V;       \
-  }                                                   \
-  inline bool IsAtMostOS10_##V() {                    \
-    TEST_DEPLOYMENT_TARGET(>, V, false)               \
-    return internal::MacOSXMinorVersion() <= V;       \
-  }
+// Mavericks is OS X 10.9, Darwin 13.
+BASE_EXPORT bool IsOSMavericks();
 
-#define TEST_DEPLOYMENT_TARGET(OP, V, RET)                      \
-  if (MAC_OS_X_VERSION_MIN_REQUIRED OP MAC_OS_X_VERSION_10_##V) \
-    return RET;
-#define IGNORE_DEPLOYMENT_TARGET(OP, V, RET)
+// Yosemite is OS X 10.10, Darwin 14.
+BASE_EXPORT bool IsOSYosemite();
+BASE_EXPORT bool IsOSYosemiteOrEarlier();
+BASE_EXPORT bool IsOSYosemiteOrLater();
 
-DEFINE_IS_OS_FUNCS(9, TEST_DEPLOYMENT_TARGET)
-DEFINE_IS_OS_FUNCS(10, TEST_DEPLOYMENT_TARGET)
+// El Capitan is OS X 10.11, Darwin 15.
+BASE_EXPORT bool IsOSElCapitan();
+BASE_EXPORT bool IsOSElCapitanOrEarlier();
+BASE_EXPORT bool IsOSElCapitanOrLater();
 
-#ifdef MAC_OS_X_VERSION_10_11
-DEFINE_IS_OS_FUNCS(11, TEST_DEPLOYMENT_TARGET)
-#else
-DEFINE_IS_OS_FUNCS(11, IGNORE_DEPLOYMENT_TARGET)
-#endif
-
-#ifdef MAC_OS_X_VERSION_10_12
-DEFINE_IS_OS_FUNCS(12, TEST_DEPLOYMENT_TARGET)
-#else
-DEFINE_IS_OS_FUNCS(12, IGNORE_DEPLOYMENT_TARGET)
-#endif
-
-#undef IGNORE_DEPLOYMENT_TARGET
-#undef TEST_DEPLOYMENT_TARGET
-#undef DEFINE_IS_OS_FUNCS
+// Sierra is macOS 10.12, Darwin 16.
+BASE_EXPORT bool IsOSSierra();
+BASE_EXPORT bool IsOSSierraOrLater();
 
 // This should be infrequently used. It only makes sense to use this to avoid
 // codepaths that are very likely to break on future (unreleased, untested,
 // unborn) OS releases, or to log when the OS is newer than any known version.
-inline bool IsOSLaterThan10_12_DontCallThis() {
-  return !IsAtMostOS10_12();
-}
+BASE_EXPORT bool IsOSLaterThanSierra_DontCallThis();
+
+// Inline functions that are redundant due to version ranges being mutually-
+// exclusive.
+inline bool IsOSYosemiteOrEarlier() { return !IsOSElCapitanOrLater(); }
+inline bool IsOSElCapitanOrEarlier() { return !IsOSSierraOrLater(); }
+
+// When the deployment target is set, the code produced cannot run on earlier
+// OS releases. That enables some of the IsOS* family to be implemented as
+// constant-value inline functions. The MAC_OS_X_VERSION_MIN_REQUIRED macro
+// contains the value of the deployment target.
+
+#if defined(MAC_OS_X_VERSION_10_9) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_9
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_9
+inline bool IsOSMavericks() { return false; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_10) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_10
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_10
+inline bool IsOSYosemiteOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_10) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_10
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_10
+inline bool IsOSYosemite() { return false; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_11) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_11
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_11
+inline bool IsOSElCapitanOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_11) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_11
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11
+inline bool IsOSElCapitan() { return false; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_12
+inline bool IsOSSierraOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_12
+inline bool IsOSSierra() { return false; }
+inline bool IsOSLaterThanSierra_DontCallThis() { return true; }
+#endif
 
 // Retrieve the system's model identifier string from the IOKit registry:
 // for example, "MacPro4,1", "MacBookPro6,1". Returns empty string upon
diff --git a/base/mac/mach_port_broker.mm b/base/mac/mach_port_broker.mm
index 6d9fec5..bd47017 100644
--- a/base/mac/mach_port_broker.mm
+++ b/base/mac/mach_port_broker.mm
@@ -154,7 +154,12 @@
   // Use the kernel audit information to make sure this message is from
   // a task that this process spawned. The kernel audit token contains the
   // unspoofable pid of the task that sent the message.
-  pid_t child_pid = audit_token_to_pid(msg.trailer.msgh_audit);
+  //
+  // TODO(rsesek): In the 10.7 SDK, there's audit_token_to_pid().
+  pid_t child_pid;
+  audit_token_to_au32(msg.trailer.msgh_audit,
+      NULL, NULL, NULL, NULL, NULL, &child_pid, NULL, NULL);
+
   mach_port_t child_task_port = msg.child_task_port.name;
 
   // Take the lock and update the broker information.
diff --git a/base/mac/scoped_authorizationref.h b/base/mac/scoped_authorizationref.h
index b83f8df..03cde86 100644
--- a/base/mac/scoped_authorizationref.h
+++ b/base/mac/scoped_authorizationref.h
@@ -11,7 +11,7 @@
 #include "base/macros.h"
 
 // ScopedAuthorizationRef maintains ownership of an AuthorizationRef.  It is
-// patterned after the unique_ptr interface.
+// patterned after the scoped_ptr interface.
 
 namespace base {
 namespace mac {
diff --git a/base/mac/scoped_block.h b/base/mac/scoped_block.h
index 10ab4b4..8199677 100644
--- a/base/mac/scoped_block.h
+++ b/base/mac/scoped_block.h
@@ -36,33 +36,9 @@
 
 // ScopedBlock<> is patterned after ScopedCFTypeRef<>, but uses Block_copy() and
 // Block_release() instead of CFRetain() and CFRelease().
+
 template <typename B>
-class ScopedBlock : public ScopedTypeRef<B, internal::ScopedBlockTraits<B>> {
- public:
-  using Traits = internal::ScopedBlockTraits<B>;
-
-#if !defined(__has_feature) || !__has_feature(objc_arc)
-  explicit ScopedBlock(
-      B block = Traits::InvalidValue(),
-      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
-      : ScopedTypeRef<B, Traits>(block, policy) {}
-#else
-  explicit ScopedBlock(B block = Traits::InvalidValue())
-      : ScopedTypeRef<B, Traits>(block, base::scoped_policy::RETAIN) {}
-#endif
-
-#if !defined(__has_feature) || !__has_feature(objc_arc)
-  void reset(B block = Traits::InvalidValue(),
-             base::scoped_policy::OwnershipPolicy policy =
-                 base::scoped_policy::ASSUME) {
-    ScopedTypeRef<B, Traits>::reset(block, policy);
-  }
-#else
-  void reset(B block = Traits::InvalidValue()) {
-    ScopedTypeRef<B, Traits>::reset(block, base::scoped_policy::RETAIN);
-  }
-#endif
-};
+using ScopedBlock = ScopedTypeRef<B, internal::ScopedBlockTraits<B>>;
 
 }  // namespace mac
 }  // namespace base
diff --git a/base/mac/scoped_nsobject.h b/base/mac/scoped_nsobject.h
index ecd8e78..cc54aa0 100644
--- a/base/mac/scoped_nsobject.h
+++ b/base/mac/scoped_nsobject.h
@@ -102,7 +102,7 @@
       : ScopedTypeRef<NST, Traits>(that_as_subclass) {}
 
   scoped_nsprotocol(scoped_nsprotocol<NST>&& that)
-      : ScopedTypeRef<NST, Traits>(std::move(that)) {}
+      : ScopedTypeRef<NST, Traits>(that) {}
 
   scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
     ScopedTypeRef<NST, Traits>::operator=(that);
@@ -166,7 +166,7 @@
       : scoped_nsprotocol<NST*>(that_as_subclass) {}
 
   scoped_nsobject(scoped_nsobject<NST>&& that)
-      : scoped_nsprotocol<NST*>(std::move(that)) {}
+      : scoped_nsprotocol<NST*>(that) {}
 
   scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
     scoped_nsprotocol<NST*>::operator=(that);
@@ -214,8 +214,7 @@
   explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
       : scoped_nsprotocol<id>(that_as_subclass) {}
 
-  scoped_nsobject(scoped_nsobject<id>&& that)
-      : scoped_nsprotocol<id>(std::move(that)) {}
+  scoped_nsobject(scoped_nsobject<id>&& that) : scoped_nsprotocol<id>(that) {}
 
   scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
     scoped_nsprotocol<id>::operator=(that);
diff --git a/base/mac/sdk_forward_declarations.h b/base/mac/sdk_forward_declarations.h
index 306cd93..e9a11f7 100644
--- a/base/mac/sdk_forward_declarations.h
+++ b/base/mac/sdk_forward_declarations.h
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 // This file contains forward declarations for items in later SDKs than the
-// default one with which Chromium is built (currently 10.10).
+// default one with which Chromium is built (currently 10.6).
 // If you call any function from this header, be sure to check at runtime for
 // respondsToSelector: before calling these functions (else your code will crash
 // on older OS X versions that chrome still supports).
@@ -14,18 +14,206 @@
 #import <AppKit/AppKit.h>
 #import <CoreBluetooth/CoreBluetooth.h>
 #import <CoreWLAN/CoreWLAN.h>
-#import <IOBluetooth/IOBluetooth.h>
 #import <ImageCaptureCore/ImageCaptureCore.h>
-#import <QuartzCore/QuartzCore.h>
+#import <IOBluetooth/IOBluetooth.h>
 #include <stdint.h>
 
 #include "base/base_export.h"
 
 // ----------------------------------------------------------------------------
+// Either define or forward declare classes only available in OSX 10.7+.
+// ----------------------------------------------------------------------------
+
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
+
+@interface CWChannel : NSObject
+@end
+
+@interface CBPeripheral : NSObject
+@end
+
+@interface CBCentralManager : NSObject
+@end
+
+@interface CBUUID : NSObject
+@end
+
+#else
+
+@class CWChannel;
+@class CBPeripheral;
+@class CBCentralManager;
+@class CBUUID;
+
+#endif  // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
+
+@interface NSUUID : NSObject
+@end
+
+#else
+
+@class NSUUID;
+
+#endif  // MAC_OS_X_VERSION_10_8
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
+
+// NSProgress is public API in 10.9, but a version of it exists and is usable
+// in 10.8.
+@interface NSProgress : NSObject
+@end
+
+@interface NSAppearance : NSObject
+@end
+
+#else
+
+@class NSProgress;
+@class NSAppearance;
+
+#endif  // MAC_OS_X_VERSION_10_9
+
+// ----------------------------------------------------------------------------
 // Define typedefs, enums, and protocols not available in the version of the
 // OSX SDK being compiled against.
 // ----------------------------------------------------------------------------
 
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
+
+enum {
+  NSEventPhaseNone = 0,  // event not associated with a phase.
+  NSEventPhaseBegan = 0x1 << 0,
+  NSEventPhaseStationary = 0x1 << 1,
+  NSEventPhaseChanged = 0x1 << 2,
+  NSEventPhaseEnded = 0x1 << 3,
+  NSEventPhaseCancelled = 0x1 << 4
+};
+typedef NSUInteger NSEventPhase;
+
+enum {
+  NSFullScreenWindowMask = 1 << 14,
+};
+
+enum {
+  NSApplicationPresentationFullScreen = 1 << 10,
+};
+
+enum {
+  NSWindowCollectionBehaviorFullScreenPrimary = 1 << 7,
+  NSWindowCollectionBehaviorFullScreenAuxiliary = 1 << 8,
+};
+
+enum {
+  NSEventSwipeTrackingLockDirection = 0x1 << 0,
+  NSEventSwipeTrackingClampGestureAmount = 0x1 << 1,
+};
+typedef NSUInteger NSEventSwipeTrackingOptions;
+
+enum {
+  NSWindowAnimationBehaviorDefault = 0,
+  NSWindowAnimationBehaviorNone = 2,
+  NSWindowAnimationBehaviorDocumentWindow = 3,
+  NSWindowAnimationBehaviorUtilityWindow = 4,
+  NSWindowAnimationBehaviorAlertPanel = 5
+};
+typedef NSInteger NSWindowAnimationBehavior;
+
+enum {
+  NSWindowDocumentVersionsButton = 6,
+  NSWindowFullScreenButton,
+};
+typedef NSUInteger NSWindowButton;
+
+enum CWChannelBand {
+  kCWChannelBandUnknown = 0,
+  kCWChannelBand2GHz = 1,
+  kCWChannelBand5GHz = 2,
+};
+
+enum {
+  kCWSecurityNone = 0,
+  kCWSecurityWEP = 1,
+  kCWSecurityWPAPersonal = 2,
+  kCWSecurityWPAPersonalMixed = 3,
+  kCWSecurityWPA2Personal = 4,
+  kCWSecurityPersonal = 5,
+  kCWSecurityDynamicWEP = 6,
+  kCWSecurityWPAEnterprise = 7,
+  kCWSecurityWPAEnterpriseMixed = 8,
+  kCWSecurityWPA2Enterprise = 9,
+  kCWSecurityEnterprise = 10,
+  kCWSecurityUnknown = NSIntegerMax,
+};
+
+typedef NSInteger CWSecurity;
+
+enum {
+  kBluetoothFeatureLESupportedController = (1 << 6L),
+};
+
+@protocol IOBluetoothDeviceInquiryDelegate
+- (void)deviceInquiryStarted:(IOBluetoothDeviceInquiry*)sender;
+- (void)deviceInquiryDeviceFound:(IOBluetoothDeviceInquiry*)sender
+                          device:(IOBluetoothDevice*)device;
+- (void)deviceInquiryComplete:(IOBluetoothDeviceInquiry*)sender
+                        error:(IOReturn)error
+                      aborted:(BOOL)aborted;
+@end
+
+enum {
+  CBPeripheralStateDisconnected = 0,
+  CBPeripheralStateConnecting,
+  CBPeripheralStateConnected,
+};
+typedef NSInteger CBPeripheralState;
+
+enum {
+  CBCentralManagerStateUnknown = 0,
+  CBCentralManagerStateResetting,
+  CBCentralManagerStateUnsupported,
+  CBCentralManagerStateUnauthorized,
+  CBCentralManagerStatePoweredOff,
+  CBCentralManagerStatePoweredOn,
+};
+typedef NSInteger CBCentralManagerState;
+
+@protocol CBCentralManagerDelegate;
+
+@protocol CBCentralManagerDelegate<NSObject>
+- (void)centralManagerDidUpdateState:(CBCentralManager*)central;
+- (void)centralManager:(CBCentralManager*)central
+    didDiscoverPeripheral:(CBPeripheral*)peripheral
+        advertisementData:(NSDictionary*)advertisementData
+                     RSSI:(NSNumber*)RSSI;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
+
+enum { NSEventPhaseMayBegin = 0x1 << 5 };
+
+#endif  // MAC_OS_X_VERSION_10_8
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
+
+enum {
+  NSWindowOcclusionStateVisible = 1UL << 1,
+};
+typedef NSUInteger NSWindowOcclusionState;
+
+enum { NSWorkspaceLaunchWithErrorPresentation = 0x00000040 };
+
+#endif  // MAC_OS_X_VERSION_10_9
+
 #if !defined(MAC_OS_X_VERSION_10_11) || \
     MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
 
@@ -44,29 +232,7 @@
 - (instancetype)initWithPressureBehavior:(NSPressureBehavior)pressureBehavior;
 @end
 
-enum {
-  NSSpringLoadingHighlightNone = 0,
-  NSSpringLoadingHighlightStandard,
-  NSSpringLoadingHighlightEmphasized
-};
-typedef NSUInteger NSSpringLoadingHighlight;
-
-#endif  // MAC_OS_X_VERSION_10_11
-
-#if !defined(MAC_OS_X_VERSION_10_12) || \
-    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_12
-
-// The protocol was formalized by the 10.12 SDK, but it was informally used
-// before.
-@protocol CAAnimationDelegate
-- (void)animationDidStart:(CAAnimation*)animation;
-- (void)animationDidStop:(CAAnimation*)animation finished:(BOOL)finished;
-@end
-
-@protocol CALayerDelegate
-@end
-
-#endif  // MAC_OS_X_VERSION_10_12
+#endif // MAC_OS_X_VERSION_10_11
 
 // ----------------------------------------------------------------------------
 // Define NSStrings only available in newer versions of the OSX SDK to force
@@ -74,9 +240,27 @@
 // ----------------------------------------------------------------------------
 
 extern "C" {
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+BASE_EXPORT extern NSString* const NSWindowWillEnterFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowWillExitFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowDidEnterFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowDidExitFullScreenNotification;
+BASE_EXPORT extern NSString* const
+    NSWindowDidChangeBackingPropertiesNotification;
+BASE_EXPORT extern NSString* const CBAdvertisementDataServiceDataKey;
+BASE_EXPORT extern NSString* const CBAdvertisementDataServiceUUIDsKey;
+#endif  // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+BASE_EXPORT extern NSString* const NSWindowDidChangeOcclusionStateNotification;
+BASE_EXPORT extern NSString* const CBAdvertisementDataOverflowServiceUUIDsKey;
+BASE_EXPORT extern NSString* const CBAdvertisementDataIsConnectable;
+#endif  // MAC_OS_X_VERSION_10_9
+
 #if !defined(MAC_OS_X_VERSION_10_10) || \
     MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
-BASE_EXPORT extern NSString* const CIDetectorTypeQRCode;
 BASE_EXPORT extern NSString* const NSUserActivityTypeBrowsingWeb;
 BASE_EXPORT extern NSString* const NSAppearanceNameVibrantDark;
 BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
@@ -84,26 +268,229 @@
 }  // extern "C"
 
 // ----------------------------------------------------------------------------
-// If compiling against an older version of the OSX SDK, declare classes and
-// functions that are available in newer versions of the OSX SDK. If compiling
-// against a newer version of the OSX SDK, redeclare those same classes and
-// functions to suppress -Wpartial-availability warnings.
+// If compiling against an older version of the OSX SDK, declare functions that
+// are available in newer versions of the OSX SDK. If compiling against a newer
+// version of the OSX SDK, redeclare those same functions to suppress
+// -Wpartial-availability warnings.
 // ----------------------------------------------------------------------------
 
+// Once Chrome no longer supports OSX 10.6, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+
+@interface NSEvent (LionSDK)
++ (BOOL)isSwipeTrackingFromScrollEventsEnabled;
+- (NSEventPhase)momentumPhase;
+- (NSEventPhase)phase;
+- (BOOL)hasPreciseScrollingDeltas;
+- (CGFloat)scrollingDeltaX;
+- (CGFloat)scrollingDeltaY;
+- (void)trackSwipeEventWithOptions:(NSEventSwipeTrackingOptions)options
+          dampenAmountThresholdMin:(CGFloat)minDampenThreshold
+                               max:(CGFloat)maxDampenThreshold
+                      usingHandler:(void (^)(CGFloat gestureAmount,
+                                             NSEventPhase phase,
+                                             BOOL isComplete,
+                                             BOOL* stop))trackingHandler;
+- (BOOL)isDirectionInvertedFromDevice;
+@end
+
+@interface NSApplication (LionSDK)
+- (void)disableRelaunchOnLogin;
+@end
+
+@interface CALayer (LionSDK)
+- (CGFloat)contentsScale;
+- (void)setContentsScale:(CGFloat)contentsScale;
+@end
+
+@interface NSScreen (LionSDK)
+- (CGFloat)backingScaleFactor;
+- (NSRect)convertRectToBacking:(NSRect)aRect;
+@end
+
+@interface NSWindow (LionSDK)
+- (CGFloat)backingScaleFactor;
+- (NSWindowAnimationBehavior)animationBehavior;
+- (void)setAnimationBehavior:(NSWindowAnimationBehavior)newAnimationBehavior;
+- (void)toggleFullScreen:(id)sender;
+- (void)setRestorable:(BOOL)flag;
+- (NSRect)convertRectFromScreen:(NSRect)aRect;
+- (NSRect)convertRectToScreen:(NSRect)aRect;
+@end
+
+@interface NSCursor (LionSDKDeclarations)
++ (NSCursor*)IBeamCursorForVerticalLayout;
+@end
+
+@interface NSAnimationContext (LionSDK)
++ (void)runAnimationGroup:(void (^)(NSAnimationContext* context))changes
+        completionHandler:(void (^)(void))completionHandler;
+@property(copy) void (^completionHandler)(void);
+@end
+
+@interface NSView (LionSDK)
+- (NSSize)convertSizeFromBacking:(NSSize)size;
+- (void)setWantsBestResolutionOpenGLSurface:(BOOL)flag;
+- (NSDraggingSession*)beginDraggingSessionWithItems:(NSArray*)items
+                                              event:(NSEvent*)event
+                                             source:
+                                                 (id<NSDraggingSource>)source;
+@end
+
+@interface NSObject (ICCameraDeviceDelegateLionSDK)
+- (void)deviceDidBecomeReadyWithCompleteContentCatalog:(ICDevice*)device;
+- (void)didDownloadFile:(ICCameraFile*)file
+                  error:(NSError*)error
+                options:(NSDictionary*)options
+            contextInfo:(void*)contextInfo;
+@end
+
+@interface CWInterface (LionSDK)
+- (BOOL)associateToNetwork:(CWNetwork*)network
+                  password:(NSString*)password
+                     error:(NSError**)error;
+- (NSSet*)scanForNetworksWithName:(NSString*)networkName error:(NSError**)error;
+@end
+
+@interface CWChannel (LionSDK)
+@property(readonly) CWChannelBand channelBand;
+@end
+
+@interface CWNetwork (LionSDK)
+@property(readonly) CWChannel* wlanChannel;
+@property(readonly) NSInteger rssiValue;
+- (BOOL)supportsSecurity:(CWSecurity)security;
+@end
+
+@interface IOBluetoothHostController (LionSDK)
+- (NSString*)nameAsString;
+- (BluetoothHCIPowerState)powerState;
+@end
+
+@interface IOBluetoothL2CAPChannel (LionSDK)
+@property(readonly) BluetoothL2CAPMTU outgoingMTU;
+@end
+
+@interface IOBluetoothDevice (LionSDK)
+- (NSString*)addressString;
+- (unsigned int)classOfDevice;
+- (BluetoothConnectionHandle)connectionHandle;
+- (BluetoothHCIRSSIValue)rawRSSI;
+- (NSArray*)services;
+- (IOReturn)performSDPQuery:(id)target uuids:(NSArray*)uuids;
+@end
+
+@interface CBPeripheral (LionSDK)
+@property(readonly, nonatomic) CFUUIDRef UUID;
+@property(retain, readonly) NSString* name;
+@property(readonly) BOOL isConnected;
+@end
+
+@interface CBCentralManager (LionSDK)
+@property(readonly) CBCentralManagerState state;
+- (id)initWithDelegate:(id<CBCentralManagerDelegate>)delegate
+                 queue:(dispatch_queue_t)queue;
+- (void)scanForPeripheralsWithServices:(NSArray*)serviceUUIDs
+                               options:(NSDictionary*)options;
+- (void)stopScan;
+@end
+
+@interface CBUUID (LionSDK)
+@property(nonatomic, readonly) NSData* data;
++ (CBUUID*)UUIDWithString:(NSString*)theString;
+@end
+
+BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
+    id object,
+    NSString* notification,
+    NSDictionary* user_info);
+
+#endif  // MAC_OS_X_VERSION_10_7
+
+// Once Chrome no longer supports OSX 10.7, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_8
+
+@interface NSColor (MountainLionSDK)
+- (CGColorRef)CGColor;
+@end
+
+@interface NSUUID (MountainLionSDK)
+- (NSString*)UUIDString;
+@end
+
+@interface NSControl (MountainLionSDK)
+@property BOOL allowsExpansionToolTips;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_8
+
+// Once Chrome no longer supports OSX 10.8, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+
+@interface NSProgress (MavericksSDK)
+
+- (instancetype)initWithParent:(NSProgress*)parentProgressOrNil
+                      userInfo:(NSDictionary*)userInfoOrNil;
+@property(copy) NSString* kind;
+
+@property int64_t totalUnitCount;
+@property int64_t completedUnitCount;
+
+@property(getter=isCancellable) BOOL cancellable;
+@property(getter=isPausable) BOOL pausable;
+@property(readonly, getter=isCancelled) BOOL cancelled;
+@property(readonly, getter=isPaused) BOOL paused;
+@property(copy) void (^cancellationHandler)(void);
+@property(copy) void (^pausingHandler)(void);
+- (void)cancel;
+- (void)pause;
+
+- (void)setUserInfoObject:(id)objectOrNil forKey:(NSString*)key;
+- (NSDictionary*)userInfo;
+
+@property(readonly, getter=isIndeterminate) BOOL indeterminate;
+@property(readonly) double fractionCompleted;
+
+- (void)publish;
+- (void)unpublish;
+
+@end
+
+@interface NSScreen (MavericksSDK)
++ (BOOL)screensHaveSeparateSpaces;
+@end
+
+@interface NSView (MavericksSDK)
+- (void)setCanDrawSubviewsIntoLayer:(BOOL)flag;
+- (void)setAppearance:(NSAppearance*)appearance;
+- (NSAppearance*)effectiveAppearance;
+@end
+
+@interface NSWindow (MavericksSDK)
+- (NSWindowOcclusionState)occlusionState;
+@end
+
+@interface NSAppearance (MavericksSDK)
++ (id<NSObject>)appearanceNamed:(NSString*)name;
+@end
+
+@interface CBPeripheral (MavericksSDK)
+@property(readonly, nonatomic) NSUUID* identifier;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_9
+
 // Once Chrome no longer supports OSX 10.9, everything within this preprocessor
 // block can be removed.
 #if !defined(MAC_OS_X_VERSION_10_10) || \
     MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
 
-@interface NSUserActivity (YosemiteSDK)
-@property(readonly, copy) NSString* activityType;
-@property(copy) NSDictionary* userInfo;
-@property(copy) NSURL* webpageURL;
-- (instancetype)initWithActivityType:(NSString*)activityType;
-- (void)becomeCurrent;
-- (void)invalidate;
-@end
-
 @interface CBUUID (YosemiteSDK)
 - (NSString*)UUIDString;
 @end
@@ -116,36 +503,6 @@
 - (void)setTitlebarAppearsTransparent:(BOOL)flag;
 @end
 
-@interface NSProcessInfo (YosemiteSDK)
-@property(readonly) NSOperatingSystemVersion operatingSystemVersion;
-@end
-
-@interface NSLayoutConstraint (YosemiteSDK)
-@property(getter=isActive) BOOL active;
-+ (void)activateConstraints:(NSArray*)constraints;
-@end
-
-@interface NSVisualEffectView (YosemiteSDK)
-- (void)setState:(NSVisualEffectState)state;
-@end
-
-@class NSVisualEffectView;
-
-@interface CIQRCodeFeature (YosemiteSDK)
-@property(readonly) CGRect bounds;
-@property(readonly) CGPoint topLeft;
-@property(readonly) CGPoint topRight;
-@property(readonly) CGPoint bottomLeft;
-@property(readonly) CGPoint bottomRight;
-@property(readonly, copy) NSString* messageString;
-@end
-
-@class CIQRCodeFeature;
-
-@interface NSView (YosemiteSDK)
-- (BOOL)isAccessibilitySelectorAllowed:(SEL)selector;
-@end
-
 #endif  // MAC_OS_X_VERSION_10_10
 
 // Once Chrome no longer supports OSX 10.10.2, everything within this
@@ -153,91 +510,10 @@
 #if !defined(MAC_OS_X_VERSION_10_10_3) || \
     MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10_3
 
-@interface NSEvent (Yosemite_3_SDK)
+@interface NSEvent (YosemiteSDK)
 @property(readonly) NSInteger stage;
 @end
 
-@interface NSView (Yosemite_3_SDK)
-- (void)setPressureConfiguration:(NSPressureConfiguration*)aConfiguration;
-@end
-
-#endif  // MAC_OS_X_VERSION_10_10
-
-// ----------------------------------------------------------------------------
-// Define NSStrings only available in newer versions of the OSX SDK to force
-// them to be statically linked.
-// ----------------------------------------------------------------------------
-
-extern "C" {
-#if !defined(MAC_OS_X_VERSION_10_11) || \
-    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
-BASE_EXPORT extern NSString* const CIDetectorTypeText;
-#endif  // MAC_OS_X_VERSION_10_11
-}  // extern "C"
-
-// Once Chrome no longer supports OSX 10.10, everything within this
-// preprocessor block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_11) || \
-    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
-
-@class NSLayoutDimension;
-@class NSLayoutXAxisAnchor;
-@class NSLayoutYAxisAnchor;
-
-@interface NSObject (ElCapitanSDK)
-- (NSLayoutConstraint*)constraintEqualToConstant:(CGFloat)c;
-@end
-
-@interface NSView (ElCapitanSDK)
-@property(readonly, strong) NSLayoutXAxisAnchor* leftAnchor;
-@property(readonly, strong) NSLayoutXAxisAnchor* rightAnchor;
-@property(readonly, strong) NSLayoutYAxisAnchor* bottomAnchor;
-@property(readonly, strong) NSLayoutDimension* widthAnchor;
-@end
-
-@interface NSWindow (ElCapitanSDK)
-- (void)performWindowDragWithEvent:(NSEvent*)event;
-@end
-
-@interface CIRectangleFeature (ElCapitanSDK)
-@property(readonly) CGRect bounds;
-@end
-
-@class CIRectangleFeature;
-
-#endif  // MAC_OS_X_VERSION_10_11
-
-// Once Chrome no longer supports OSX 10.11, everything within this
-// preprocessor block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_12) || \
-    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12
-
-@interface NSWindow (SierraSDK)
-@property(class) BOOL allowsAutomaticWindowTabbing;
-@end
-
-#endif  // MAC_OS_X_VERSION_10_12
-
-// Once Chrome no longer supports OSX 10.12.0, everything within this
-// preprocessor block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_12_1) || \
-    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12_1
-
-@interface NSButton (SierraPointOneSDK)
-@property(copy) NSColor* bezelColor;
-@property BOOL imageHugsTitle;
-+ (instancetype)buttonWithTitle:(NSString*)title
-                         target:(id)target
-                         action:(SEL)action;
-+ (instancetype)buttonWithImage:(NSImage*)image
-                         target:(id)target
-                         action:(SEL)action;
-+ (instancetype)buttonWithTitle:(NSString*)title
-                          image:(NSImage*)image
-                         target:(id)target
-                         action:(SEL)action;
-@end
-
 @interface NSView (YosemiteSDK)
 - (void)setPressureConfiguration:(NSPressureConfiguration*)aConfiguration;
 @end
@@ -250,6 +526,8 @@
 // declared in the OSX 10.9+ SDK, so when compiling against an OSX 10.9+ SDK,
 // declare the symbol.
 // ----------------------------------------------------------------------------
+#if defined(MAC_OS_X_VERSION_10_9) && \
+    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9
 BASE_EXPORT extern "C" NSString* const kCWSSIDDidChangeNotification;
-
+#endif
 #endif  // BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
diff --git a/base/mac/sdk_forward_declarations.mm b/base/mac/sdk_forward_declarations.mm
index c624dae..4e1d7ec 100644
--- a/base/mac/sdk_forward_declarations.mm
+++ b/base/mac/sdk_forward_declarations.mm
@@ -4,17 +4,43 @@
 
 #include "base/mac/sdk_forward_declarations.h"
 
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+NSString* const NSWindowWillEnterFullScreenNotification =
+    @"NSWindowWillEnterFullScreenNotification";
+
+NSString* const NSWindowWillExitFullScreenNotification =
+    @"NSWindowWillExitFullScreenNotification";
+
+NSString* const NSWindowDidEnterFullScreenNotification =
+    @"NSWindowDidEnterFullScreenNotification";
+
+NSString* const NSWindowDidExitFullScreenNotification =
+    @"NSWindowDidExitFullScreenNotification";
+
+NSString* const NSWindowDidChangeBackingPropertiesNotification =
+    @"NSWindowDidChangeBackingPropertiesNotification";
+
+NSString* const CBAdvertisementDataServiceDataKey = @"kCBAdvDataServiceData";
+
+NSString* const CBAdvertisementDataServiceUUIDsKey = @"kCBAdvDataServiceUUIDs";
+#endif  // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+NSString* const NSWindowDidChangeOcclusionStateNotification =
+    @"NSWindowDidChangeOcclusionStateNotification";
+
+NSString* const CBAdvertisementDataOverflowServiceUUIDsKey =
+    @"kCBAdvDataOverflowServiceUUIDs";
+
+NSString* const CBAdvertisementDataIsConnectable = @"kCBAdvDataIsConnectable";
+#endif  // MAC_OS_X_VERSION_10_9
+
 #if !defined(MAC_OS_X_VERSION_10_10) || \
     MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
-NSString* const CIDetectorTypeQRCode = @"CIDetectorTypeQRCode";
-
 NSString* const NSUserActivityTypeBrowsingWeb =
     @"NSUserActivityTypeBrowsingWeb";
 
 NSString* const NSAppearanceNameVibrantDark = @"NSAppearanceNameVibrantDark";
 #endif  // MAC_OS_X_VERSION_10_10
-
-#if !defined(MAC_OS_X_VERSION_10_11) || \
-    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
-NSString* const CIDetectorTypeText = @"CIDetectorTypeText";
-#endif  // MAC_OS_X_VERSION_10_11
diff --git a/base/memory/aligned_memory_unittest.cc b/base/memory/aligned_memory_unittest.cc
index 892c50e..abe0cf3 100644
--- a/base/memory/aligned_memory_unittest.cc
+++ b/base/memory/aligned_memory_unittest.cc
@@ -44,6 +44,10 @@
 
   EXPECT_ALIGNED(raw8.void_data(), 8);
   EXPECT_ALIGNED(raw16.void_data(), 16);
+
+  // TODO(ios): __attribute__((aligned(X))) with X >= 128 does not works on
+  // the stack when building for arm64 on iOS, http://crbug.com/349003
+#if !(defined(OS_IOS) && defined(ARCH_CPU_ARM64))
   EXPECT_ALIGNED(raw128.void_data(), 128);
 
   // NaCl x86-64 compiler emits non-validating instructions for >128
@@ -57,10 +61,14 @@
   EXPECT_EQ(256u, ALIGNOF(raw256));
   EXPECT_ALIGNED(raw256.void_data(), 256);
 
+  // TODO(ios): This test hits an armv7 bug in clang. crbug.com/138066
+#if !(defined(OS_IOS) && defined(ARCH_CPU_ARM_FAMILY))
   AlignedMemory<8, 4096> raw4096;
   EXPECT_EQ(4096u, ALIGNOF(raw4096));
   EXPECT_ALIGNED(raw4096.void_data(), 4096);
+#endif  // !(defined(OS_IOS) && defined(ARCH_CPU_ARM_FAMILY))
 #endif  // !(defined(OS_NACL) && defined(ARCH_CPU_X86_64))
+#endif  // !(defined(OS_IOS) && defined(ARCH_CPU_ARM64))
 }
 
 TEST(AlignedMemoryTest, DynamicAllocation) {
diff --git a/base/memory/linked_ptr.h b/base/memory/linked_ptr.h
index 6851286..649dc10 100644
--- a/base/memory/linked_ptr.h
+++ b/base/memory/linked_ptr.h
@@ -69,7 +69,7 @@
   mutable linked_ptr_internal const* next_;
 };
 
-// TODO(http://crbug.com/556939): DEPRECATED: Use unique_ptr instead (now that
+// TODO(http://crbug.com/556939): DEPRECATED: Use scoped_ptr instead (now that
 // we have support for moveable types inside STL containers).
 template <typename T>
 class linked_ptr {
diff --git a/base/memory/ref_counted.cc b/base/memory/ref_counted.cc
index 46bbd7a..f5924d0 100644
--- a/base/memory/ref_counted.cc
+++ b/base/memory/ref_counted.cc
@@ -10,32 +10,37 @@
 namespace subtle {
 
 bool RefCountedThreadSafeBase::HasOneRef() const {
-  return AtomicRefCountIsOne(&ref_count_);
+  return AtomicRefCountIsOne(
+      &const_cast<RefCountedThreadSafeBase*>(this)->ref_count_);
 }
 
-RefCountedThreadSafeBase::RefCountedThreadSafeBase() = default;
+RefCountedThreadSafeBase::RefCountedThreadSafeBase() : ref_count_(0) {
+#ifndef NDEBUG
+  in_dtor_ = false;
+#endif
+}
 
 RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
   DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
                       "calling Release()";
 #endif
 }
 
 void RefCountedThreadSafeBase::AddRef() const {
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
   DCHECK(!in_dtor_);
 #endif
   AtomicRefCountInc(&ref_count_);
 }
 
 bool RefCountedThreadSafeBase::Release() const {
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
   DCHECK(!in_dtor_);
   DCHECK(!AtomicRefCountIsZero(&ref_count_));
 #endif
   if (!AtomicRefCountDec(&ref_count_)) {
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
     in_dtor_ = true;
 #endif
     return true;
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index 9dd09ad..b026d9a 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -14,8 +14,10 @@
 #include "base/atomic_ref_count.h"
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
-#include "base/logging.h"
 #include "base/macros.h"
+#ifndef NDEBUG
+#include "base/logging.h"
+#endif
 #include "base/threading/thread_collision_warner.h"
 #include "build/build_config.h"
 
@@ -30,16 +32,16 @@
  protected:
   RefCountedBase()
       : ref_count_(0)
-#if DCHECK_IS_ON()
-        , in_dtor_(false)
-#endif
-  {
+  #ifndef NDEBUG
+      , in_dtor_(false)
+  #endif
+      {
   }
 
   ~RefCountedBase() {
-#if DCHECK_IS_ON()
+  #ifndef NDEBUG
     DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
-#endif
+  #endif
   }
 
 
@@ -48,9 +50,9 @@
     // Current thread books the critical section "AddRelease"
     // without release it.
     // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
-#if DCHECK_IS_ON()
+  #ifndef NDEBUG
     DCHECK(!in_dtor_);
-#endif
+  #endif
     ++ref_count_;
   }
 
@@ -60,21 +62,21 @@
     // Current thread books the critical section "AddRelease"
     // without release it.
     // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
-#if DCHECK_IS_ON()
+  #ifndef NDEBUG
     DCHECK(!in_dtor_);
-#endif
+  #endif
     if (--ref_count_ == 0) {
-#if DCHECK_IS_ON()
+  #ifndef NDEBUG
       in_dtor_ = true;
-#endif
+  #endif
       return true;
     }
     return false;
   }
 
  private:
-  mutable size_t ref_count_;
-#if DCHECK_IS_ON()
+  mutable int ref_count_;
+#ifndef NDEBUG
   mutable bool in_dtor_;
 #endif
 
@@ -97,9 +99,9 @@
   bool Release() const;
 
  private:
-  mutable AtomicRefCount ref_count_ = 0;
-#if DCHECK_IS_ON()
-  mutable bool in_dtor_ = false;
+  mutable AtomicRefCount ref_count_;
+#ifndef NDEBUG
+  mutable bool in_dtor_;
 #endif
 
   DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
@@ -124,7 +126,7 @@
 template <class T>
 class RefCounted : public subtle::RefCountedBase {
  public:
-  RefCounted() = default;
+  RefCounted() {}
 
   void AddRef() const {
     subtle::RefCountedBase::AddRef();
@@ -137,7 +139,7 @@
   }
 
  protected:
-  ~RefCounted() = default;
+  ~RefCounted() {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
@@ -174,7 +176,7 @@
 template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
 class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
  public:
-  RefCountedThreadSafe() = default;
+  RefCountedThreadSafe() {}
 
   void AddRef() const {
     subtle::RefCountedThreadSafeBase::AddRef();
@@ -187,7 +189,7 @@
   }
 
  protected:
-  ~RefCountedThreadSafe() = default;
+  ~RefCountedThreadSafe() {}
 
  private:
   friend struct DefaultRefCountedThreadSafeTraits<T>;
@@ -211,7 +213,7 @@
 
  private:
   friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
-  ~RefCountedData() = default;
+  ~RefCountedData() {}
 };
 
 }  // namespace base
@@ -224,9 +226,6 @@
 //
 //   class MyFoo : public RefCounted<MyFoo> {
 //    ...
-//    private:
-//     friend class RefCounted<MyFoo>;  // Allow destruction by RefCounted<>.
-//     ~MyFoo();                        // Destructor must be private/protected.
 //   };
 //
 //   void some_function() {
@@ -238,7 +237,7 @@
 //   void some_other_function() {
 //     scoped_refptr<MyFoo> foo = new MyFoo();
 //     ...
-//     foo = nullptr;  // explicitly releases |foo|
+//     foo = NULL;  // explicitly releases |foo|
 //     ...
 //     if (foo)
 //       foo->Method(param);
@@ -253,7 +252,7 @@
 //     scoped_refptr<MyFoo> b;
 //
 //     b.swap(a);
-//     // now, |b| references the MyFoo object, and |a| references nullptr.
+//     // now, |b| references the MyFoo object, and |a| references NULL.
 //   }
 //
 // To make both |a| and |b| in the above example reference the same MyFoo
@@ -272,7 +271,8 @@
  public:
   typedef T element_type;
 
-  scoped_refptr() {}
+  scoped_refptr() : ptr_(NULL) {
+  }
 
   scoped_refptr(T* p) : ptr_(p) {
     if (ptr_)
@@ -314,12 +314,12 @@
   T* get() const { return ptr_; }
 
   T& operator*() const {
-    assert(ptr_ != nullptr);
+    assert(ptr_ != NULL);
     return *ptr_;
   }
 
   T* operator->() const {
-    assert(ptr_ != nullptr);
+    assert(ptr_ != NULL);
     return ptr_;
   }
 
@@ -382,7 +382,7 @@
   }
 
  protected:
-  T* ptr_ = nullptr;
+  T* ptr_;
 
  private:
   // Friend required for move constructors that set r.ptr_ to null.
@@ -397,13 +397,11 @@
   static void Release(T* ptr);
 };
 
-// static
 template <typename T>
 void scoped_refptr<T>::AddRef(T* ptr) {
   ptr->AddRef();
 }
 
-// static
 template <typename T>
 void scoped_refptr<T>::Release(T* ptr) {
   ptr->Release();
diff --git a/base/memory/ref_counted_delete_on_message_loop.h b/base/memory/ref_counted_delete_on_message_loop.h
new file mode 100644
index 0000000..de194e8
--- /dev/null
+++ b/base/memory/ref_counted_delete_on_message_loop.h
@@ -0,0 +1,75 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
+#define BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
+
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// RefCountedDeleteOnMessageLoop is similar to RefCountedThreadSafe, and ensures
+// that the object will be deleted on a specified message loop.
+//
+// Sample usage:
+// class Foo : public RefCountedDeleteOnMessageLoop<Foo> {
+//
+//   Foo(scoped_refptr<SingleThreadTaskRunner> loop)
+//       : RefCountedDeleteOnMessageLoop<Foo>(std::move(loop)) {}
+//   ...
+//  private:
+//   friend class RefCountedDeleteOnMessageLoop<Foo>;
+//   friend class DeleteHelper<Foo>;
+//
+//   ~Foo();
+// };
+
+// TODO(skyostil): Rename this to RefCountedDeleteOnTaskRunner.
+template <class T>
+class RefCountedDeleteOnMessageLoop : public subtle::RefCountedThreadSafeBase {
+ public:
+  // This constructor will accept a MessageL00pProxy object, but new code should
+  // prefer a SingleThreadTaskRunner. A SingleThreadTaskRunner for the
+  // MessageLoop on the current thread can be acquired by calling
+  // MessageLoop::current()->task_runner().
+  RefCountedDeleteOnMessageLoop(
+      scoped_refptr<SingleThreadTaskRunner> task_runner)
+      : task_runner_(std::move(task_runner)) {
+    DCHECK(task_runner_);
+  }
+
+  void AddRef() const {
+    subtle::RefCountedThreadSafeBase::AddRef();
+  }
+
+  void Release() const {
+    if (subtle::RefCountedThreadSafeBase::Release())
+      DestructOnMessageLoop();
+  }
+
+ protected:
+  friend class DeleteHelper<RefCountedDeleteOnMessageLoop>;
+  ~RefCountedDeleteOnMessageLoop() {}
+
+  void DestructOnMessageLoop() const {
+    const T* t = static_cast<const T*>(this);
+    if (task_runner_->BelongsToCurrentThread())
+      delete t;
+    else
+      task_runner_->DeleteSoon(FROM_HERE, t);
+  }
+
+  scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnMessageLoop);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index 65c15d2..7c4e07a 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -4,8 +4,6 @@
 
 #include "base/memory/ref_counted.h"
 
-#include <utility>
-
 #include "base/test/opaque_ref_counted.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -158,34 +156,13 @@
 }
 
 TEST(RefCountedUnitTest, ScopedRefPtrToOpaque) {
-  scoped_refptr<base::OpaqueRefCounted> initial = base::MakeOpaqueRefCounted();
-  base::TestOpaqueRefCounted(initial);
+  scoped_refptr<base::OpaqueRefCounted> p = base::MakeOpaqueRefCounted();
+  base::TestOpaqueRefCounted(p);
 
-  scoped_refptr<base::OpaqueRefCounted> assigned;
-  assigned = initial;
-
-  scoped_refptr<base::OpaqueRefCounted> copied(initial);
-
-  scoped_refptr<base::OpaqueRefCounted> moved(std::move(initial));
-
-  scoped_refptr<base::OpaqueRefCounted> move_assigned;
-  move_assigned = std::move(moved);
-}
-
-TEST(RefCountedUnitTest, ScopedRefPtrToOpaqueThreadSafe) {
-  scoped_refptr<base::OpaqueRefCountedThreadSafe> initial =
-      base::MakeOpaqueRefCountedThreadSafe();
-  base::TestOpaqueRefCountedThreadSafe(initial);
-
-  scoped_refptr<base::OpaqueRefCountedThreadSafe> assigned;
-  assigned = initial;
-
-  scoped_refptr<base::OpaqueRefCountedThreadSafe> copied(initial);
-
-  scoped_refptr<base::OpaqueRefCountedThreadSafe> moved(std::move(initial));
-
-  scoped_refptr<base::OpaqueRefCountedThreadSafe> move_assigned;
-  move_assigned = std::move(moved);
+  scoped_refptr<base::OpaqueRefCounted> q;
+  q = p;
+  base::TestOpaqueRefCounted(p);
+  base::TestOpaqueRefCounted(q);
 }
 
 TEST(RefCountedUnitTest, BooleanTesting) {
diff --git a/base/memory/scoped_vector.h b/base/memory/scoped_vector.h
index a320b1e..f3581ea 100644
--- a/base/memory/scoped_vector.h
+++ b/base/memory/scoped_vector.h
@@ -12,6 +12,7 @@
 
 #include "base/logging.h"
 #include "base/macros.h"
+#include "base/stl_util.h"
 
 // ScopedVector wraps a vector deleting the elements from its
 // destructor.
@@ -87,10 +88,8 @@
 
   // Resize, deleting elements in the disappearing range if we are shrinking.
   void resize(size_t new_size) {
-    if (v_.size() > new_size) {
-      for (auto it = v_.begin() + new_size; it != v_.end(); ++it)
-        delete *it;
-    }
+    if (v_.size() > new_size)
+      STLDeleteContainerPointers(v_.begin() + new_size, v_.end());
     v_.resize(new_size);
   }
 
@@ -99,11 +98,7 @@
     v_.assign(begin, end);
   }
 
-  void clear() {
-    for (auto* item : *this)
-      delete item;
-    v_.clear();
-  }
+  void clear() { STLDeleteElements(&v_); }
 
   // Like |clear()|, but doesn't delete any elements.
   void weak_clear() { v_.clear(); }
@@ -129,8 +124,7 @@
   }
 
   iterator erase(iterator first, iterator last) {
-    for (auto it = first; it != last; ++it)
-      delete *it;
+    STLDeleteContainerPointers(first, last);
     return v_.erase(first, last);
   }
 
diff --git a/base/memory/scoped_vector_unittest.cc b/base/memory/scoped_vector_unittest.cc
index 916dab9..ea3dcdc 100644
--- a/base/memory/scoped_vector_unittest.cc
+++ b/base/memory/scoped_vector_unittest.cc
@@ -322,7 +322,7 @@
     EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
 }
 
-// Assertions for push_back(unique_ptr).
+// Assertions for push_back(scoped_ptr).
 TEST(ScopedVectorTest, PushBackScopedPtr) {
   int delete_counter = 0;
   std::unique_ptr<DeleteCounter> elem(new DeleteCounter(&delete_counter));
diff --git a/base/memory/shared_memory.h b/base/memory/shared_memory.h
index 4b66cc6..e1c9fa7 100644
--- a/base/memory/shared_memory.h
+++ b/base/memory/shared_memory.h
@@ -34,32 +34,31 @@
 
 // Options for creating a shared memory object.
 struct BASE_EXPORT SharedMemoryCreateOptions {
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The type of OS primitive that should back the SharedMemory object.
-  SharedMemoryHandle::Type type = SharedMemoryHandle::MACH;
-#else
+  SharedMemoryCreateOptions();
+
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
   // DEPRECATED (crbug.com/345734):
   // If NULL, the object is anonymous.  This pointer is owned by the caller
   // and must live through the call to Create().
-  const std::string* name_deprecated = nullptr;
+  const std::string* name_deprecated;
 
   // DEPRECATED (crbug.com/345734):
   // If true, and the shared memory already exists, Create() will open the
   // existing shared memory and ignore the size parameter.  If false,
   // shared memory must not exist.  This flag is meaningless unless
   // name_deprecated is non-NULL.
-  bool open_existing_deprecated = false;
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+  bool open_existing_deprecated;
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
 
   // Size of the shared memory object to be created.
   // When opening an existing object, this has no effect.
-  size_t size = 0;
+  size_t size;
 
   // If true, mappings might need to be made executable later.
-  bool executable = false;
+  bool executable;
 
   // If true, the file can be shared read-only to a process.
-  bool share_read_only = false;
+  bool share_read_only;
 };
 
 // Platform abstraction for shared memory.  Provides a C++ wrapper
@@ -104,7 +103,7 @@
   // The caller is responsible for destroying the duplicated OS primitive.
   static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
 
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
   // This method requires that the SharedMemoryHandle is backed by a POSIX fd.
   static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
 #endif
@@ -195,13 +194,6 @@
   // identifier is not portable.
   SharedMemoryHandle handle() const;
 
-  // Returns the underlying OS handle for this segment. The caller also gets
-  // ownership of the handle. This is logically equivalent to:
-  //   SharedMemoryHandle dup = DuplicateHandle(handle());
-  //   Close();
-  //   return dup;
-  SharedMemoryHandle TakeHandle();
-
   // Closes the open shared memory segment. The memory will remain mapped if
   // it was previously mapped.
   // It is safe to call Close repeatedly.
@@ -255,36 +247,16 @@
     return ShareToProcessCommon(process, new_handle, true, SHARE_CURRENT_MODE);
   }
 
-#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
-    !defined(OS_NACL)
-  using UniqueId = std::pair<dev_t, ino_t>;
-
-  struct UniqueIdHash {
-    size_t operator()(const UniqueId& id) const {
-      return HashInts(id.first, id.second);
-    }
-  };
-
-  // Returns a unique ID for this shared memory's handle. Note this function may
-  // access file system and be slow.
-  bool GetUniqueId(UniqueId* id) const;
-#endif
-
  private:
 #if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
-    (!defined(OS_MACOSX) || defined(OS_IOS))
+    !(defined(OS_MACOSX) && !defined(OS_IOS))
+  bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly);
   bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
 #endif
-
   enum ShareMode {
     SHARE_READONLY,
     SHARE_CURRENT_MODE,
   };
-
-#if defined(OS_MACOSX)
-  bool Share(SharedMemoryHandle* new_handle, ShareMode share_mode);
-#endif
-
   bool ShareToProcessCommon(ProcessHandle process,
                             SharedMemoryHandle* new_handle,
                             bool close_self,
@@ -299,12 +271,6 @@
 #elif defined(OS_MACOSX) && !defined(OS_IOS)
   // The OS primitive that backs the shared memory region.
   SharedMemoryHandle shm_;
-
-  // The mechanism by which the memory is mapped. Only valid if |memory_| is not
-  // |nullptr|.
-  SharedMemoryHandle::Type mapped_memory_mechanism_;
-
-  int readonly_mapped_file_;
 #elif defined(OS_POSIX)
   int                mapped_file_;
   int                readonly_mapped_file_;
@@ -316,7 +282,6 @@
 
   DISALLOW_COPY_AND_ASSIGN(SharedMemory);
 };
-
 }  // namespace base
 
 #endif  // BASE_MEMORY_SHARED_MEMORY_H_
diff --git a/base/memory/shared_memory_handle.h b/base/memory/shared_memory_handle.h
index dc33eea..8eff26b 100644
--- a/base/memory/shared_memory_handle.h
+++ b/base/memory/shared_memory_handle.h
@@ -15,7 +15,6 @@
 #elif defined(OS_MACOSX) && !defined(OS_IOS)
 #include <mach/mach.h>
 #include "base/base_export.h"
-#include "base/file_descriptor_posix.h"
 #include "base/macros.h"
 #include "base/process/process_handle.h"
 #elif defined(OS_POSIX)
@@ -25,6 +24,8 @@
 
 namespace base {
 
+class Pickle;
+
 // SharedMemoryHandle is a platform specific type which represents
 // the underlying OS handle to a shared memory segment.
 #if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
@@ -84,25 +85,9 @@
 #else
 class BASE_EXPORT SharedMemoryHandle {
  public:
-  enum Type {
-    // The SharedMemoryHandle is backed by a POSIX fd.
-    POSIX,
-    // The SharedMemoryHandle is backed by the Mach primitive "memory object".
-    MACH,
-  };
-
   // The default constructor returns an invalid SharedMemoryHandle.
   SharedMemoryHandle();
 
-  // Constructs a SharedMemoryHandle backed by the components of a
-  // FileDescriptor. The newly created instance has the same ownership semantics
-  // as base::FileDescriptor. This typically means that the SharedMemoryHandle
-  // takes ownership of the |fd| if |auto_close| is true. Unfortunately, it's
-  // common for existing code to make shallow copies of SharedMemoryHandle, and
-  // the one that is finally passed into a base::SharedMemory is the one that
-  // "consumes" the fd.
-  explicit SharedMemoryHandle(const base::FileDescriptor& file_descriptor);
-
   // Makes a Mach-based SharedMemoryHandle of the given size. On error,
   // subsequent calls to IsValid() return false.
   explicit SharedMemoryHandle(mach_vm_size_t size);
@@ -137,7 +122,7 @@
   mach_port_t GetMemoryObject() const;
 
   // Returns false on a failure to determine the size. On success, populates the
-  // output variable |size|.
+  // output variable |size|. Returns 0 if the handle is invalid.
   bool GetSize(size_t* size) const;
 
   // The SharedMemoryHandle must be valid.
@@ -153,36 +138,24 @@
   bool OwnershipPassesToIPC() const;
 
  private:
-  friend class SharedMemory;
-
   // Shared code between copy constructor and operator=.
   void CopyRelevantData(const SharedMemoryHandle& handle);
 
-  Type type_;
+  mach_port_t memory_object_ = MACH_PORT_NULL;
 
-  // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
-  // mach port. |type_| determines the backing member.
-  union {
-    FileDescriptor file_descriptor_;
+  // The size of the shared memory region when |type_| is MACH. Only
+  // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+  mach_vm_size_t size_ = 0;
 
-    struct {
-      mach_port_t memory_object_;
+  // The pid of the process in which |memory_object_| is usable. Only
+  // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+  base::ProcessId pid_ = 0;
 
-      // The size of the shared memory region when |type_| is MACH. Only
-      // relevant if |memory_object_| is not |MACH_PORT_NULL|.
-      mach_vm_size_t size_;
-
-      // The pid of the process in which |memory_object_| is usable. Only
-      // relevant if |memory_object_| is not |MACH_PORT_NULL|.
-      base::ProcessId pid_;
-
-      // Whether passing this object as a parameter to an IPC message passes
-      // ownership of |memory_object_| to the IPC stack. This is meant to mimic
-      // the behavior of the |auto_close| parameter of FileDescriptor.
-      // Defaults to |false|.
-      bool ownership_passes_to_ipc_;
-    };
-  };
+  // Whether passing this object as a parameter to an IPC message passes
+  // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+  // the behavior of the |auto_close| parameter of FileDescriptor.
+  // Defaults to |false|.
+  bool ownership_passes_to_ipc_ = false;
 };
 #endif
 
diff --git a/base/memory/shared_memory_handle_mac.cc b/base/memory/shared_memory_handle_mac.cc
index 9dfd3c1..ad470be 100644
--- a/base/memory/shared_memory_handle_mac.cc
+++ b/base/memory/shared_memory_handle_mac.cc
@@ -10,20 +10,13 @@
 #include <unistd.h>
 
 #include "base/mac/mac_util.h"
-#include "base/mac/mach_logging.h"
 #include "base/posix/eintr_wrapper.h"
 
 namespace base {
 
-SharedMemoryHandle::SharedMemoryHandle()
-    : type_(MACH), memory_object_(MACH_PORT_NULL) {}
-
-SharedMemoryHandle::SharedMemoryHandle(
-    const base::FileDescriptor& file_descriptor)
-    : type_(POSIX), file_descriptor_(file_descriptor) {}
+SharedMemoryHandle::SharedMemoryHandle() {}
 
 SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
-  type_ = MACH;
   mach_port_t named_right;
   kern_return_t kr = mach_make_memory_entry_64(
       mach_task_self(),
@@ -46,8 +39,7 @@
 SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
                                        mach_vm_size_t size,
                                        base::ProcessId pid)
-    : type_(MACH),
-      memory_object_(memory_object),
+    : memory_object_(memory_object),
       size_(size),
       pid_(pid),
       ownership_passes_to_ipc_(false) {}
@@ -61,50 +53,29 @@
   if (this == &handle)
     return *this;
 
-  type_ = handle.type_;
   CopyRelevantData(handle);
   return *this;
 }
 
 SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
-  switch (type_) {
-    case POSIX: {
-      if (!IsValid())
-        return SharedMemoryHandle();
-      int duped_fd = HANDLE_EINTR(dup(file_descriptor_.fd));
-      if (duped_fd < 0)
-        return SharedMemoryHandle();
-      return SharedMemoryHandle(FileDescriptor(duped_fd, true));
-    }
-    case MACH: {
-      if (!IsValid())
-        return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
+  if (!IsValid())
+    return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
 
-      // Increment the ref count.
-      kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
-                                            MACH_PORT_RIGHT_SEND, 1);
-      DCHECK_EQ(kr, KERN_SUCCESS);
-      SharedMemoryHandle handle(*this);
-      handle.SetOwnershipPassesToIPC(true);
-      return handle;
-    }
-  }
+  // Increment the ref count.
+  kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+                                        MACH_PORT_RIGHT_SEND, 1);
+  DCHECK_EQ(kr, KERN_SUCCESS);
+  SharedMemoryHandle handle(*this);
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
 }
 
 bool SharedMemoryHandle::operator==(const SharedMemoryHandle& handle) const {
   if (!IsValid() && !handle.IsValid())
     return true;
 
-  if (type_ != handle.type_)
-    return false;
-
-  switch (type_) {
-    case POSIX:
-      return file_descriptor_.fd == handle.file_descriptor_.fd;
-    case MACH:
-      return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
-             pid_ == handle.pid_;
-  }
+  return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
+         pid_ == handle.pid_;
 }
 
 bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
@@ -112,16 +83,10 @@
 }
 
 bool SharedMemoryHandle::IsValid() const {
-  switch (type_) {
-    case POSIX:
-      return file_descriptor_.fd >= 0;
-    case MACH:
-      return memory_object_ != MACH_PORT_NULL;
-  }
+  return memory_object_ != MACH_PORT_NULL;
 }
 
 mach_port_t SharedMemoryHandle::GetMemoryObject() const {
-  DCHECK_EQ(type_, MACH);
   return memory_object_;
 }
 
@@ -131,19 +96,8 @@
     return true;
   }
 
-  switch (type_) {
-    case SharedMemoryHandle::POSIX:
-      struct stat st;
-      if (fstat(file_descriptor_.fd, &st) != 0)
-        return false;
-      if (st.st_size < 0)
-        return false;
-      *size = st.st_size;
-      return true;
-    case SharedMemoryHandle::MACH:
-      *size = size_;
-      return true;
-  }
+  *size = size_;
+  return true;
 }
 
 bool SharedMemoryHandle::MapAt(off_t offset,
@@ -151,69 +105,42 @@
                                void** memory,
                                bool read_only) {
   DCHECK(IsValid());
-  switch (type_) {
-    case SharedMemoryHandle::POSIX:
-      *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
-                     MAP_SHARED, file_descriptor_.fd, offset);
-      return *memory != MAP_FAILED;
-    case SharedMemoryHandle::MACH:
-      DCHECK_EQ(pid_, GetCurrentProcId());
-      kern_return_t kr = mach_vm_map(
-          mach_task_self(),
-          reinterpret_cast<mach_vm_address_t*>(memory),    // Output parameter
-          bytes,
-          0,                                               // Alignment mask
-          VM_FLAGS_ANYWHERE,
-          memory_object_,
-          offset,
-          FALSE,                                           // Copy
-          VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE),  // Current protection
-          VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK,  // Maximum protection
-          VM_INHERIT_NONE);
-      return kr == KERN_SUCCESS;
-  }
+  DCHECK_EQ(pid_, GetCurrentProcId());
+  kern_return_t kr = mach_vm_map(
+      mach_task_self(),
+      reinterpret_cast<mach_vm_address_t*>(memory),  // Output parameter
+      bytes,
+      0,  // Alignment mask
+      VM_FLAGS_ANYWHERE, memory_object_, offset,
+      FALSE,                                           // Copy
+      VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE),  // Current protection
+      VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK,  // Maximum protection
+      VM_INHERIT_NONE);
+  return kr == KERN_SUCCESS;
 }
 
 void SharedMemoryHandle::Close() const {
   if (!IsValid())
     return;
 
-  switch (type_) {
-    case POSIX:
-      if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
-        DPLOG(ERROR) << "Error closing fd";
-      break;
-    case MACH:
-      kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
-      if (kr != KERN_SUCCESS)
-        MACH_DLOG(ERROR, kr) << "Error deallocating mach port";
-      break;
-  }
+  kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+  if (kr != KERN_SUCCESS)
+    DPLOG(ERROR) << "Error deallocating mach port: " << kr;
 }
 
 void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
-  DCHECK_EQ(type_, MACH);
   ownership_passes_to_ipc_ = ownership_passes;
 }
 
 bool SharedMemoryHandle::OwnershipPassesToIPC() const {
-  DCHECK_EQ(type_, MACH);
   return ownership_passes_to_ipc_;
 }
 
 void SharedMemoryHandle::CopyRelevantData(const SharedMemoryHandle& handle) {
-  type_ = handle.type_;
-  switch (type_) {
-    case POSIX:
-      file_descriptor_ = handle.file_descriptor_;
-      break;
-    case MACH:
-      memory_object_ = handle.memory_object_;
-      size_ = handle.size_;
-      pid_ = handle.pid_;
-      ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
-      break;
-  }
+  memory_object_ = handle.memory_object_;
+  size_ = handle.size_;
+  pid_ = handle.pid_;
+  ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
 }
 
 }  // namespace base
diff --git a/base/memory/shared_memory_helper.cc b/base/memory/shared_memory_helper.cc
deleted file mode 100644
index 7fbfb7a..0000000
--- a/base/memory/shared_memory_helper.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_helper.h"
-
-#include "base/threading/thread_restrictions.h"
-
-namespace base {
-
-struct ScopedPathUnlinkerTraits {
-  static const FilePath* InvalidValue() { return nullptr; }
-
-  static void Free(const FilePath* path) {
-    if (unlink(path->value().c_str()))
-      PLOG(WARNING) << "unlink";
-  }
-};
-
-// Unlinks the FilePath when the object is destroyed.
-using ScopedPathUnlinker =
-    ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
-
-#if !defined(OS_ANDROID)
-bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
-                                 ScopedFILE* fp,
-                                 ScopedFD* readonly_fd,
-                                 FilePath* path) {
-#if !(defined(OS_MACOSX) && !defined(OS_IOS))
-  // It doesn't make sense to have a open-existing private piece of shmem
-  DCHECK(!options.open_existing_deprecated);
-#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS)
-  // Q: Why not use the shm_open() etc. APIs?
-  // A: Because they're limited to 4mb on OS X.  FFFFFFFUUUUUUUUUUU
-  FilePath directory;
-  ScopedPathUnlinker path_unlinker;
-  if (!GetShmemTempDir(options.executable, &directory))
-    return false;
-
-  fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
-
-  if (!*fp)
-    return false;
-
-  // Deleting the file prevents anyone else from mapping it in (making it
-  // private), and prevents the need for cleanup (once the last fd is
-  // closed, it is truly freed).
-  path_unlinker.reset(path);
-
-  if (options.share_read_only) {
-    // Also open as readonly so that we can ShareReadOnlyToProcess.
-    readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
-    if (!readonly_fd->is_valid()) {
-      DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
-      fp->reset();
-      return false;
-    }
-  }
-  return true;
-}
-
-bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd, int* mapped_file,
-                    int* readonly_mapped_file) {
-  DCHECK_EQ(-1, *mapped_file);
-  DCHECK_EQ(-1, *readonly_mapped_file);
-  if (fp == NULL)
-    return false;
-
-  // This function theoretically can block on the disk, but realistically
-  // the temporary files we create will just go into the buffer cache
-  // and be deleted before they ever make it out to disk.
-  base::ThreadRestrictions::ScopedAllowIO allow_io;
-
-  if (readonly_fd.is_valid()) {
-    struct stat st = {};
-    if (fstat(fileno(fp.get()), &st))
-      NOTREACHED();
-
-    struct stat readonly_st = {};
-    if (fstat(readonly_fd.get(), &readonly_st))
-      NOTREACHED();
-    if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
-      LOG(ERROR) << "writable and read-only inodes don't match; bailing";
-      return false;
-    }
-  }
-
-  *mapped_file = HANDLE_EINTR(dup(fileno(fp.get())));
-  if (*mapped_file == -1) {
-    NOTREACHED() << "Call to dup failed, errno=" << errno;
-  }
-  *readonly_mapped_file = readonly_fd.release();
-
-  return true;
-}
-#endif  // !defined(OS_ANDROID)
-
-}  // namespace base
diff --git a/base/memory/shared_memory_helper.h b/base/memory/shared_memory_helper.h
deleted file mode 100644
index b515828..0000000
--- a/base/memory/shared_memory_helper.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SHARED_MEMORY_HELPER_H_
-#define BASE_MEMORY_SHARED_MEMORY_HELPER_H_
-
-#include "base/memory/shared_memory.h"
-
-#include <fcntl.h>
-
-namespace base {
-
-#if !defined(OS_ANDROID)
-// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
-// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
-// options.share_read_only is true. |path| is populated with the location of
-// the file before it was unlinked.
-// Returns false if there's an unhandled failure.
-bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
-                                 ScopedFILE* fp,
-                                 ScopedFD* readonly_fd,
-                                 FilePath* path);
-
-// Takes the outputs of CreateAnonymousSharedMemory and maps them properly to
-// |mapped_file| or |readonly_mapped_file|, depending on which one is populated.
-bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd, int* mapped_file,
-                    int* readonly_mapped_file);
-#endif
-
-}  // namespace base
-
-#endif  // BASE_MEMORY_SHARED_MEMORY_HELPER_H_
diff --git a/base/memory/shared_memory_mac.cc b/base/memory/shared_memory_mac.cc
index d376daa..d15c632 100644
--- a/base/memory/shared_memory_mac.cc
+++ b/base/memory/shared_memory_mac.cc
@@ -4,33 +4,22 @@
 
 #include "base/memory/shared_memory.h"
 
-#include <errno.h>
 #include <mach/mach_vm.h>
-#include <stddef.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
 
 #include "base/files/file_util.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
+#include "base/mac/foundation_util.h"
 #include "base/mac/mac_util.h"
 #include "base/mac/scoped_mach_vm.h"
-#include "base/memory/shared_memory_helper.h"
 #include "base/metrics/field_trial.h"
 #include "base/metrics/histogram_macros.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/posix/safe_strerror.h"
 #include "base/process/process_metrics.h"
+#include "base/profiler/scoped_tracker.h"
 #include "base/scoped_generic.h"
 #include "base/strings/utf_string_conversions.h"
-#include "base/threading/thread_restrictions.h"
 #include "build/build_config.h"
 
-#if defined(OS_MACOSX)
-#include "base/mac/foundation_util.h"
-#endif  // OS_MACOSX
-
 namespace base {
 
 namespace {
@@ -78,21 +67,18 @@
   return true;
 }
 
-
 }  // namespace
 
+SharedMemoryCreateOptions::SharedMemoryCreateOptions()
+    : size(0),
+      executable(false),
+      share_read_only(false) {}
+
 SharedMemory::SharedMemory()
-    : mapped_memory_mechanism_(SharedMemoryHandle::MACH),
-      readonly_mapped_file_(-1),
-      mapped_size_(0),
-      memory_(NULL),
-      read_only_(false),
-      requested_size_(0) {}
+    : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
 
 SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
     : shm_(handle),
-      mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
-      readonly_mapped_file_(-1),
       mapped_size_(0),
       memory_(NULL),
       read_only_(read_only),
@@ -120,7 +106,8 @@
 
 // static
 size_t SharedMemory::GetHandleLimit() {
-  return GetMaxFds();
+  // This should be effectively unlimited on OS X.
+  return 10000;
 }
 
 // static
@@ -129,12 +116,6 @@
   return handle.Duplicate();
 }
 
-// static
-int SharedMemory::GetFdFromSharedMemoryHandle(
-    const SharedMemoryHandle& handle) {
-  return handle.file_descriptor_.fd;
-}
-
 bool SharedMemory::CreateAndMapAnonymous(size_t size) {
   return CreateAnonymous(size) && Map(size);
 }
@@ -149,53 +130,20 @@
 // Chromium mostly only uses the unique/private shmem as specified by
 // "name == L"". The exception is in the StatsTable.
 bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+  // is fixed.
+  tracked_objects::ScopedTracker tracking_profile1(
+      FROM_HERE_WITH_EXPLICIT_FUNCTION(
+          "466437 SharedMemory::Create::Start"));
   DCHECK(!shm_.IsValid());
   if (options.size == 0) return false;
 
   if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
     return false;
 
-  if (options.type == SharedMemoryHandle::MACH) {
-    shm_ = SharedMemoryHandle(options.size);
-    requested_size_ = options.size;
-    return shm_.IsValid();
-  }
-
-  // This function theoretically can block on the disk. Both profiling of real
-  // users and local instrumentation shows that this is a real problem.
-  // https://code.google.com/p/chromium/issues/detail?id=466437
-  base::ThreadRestrictions::ScopedAllowIO allow_io;
-
-  ScopedFILE fp;
-  ScopedFD readonly_fd;
-
-  FilePath path;
-  bool result = CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
-  if (!result)
-    return false;
-
-  if (!fp) {
-    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
-    return false;
-  }
-
-  // Get current size.
-  struct stat stat;
-  if (fstat(fileno(fp.get()), &stat) != 0)
-    return false;
-  const size_t current_size = stat.st_size;
-  if (current_size != options.size) {
-    if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
-      return false;
-  }
+  shm_ = SharedMemoryHandle(options.size);
   requested_size_ = options.size;
-
-  int mapped_file = -1;
-  result = PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file,
-                          &readonly_mapped_file_);
-
-  shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false));
-  return result;
+  return shm_.IsValid();
 }
 
 bool SharedMemory::MapAt(off_t offset, size_t bytes) {
@@ -211,7 +159,6 @@
     mapped_size_ = bytes;
     DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
                       (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
-    mapped_memory_mechanism_ = shm_.type_;
   } else {
     memory_ = NULL;
   }
@@ -223,105 +170,48 @@
   if (memory_ == NULL)
     return false;
 
-  switch (mapped_memory_mechanism_) {
-    case SharedMemoryHandle::POSIX:
-      munmap(memory_, mapped_size_);
-      break;
-    case SharedMemoryHandle::MACH:
-      mach_vm_deallocate(mach_task_self(),
-                         reinterpret_cast<mach_vm_address_t>(memory_),
-                         mapped_size_);
-      break;
-  }
-
+  mach_vm_deallocate(mach_task_self(),
+                     reinterpret_cast<mach_vm_address_t>(memory_),
+                     mapped_size_);
   memory_ = NULL;
   mapped_size_ = 0;
   return true;
 }
 
 SharedMemoryHandle SharedMemory::handle() const {
-  switch (shm_.type_) {
-    case SharedMemoryHandle::POSIX:
-      return SharedMemoryHandle(
-          FileDescriptor(shm_.file_descriptor_.fd, false));
-    case SharedMemoryHandle::MACH:
-      return shm_;
-  }
-}
-
-SharedMemoryHandle SharedMemory::TakeHandle() {
-  SharedMemoryHandle dup = DuplicateHandle(handle());
-  Close();
-  return dup;
+  return shm_;
 }
 
 void SharedMemory::Close() {
   shm_.Close();
   shm_ = SharedMemoryHandle();
-  if (shm_.type_ == SharedMemoryHandle::POSIX) {
-    if (readonly_mapped_file_ > 0) {
-      if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
-        PLOG(ERROR) << "close";
-      readonly_mapped_file_ = -1;
-    }
-  }
 }
 
-bool SharedMemory::Share(SharedMemoryHandle* new_handle, ShareMode share_mode) {
-  if (shm_.type_ == SharedMemoryHandle::MACH) {
-    DCHECK(shm_.IsValid());
-
-    bool success = false;
-    switch (share_mode) {
-      case SHARE_CURRENT_MODE:
-        *new_handle = shm_.Duplicate();
-        success = true;
-        break;
-      case SHARE_READONLY:
-        success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
-        break;
-    }
-
-    if (success)
-      new_handle->SetOwnershipPassesToIPC(true);
-
-    return success;
-  }
-
-  int handle_to_dup = -1;
-  switch (share_mode) {
-    case SHARE_CURRENT_MODE:
-      handle_to_dup = shm_.file_descriptor_.fd;
-      break;
-    case SHARE_READONLY:
-      // We could imagine re-opening the file from /dev/fd, but that can't make
-      // it readonly on Mac: https://codereview.chromium.org/27265002/#msg10
-      CHECK_GE(readonly_mapped_file_, 0);
-      handle_to_dup = readonly_mapped_file_;
-      break;
-  }
-
-  const int new_fd = HANDLE_EINTR(dup(handle_to_dup));
-  if (new_fd < 0) {
-    DPLOG(ERROR) << "dup() failed.";
-    return false;
-  }
-
-  new_handle->file_descriptor_.fd = new_fd;
-  new_handle->type_ = SharedMemoryHandle::POSIX;
-
-  return true;
-}
-
-bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
+bool SharedMemory::ShareToProcessCommon(ProcessHandle /*process*/,
                                         SharedMemoryHandle* new_handle,
                                         bool close_self,
                                         ShareMode share_mode) {
-  bool success = Share(new_handle, share_mode);
+  DCHECK(shm_.IsValid());
+
+  bool success = false;
+  switch (share_mode) {
+    case SHARE_CURRENT_MODE:
+      *new_handle = shm_.Duplicate();
+      success = true;
+      break;
+    case SHARE_READONLY:
+      success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
+      break;
+  }
+
+  if (success)
+    new_handle->SetOwnershipPassesToIPC(true);
+
   if (close_self) {
     Unmap();
     Close();
   }
+
   return success;
 }
 
diff --git a/base/memory/shared_memory_posix.cc b/base/memory/shared_memory_posix.cc
index fb1a343..7e94223 100644
--- a/base/memory/shared_memory_posix.cc
+++ b/base/memory/shared_memory_posix.cc
@@ -14,15 +14,12 @@
 #include "base/files/file_util.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
-#include "base/memory/shared_memory_helper.h"
-#include "base/memory/shared_memory_tracker.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/posix/safe_strerror.h"
 #include "base/process/process_metrics.h"
+#include "base/profiler/scoped_tracker.h"
 #include "base/scoped_generic.h"
 #include "base/strings/utf_string_conversions.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
 
 #if defined(OS_ANDROID)
@@ -34,6 +31,84 @@
 
 namespace base {
 
+namespace {
+
+struct ScopedPathUnlinkerTraits {
+  static FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(FilePath* path) {
+    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+    // is fixed.
+    tracked_objects::ScopedTracker tracking_profile(
+        FROM_HERE_WITH_EXPLICIT_FUNCTION(
+            "466437 SharedMemory::Create::Unlink"));
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+typedef ScopedGeneric<FilePath*, ScopedPathUnlinkerTraits> ScopedPathUnlinker;
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
+// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+                                 ScopedFILE* fp,
+                                 ScopedFD* readonly_fd,
+                                 FilePath* path) {
+  // It doesn't make sense to have a open-existing private piece of shmem
+  DCHECK(!options.open_existing_deprecated);
+  // Q: Why not use the shm_open() etc. APIs?
+  // A: Because they're limited to 4mb on OS X.  FFFFFFFUUUUUUUUUUU
+  FilePath directory;
+  ScopedPathUnlinker path_unlinker;
+  if (GetShmemTempDir(options.executable, &directory)) {
+    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+    // is fixed.
+    tracked_objects::ScopedTracker tracking_profile(
+        FROM_HERE_WITH_EXPLICIT_FUNCTION(
+            "466437 SharedMemory::Create::OpenTemporaryFile"));
+    fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
+
+    // Deleting the file prevents anyone else from mapping it in (making it
+    // private), and prevents the need for cleanup (once the last fd is
+    // closed, it is truly freed).
+    if (*fp)
+      path_unlinker.reset(path);
+  }
+
+  if (*fp) {
+    if (options.share_read_only) {
+      // TODO(erikchen): Remove ScopedTracker below once
+      // http://crbug.com/466437 is fixed.
+      tracked_objects::ScopedTracker tracking_profile(
+          FROM_HERE_WITH_EXPLICIT_FUNCTION(
+              "466437 SharedMemory::Create::OpenReadonly"));
+      // Also open as readonly so that we can ShareReadOnlyToProcess.
+      readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+      if (!readonly_fd->is_valid()) {
+        DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+        fp->reset();
+        return false;
+      }
+    }
+  }
+  return true;
+}
+#endif  // !defined(OS_ANDROID) &&  !defined(__ANDROID__)
+}
+
+SharedMemoryCreateOptions::SharedMemoryCreateOptions()
+    : name_deprecated(nullptr),
+      open_existing_deprecated(false),
+      size(0),
+      executable(false),
+      share_read_only(false) {}
+
 SharedMemory::SharedMemory()
     : mapped_file_(-1),
       readonly_mapped_file_(-1),
@@ -119,6 +194,11 @@
 // In case we want to delete it later, it may be useful to save the value
 // of mem_filename after FilePathForMemoryName().
 bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+  // is fixed.
+  tracked_objects::ScopedTracker tracking_profile1(
+      FROM_HERE_WITH_EXPLICIT_FUNCTION(
+          "466437 SharedMemory::Create::Start"));
   DCHECK_EQ(-1, mapped_file_);
   if (options.size == 0) return false;
 
@@ -208,7 +288,7 @@
     }
     requested_size_ = options.size;
   }
-  if (fp == NULL) {
+  if (fp == nullptr) {
     PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
     FilePath dir = path.DirName();
     if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
@@ -221,8 +301,7 @@
     return false;
   }
 
-  return PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file_,
-                        &readonly_mapped_file_);
+  return PrepareMapFile(std::move(fp), std::move(readonly_fd));
 }
 
 // Our current implementation of shmem is with mmap()ing of files.
@@ -254,8 +333,7 @@
     DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
     return false;
   }
-  return PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file_,
-                        &readonly_mapped_file_);
+  return PrepareMapFile(std::move(fp), std::move(readonly_fd));
 }
 #endif  // !defined(OS_ANDROID) && !defined(__ANDROID__)
 
@@ -287,10 +365,8 @@
   bool mmap_succeeded = memory_ != (void*)-1 && memory_ != NULL;
   if (mmap_succeeded) {
     mapped_size_ = bytes;
-    DCHECK_EQ(0U,
-              reinterpret_cast<uintptr_t>(memory_) &
-                  (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
-    SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+        (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
   } else {
     memory_ = NULL;
   }
@@ -303,7 +379,6 @@
     return false;
 
   munmap(memory_, mapped_size_);
-  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
   memory_ = NULL;
   mapped_size_ = 0;
   return true;
@@ -313,14 +388,6 @@
   return FileDescriptor(mapped_file_, false);
 }
 
-SharedMemoryHandle SharedMemory::TakeHandle() {
-  FileDescriptor handle(mapped_file_, true);
-  mapped_file_ = -1;
-  memory_ = nullptr;
-  mapped_size_ = 0;
-  return handle;
-}
-
 void SharedMemory::Close() {
   if (mapped_file_ > 0) {
     if (IGNORE_EINTR(close(mapped_file_)) < 0)
@@ -335,6 +402,44 @@
 }
 
 #if !defined(OS_ANDROID) && !defined(__ANDROID__)
+bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
+  DCHECK_EQ(-1, mapped_file_);
+  DCHECK_EQ(-1, readonly_mapped_file_);
+  if (fp == nullptr)
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  struct stat st = {};
+  if (fstat(fileno(fp.get()), &st))
+    NOTREACHED();
+  if (readonly_fd.is_valid()) {
+    struct stat readonly_st = {};
+    if (fstat(readonly_fd.get(), &readonly_st))
+      NOTREACHED();
+    if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+      LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+      return false;
+    }
+  }
+
+  mapped_file_ = HANDLE_EINTR(dup(fileno(fp.get())));
+  if (mapped_file_ == -1) {
+    if (errno == EMFILE) {
+      LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
+      return false;
+    } else {
+      NOTREACHED() << "Call to dup failed, errno=" << errno;
+    }
+  }
+  readonly_mapped_file_ = readonly_fd.release();
+
+  return true;
+}
+
 // For the given shmem named |mem_name|, return a filename to mmap()
 // (and possibly create).  Modifies |filename|.  Return false on
 // error, or true of we are happy.
@@ -397,22 +502,4 @@
   return true;
 }
 
-bool SharedMemory::GetUniqueId(SharedMemory::UniqueId* id) const {
-  // This function is called just after mmap. fstat is a system call that might
-  // cause I/O. It's safe to call fstat here because mmap for shared memory is
-  // called in two cases:
-  // 1) To handle file-mapped memory
-  // 2) To handle annonymous shared memory
-  // In 1), I/O is already permitted. In 2), the backend is on page cache and
-  // fstat doesn't cause I/O access to the disk. See the discussion at
-  // crbug.com/604726#c41.
-  base::ThreadRestrictions::ScopedAllowIO allow_io;
-  struct stat file_stat;
-  if (HANDLE_EINTR(::fstat(static_cast<int>(handle().fd), &file_stat)) != 0)
-    return false;
-  id->first = file_stat.st_dev;
-  id->second = file_stat.st_ino;
-  return true;
-}
-
 }  // namespace base
diff --git a/base/memory/shared_memory_tracker.cc b/base/memory/shared_memory_tracker.cc
deleted file mode 100644
index cfd4c85..0000000
--- a/base/memory/shared_memory_tracker.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_tracker.h"
-
-#include "base/memory/shared_memory.h"
-#include "base/strings/stringprintf.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "base/trace_event/process_memory_dump.h"
-
-namespace base {
-
-SharedMemoryTracker::Usage::Usage() = default;
-
-SharedMemoryTracker::Usage::Usage(const Usage& rhs) = default;
-
-SharedMemoryTracker::Usage::~Usage() = default;
-
-// static
-SharedMemoryTracker* SharedMemoryTracker::GetInstance() {
-  static SharedMemoryTracker* instance = new SharedMemoryTracker;
-  return instance;
-}
-
-void SharedMemoryTracker::IncrementMemoryUsage(
-    const SharedMemory& shared_memory) {
-  Usage usage;
-  // |shared_memory|'s unique ID must be generated here and it'd be too late at
-  // OnMemoryDump. An ID is generated with a SharedMemoryHandle, but the handle
-  // might already be closed at that time. Now IncrementMemoryUsage is called
-  // just after mmap and the handle must live then. See the discussion at
-  // crbug.com/604726#c30.
-  SharedMemory::UniqueId id;
-  if (!shared_memory.GetUniqueId(&id))
-    return;
-  usage.unique_id = id;
-  usage.size = shared_memory.mapped_size();
-  AutoLock hold(usages_lock_);
-  usages_[&shared_memory] = usage;
-}
-
-void SharedMemoryTracker::DecrementMemoryUsage(
-    const SharedMemory& shared_memory) {
-  AutoLock hold(usages_lock_);
-  usages_.erase(&shared_memory);
-}
-
-bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
-                                       trace_event::ProcessMemoryDump* pmd) {
-  ALLOW_UNUSED_PARAM(args);
-  std::unordered_map<SharedMemory::UniqueId, size_t, SharedMemory::UniqueIdHash>
-      sizes;
-  {
-    AutoLock hold(usages_lock_);
-    for (const auto& usage : usages_)
-      sizes[usage.second.unique_id] += usage.second.size;
-  }
-  for (auto& size : sizes) {
-    const SharedMemory::UniqueId& id = size.first;
-    std::string dump_name = StringPrintf("%s/%lld.%lld", "shared_memory",
-                                         static_cast<long long>(id.first),
-                                         static_cast<long long>(id.second));
-    auto guid = trace_event::MemoryAllocatorDumpGuid(dump_name);
-    trace_event::MemoryAllocatorDump* local_dump =
-        pmd->CreateAllocatorDump(dump_name);
-    // TODO(hajimehoshi): The size is not resident size but virtual size so far.
-    // Fix this to record resident size.
-    local_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
-                          trace_event::MemoryAllocatorDump::kUnitsBytes,
-                          size.second);
-    trace_event::MemoryAllocatorDump* global_dump =
-        pmd->CreateSharedGlobalAllocatorDump(guid);
-    global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
-                           trace_event::MemoryAllocatorDump::kUnitsBytes,
-                           size.second);
-    // TOOD(hajimehoshi): Detect which the shared memory comes from browser,
-    // renderer or GPU process.
-    // TODO(hajimehoshi): Shared memory reported by GPU and discardable is
-    // currently double-counted. Add ownership edges to avoid this.
-    pmd->AddOwnershipEdge(local_dump->guid(), global_dump->guid());
-  }
-  return true;
-}
-
-SharedMemoryTracker::SharedMemoryTracker() {
-  trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
-      this, "SharedMemoryTracker", nullptr);
-}
-
-SharedMemoryTracker::~SharedMemoryTracker() = default;
-
-}  // namespace
diff --git a/base/memory/shared_memory_tracker.h b/base/memory/shared_memory_tracker.h
deleted file mode 100644
index fe1a3dd..0000000
--- a/base/memory/shared_memory_tracker.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
-#define BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
-
-#include "base/memory/shared_memory.h"
-#include "base/synchronization/lock.h"
-#include "base/trace_event/memory_dump_provider.h"
-
-namespace base {
-
-namespace trace_event {
-class ProcessMemoryDump;
-}
-
-// SharedMemoryTracker tracks shared memory usage.
-class BASE_EXPORT SharedMemoryTracker
-    : public base::trace_event::MemoryDumpProvider {
- public:
-  // Returns a singleton instance.
-  static SharedMemoryTracker* GetInstance();
-
-  // Records shared memory usage on mapping.
-  void IncrementMemoryUsage(const SharedMemory& shared_memory);
-
-  // Records shared memory usage on unmapping.
-  void DecrementMemoryUsage(const SharedMemory& shared_memory);
-
- private:
-  struct Usage {
-    Usage();
-    Usage(const Usage& rhs);
-    ~Usage();
-    SharedMemory::UniqueId unique_id;
-    size_t size;
-  };
-
-  SharedMemoryTracker();
-  ~SharedMemoryTracker() override;
-
-  // base::trace_event::MemoryDumpProvider implementation.
-  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
-                    base::trace_event::ProcessMemoryDump* pmd) override;
-
-  // Used to lock when |usages_| is modified or read.
-  Lock usages_lock_;
-  std::unordered_map<const SharedMemory*, Usage> usages_;
-
-  DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
-};
-
-}  // namespace base
-
-#endif  // BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
index 19dedcc..f29865c 100644
--- a/base/memory/shared_memory_unittest.cc
+++ b/base/memory/shared_memory_unittest.cc
@@ -316,6 +316,8 @@
   }
 }
 
+// The Mach functionality is tested in shared_memory_mac_unittest.cc.
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
 TEST(SharedMemoryTest, ShareReadOnly) {
   StringPiece contents = "Hello World";
 
@@ -323,10 +325,6 @@
   SharedMemoryCreateOptions options;
   options.size = contents.size();
   options.share_read_only = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
-  options.type = SharedMemoryHandle::POSIX;
-#endif
   ASSERT_TRUE(writable_shmem.Create(options));
   ASSERT_TRUE(writable_shmem.Map(options.size));
   memcpy(writable_shmem.memory(), contents.data(), contents.size());
@@ -402,6 +400,7 @@
 #error Unexpected platform; write a test that tries to make 'handle' writable.
 #endif  // defined(OS_POSIX) || defined(OS_WIN)
 }
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
 
 TEST(SharedMemoryTest, ShareToSelf) {
   StringPiece contents = "Hello World";
@@ -475,7 +474,7 @@
   EXPECT_EQ(old_address, memory.memory());
 }
 
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
 // This test is not applicable for iOS (crbug.com/399384).
 #if !defined(OS_IOS)
 // Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
@@ -486,10 +485,6 @@
   SharedMemoryCreateOptions options;
   options.size = kTestSize;
   options.executable = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
-  options.type = SharedMemoryHandle::POSIX;
-#endif
 
   EXPECT_TRUE(shared_memory.Create(options));
   EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
@@ -523,10 +518,6 @@
   SharedMemory shared_memory;
   SharedMemoryCreateOptions options;
   options.size = kTestSize;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
-  options.type = SharedMemoryHandle::POSIX;
-#endif
   // Set a file mode creation mask that gives all permissions.
   ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
 
@@ -549,10 +540,6 @@
   SharedMemory shared_memory;
   SharedMemoryCreateOptions options;
   options.size = kTestSize;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
-  options.type = SharedMemoryHandle::POSIX;
-#endif
 
   // Set a file mode creation mask that gives all permissions.
   ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
@@ -569,7 +556,7 @@
 }
 #endif  // !defined(OS_ANDROID)
 
-#endif  // defined(OS_POSIX)
+#endif  // defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
 
 // Map() will return addresses which are aligned to the platform page size, this
 // varies from platform to platform though.  Since we'd like to advertise a
diff --git a/base/memory/singleton.h b/base/memory/singleton.h
index 5c58d5f..79e4441 100644
--- a/base/memory/singleton.h
+++ b/base/memory/singleton.h
@@ -22,7 +22,6 @@
 #include "base/at_exit.h"
 #include "base/atomicops.h"
 #include "base/base_export.h"
-#include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/aligned_memory.h"
 #include "base/threading/thread_restrictions.h"
@@ -64,7 +63,7 @@
   // exit. See below for the required call that makes this happen.
   static const bool kRegisterAtExit = true;
 
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
   // Set to false to disallow access on a non-joinable thread.  This is
   // different from kRegisterAtExit because StaticMemorySingletonTraits allows
   // access on non-joinable threads, and gracefully handles this.
@@ -79,7 +78,7 @@
 template<typename Type>
 struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
   static const bool kRegisterAtExit = false;
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
   static const bool kAllowedToAccessOnNonjoinableThread = true;
 #endif
 };
@@ -153,17 +152,14 @@
 // Example usage:
 //
 // In your header:
-//   namespace base {
-//   template <typename T>
-//   struct DefaultSingletonTraits;
-//   }
+//   template <typename T> struct DefaultSingletonTraits;
 //   class FooClass {
 //    public:
 //     static FooClass* GetInstance();  <-- See comment below on this.
 //     void Bar() { ... }
 //    private:
 //     FooClass() { ... }
-//     friend struct base::DefaultSingletonTraits<FooClass>;
+//     friend struct DefaultSingletonTraits<FooClass>;
 //
 //     DISALLOW_COPY_AND_ASSIGN(FooClass);
 //   };
@@ -171,14 +167,7 @@
 // In your source file:
 //  #include "base/memory/singleton.h"
 //  FooClass* FooClass::GetInstance() {
-//    return base::Singleton<FooClass>::get();
-//  }
-//
-// Or for leaky singletons:
-//  #include "base/memory/singleton.h"
-//  FooClass* FooClass::GetInstance() {
-//    return base::Singleton<
-//        FooClass, base::LeakySingletonTraits<FooClass>>::get();
+//    return Singleton<FooClass>::get();
 //  }
 //
 // And to call methods on FooClass:
@@ -238,7 +227,7 @@
 
   // Return a pointer to the one true instance of the class.
   static Type* get() {
-#if DCHECK_IS_ON()
+#ifndef NDEBUG
     // Avoid making TLS lookup on release builds.
     if (!Traits::kAllowedToAccessOnNonjoinableThread)
       ThreadRestrictions::AssertSingletonAllowed();
diff --git a/base/memory/weak_ptr.cc b/base/memory/weak_ptr.cc
index c179b80..4e77b04 100644
--- a/base/memory/weak_ptr.cc
+++ b/base/memory/weak_ptr.cc
@@ -17,13 +17,13 @@
 void WeakReference::Flag::Invalidate() {
   // The flag being invalidated with a single ref implies that there are no
   // weak pointers in existence. Allow deletion on other thread in this case.
-  DCHECK(sequence_checker_.CalledOnValidSequence() || HasOneRef())
+  DCHECK(sequence_checker_.CalledOnValidSequencedThread() || HasOneRef())
       << "WeakPtrs must be invalidated on the same sequenced thread.";
   is_valid_ = false;
 }
 
 bool WeakReference::Flag::IsValid() const {
-  DCHECK(sequence_checker_.CalledOnValidSequence())
+  DCHECK(sequence_checker_.CalledOnValidSequencedThread())
       << "WeakPtrs must be checked on the same sequenced thread.";
   return is_valid_;
 }
diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc
index 1a4870e..ebcf33c 100644
--- a/base/memory/weak_ptr_unittest.cc
+++ b/base/memory/weak_ptr_unittest.cc
@@ -12,7 +12,6 @@
 #include "base/location.h"
 #include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/test/gtest_util.h"
 #include "base/threading/thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -564,6 +563,8 @@
   background.DeleteArrow(arrow);
 }
 
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
 TEST(WeakPtrDeathTest, WeakPtrCopyDoesNotChangeThreadBinding) {
   // The default style "fast" does not support multi-threaded tests
   // (introduces deadlock on Linux).
@@ -587,7 +588,7 @@
 
   // Although background thread created the copy, it can not deref the copied
   // WeakPtr.
-  ASSERT_DCHECK_DEATH(background.DeRef(arrow_copy));
+  ASSERT_DEATH(background.DeRef(arrow_copy), "");
 
   background.DeleteArrow(arrow_copy);
 }
@@ -609,7 +610,7 @@
   // Background thread tries to deref target, which violates thread ownership.
   BackgroundThread background;
   background.Start();
-  ASSERT_DCHECK_DEATH(background.DeRef(&arrow));
+  ASSERT_DEATH(background.DeRef(&arrow), "");
 }
 
 TEST(WeakPtrDeathTest, NonOwnerThreadDeletesWeakPtrAfterReference) {
@@ -629,7 +630,7 @@
   background.DeRef(&arrow);
 
   // Main thread deletes Target, violating thread binding.
-  ASSERT_DCHECK_DEATH(target.reset());
+  ASSERT_DEATH(target.reset(), "");
 
   // |target.reset()| died so |target| still holds the object, so we
   // must pass it to the background thread to teardown.
@@ -652,7 +653,7 @@
   // Background thread tries to delete target, volating thread binding.
   BackgroundThread background;
   background.Start();
-  ASSERT_DCHECK_DEATH(background.DeleteTarget(target.release()));
+  ASSERT_DEATH(background.DeleteTarget(target.release()), "");
 }
 
 TEST(WeakPtrDeathTest, NonOwnerThreadReferencesObjectAfterDeletion) {
@@ -672,7 +673,9 @@
   background.DeleteTarget(target.release());
 
   // Main thread attempts to dereference the target, violating thread binding.
-  ASSERT_DCHECK_DEATH(arrow.target.get());
+  ASSERT_DEATH(arrow.target.get(), "");
 }
 
+#endif
+
 }  // namespace base
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index fed1494..bca1d52 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -17,7 +17,7 @@
 
 namespace {
 
-#if DCHECK_IS_ON()
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
 // Delays larger than this are often bogus, and a warning should be emitted in
 // debug builds to warn developers.  http://crbug.com/450045
 const int kTaskDelayWarningThresholdInSeconds =
@@ -68,8 +68,8 @@
       << "Requesting super-long task delay period of " << delay.InSeconds()
       << " seconds from here: " << from_here.ToString();
 
-  PendingTask pending_task(from_here, task, CalculateDelayedRuntime(delay),
-                           nestable);
+  PendingTask pending_task(
+      from_here, task, CalculateDelayedRuntime(delay), nestable);
 #if defined(OS_WIN)
   // We consider the task needs a high resolution timer if the delay is
   // more than 0 and less than 32ms. This caps the relative error to
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index 157e47f..aff71d2 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -16,6 +16,7 @@
 namespace base {
 
 class MessageLoop;
+class WaitableEvent;
 
 namespace internal {
 
diff --git a/base/message_loop/message_loop.cc b/base/message_loop/message_loop.cc
index eba68a7..54369a9 100644
--- a/base/message_loop/message_loop.cc
+++ b/base/message_loop/message_loop.cc
@@ -5,19 +5,26 @@
 #include "base/message_loop/message_loop.h"
 
 #include <algorithm>
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
 #include "base/compiler_specific.h"
+#include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/memory/ptr_util.h"
 #include "base/message_loop/message_pump_default.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/statistics_recorder.h"
 #include "base/run_loop.h"
 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/thread_id_name_manager.h"
 #include "base/threading/thread_local.h"
 #include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
 #include "base/trace_event/trace_event.h"
+#include "base/tracked_objects.h"
+#include "build/build_config.h"
 
 #if defined(OS_MACOSX)
 #include "base/message_loop/message_pump_mac.h"
@@ -37,11 +44,51 @@
 namespace {
 
 // A lazily created thread local storage for quick access to a thread's message
-// loop, if one exists.
-base::ThreadLocalPointer<MessageLoop>* GetTLSMessageLoop() {
-  static auto* lazy_tls_ptr = new base::ThreadLocalPointer<MessageLoop>();
-  return lazy_tls_ptr;
-}
+// loop, if one exists.  This should be safe and free of static constructors.
+LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
+    LAZY_INSTANCE_INITIALIZER;
+
+// Logical events for Histogram profiling. Run with --message-loop-histogrammer
+// to get an accounting of messages and actions taken on each thread.
+const int kTaskRunEvent = 0x1;
+#if !defined(OS_NACL)
+const int kTimerEvent = 0x2;
+
+// Provide range of message IDs for use in histogramming and debug display.
+const int kLeastNonZeroMessageId = 1;
+const int kMaxMessageId = 1099;
+const int kNumberOfDistinctMessagesDisplayed = 1100;
+
+// Provide a macro that takes an expression (such as a constant, or macro
+// constant) and creates a pair to initialize an array of pairs.  In this case,
+// our pair consists of the expressions value, and the "stringized" version
+// of the expression (i.e., the expression put in quotes).  For example, if
+// we have:
+//    #define FOO 2
+//    #define BAR 5
+// then the following:
+//    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
+// will expand to:
+//   {7, "FOO + BAR"}
+// We use the resulting array as an argument to our histogram, which reads the
+// number as a bucket identifier, and proceeds to use the corresponding name
+// in the pair (i.e., the quoted string) when printing out a histogram.
+#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
+
+const LinearHistogram::DescriptionPair event_descriptions_[] = {
+  // Provide some pretty print capability in our histogram for our internal
+  // messages.
+
+  // A few events we handle (kindred to messages), and used to profile actions.
+  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
+  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)
+
+  {-1, NULL}  // The list must be null-terminated, per API to histogram.
+};
+#endif  // !defined(OS_NACL)
+
+bool enable_histogrammer_ = false;
+
 MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;
 
 #if defined(OS_IOS)
@@ -124,8 +171,8 @@
   DCHECK(!did_work);
 
   // Let interested parties have one last shot at accessing this.
-  for (auto& observer : destruction_observers_)
-    observer.WillDestroyCurrentMessageLoop();
+  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
+                    WillDestroyCurrentMessageLoop());
 
   thread_task_runner_handle_.reset();
 
@@ -137,7 +184,7 @@
 
   // OK, now make it so that no one can find us.
   if (current() == this)
-    GetTLSMessageLoop()->Set(nullptr);
+    lazy_tls_ptr.Pointer()->Set(nullptr);
 }
 
 // static
@@ -145,7 +192,12 @@
   // TODO(darin): sadly, we cannot enable this yet since people call us even
   // when they have no intention of using us.
   // DCHECK(loop) << "Ouch, did you forget to initialize me?";
-  return GetTLSMessageLoop()->Get();
+  return lazy_tls_ptr.Pointer()->Get();
+}
+
+// static
+void MessageLoop::EnableHistogrammer(bool enable) {
+  enable_histogrammer_ = enable;
 }
 
 // static
@@ -162,7 +214,7 @@
 // TODO(rvargas): Get rid of the OS guards.
 #if defined(USE_GLIB) && !defined(OS_NACL)
   typedef MessagePumpGlib MessagePumpForUI;
-#elif (defined(OS_LINUX) && !defined(OS_NACL)) || defined(OS_BSD)
+#elif defined(OS_LINUX) && !defined(OS_NACL)
   typedef MessagePumpLibevent MessagePumpForUI;
 #endif
 
@@ -216,16 +268,39 @@
 
 void MessageLoop::AddNestingObserver(NestingObserver* observer) {
   DCHECK_EQ(this, current());
-  CHECK(allow_nesting_);
   nesting_observers_.AddObserver(observer);
 }
 
 void MessageLoop::RemoveNestingObserver(NestingObserver* observer) {
   DCHECK_EQ(this, current());
-  CHECK(allow_nesting_);
   nesting_observers_.RemoveObserver(observer);
 }
 
+void MessageLoop::PostTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task) {
+  task_runner_->PostTask(from_here, task);
+}
+
+void MessageLoop::PostDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
+  task_runner_->PostDelayedTask(from_here, task, delay);
+}
+
+void MessageLoop::Run() {
+  DCHECK(pump_);
+  RunLoop run_loop;
+  run_loop.Run();
+}
+
+void MessageLoop::RunUntilIdle() {
+  DCHECK(pump_);
+  RunLoop run_loop;
+  run_loop.RunUntilIdle();
+}
+
 void MessageLoop::QuitWhenIdle() {
   DCHECK_EQ(this, current());
   if (run_loop_) {
@@ -259,8 +334,6 @@
 
 void MessageLoop::SetNestableTasksAllowed(bool allowed) {
   if (allowed) {
-    CHECK(allow_nesting_);
-
     // Kick the native pump just in case we enter a OS-driven nested message
     // loop.
     pump_->ScheduleWork();
@@ -278,13 +351,11 @@
 
 void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
   DCHECK_EQ(this, current());
-  CHECK(allow_task_observers_);
   task_observers_.AddObserver(task_observer);
 }
 
 void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
   DCHECK_EQ(this, current());
-  CHECK(allow_task_observers_);
   task_observers_.RemoveObserver(task_observer);
 }
 
@@ -320,8 +391,8 @@
 #endif
       nestable_tasks_allowed_(true),
       pump_factory_(pump_factory),
-      run_loop_(nullptr),
-      current_pending_task_(nullptr),
+      message_histogram_(NULL),
+      run_loop_(NULL),
       incoming_task_queue_(new internal::IncomingTaskQueue(this)),
       unbound_task_runner_(
           new internal::MessageLoopTaskRunner(incoming_task_queue_)),
@@ -339,39 +410,39 @@
     pump_ = CreateMessagePumpForType(type_);
 
   DCHECK(!current()) << "should only have one message loop per thread";
-  GetTLSMessageLoop()->Set(this);
+  lazy_tls_ptr.Pointer()->Set(this);
 
   incoming_task_queue_->StartScheduling();
   unbound_task_runner_->BindToCurrentThread();
   unbound_task_runner_ = nullptr;
   SetThreadTaskRunnerHandle();
-  thread_id_ = PlatformThread::CurrentId();
+  {
+    // Save the current thread's ID for potential use by other threads
+    // later from GetThreadName().
+    thread_id_ = PlatformThread::CurrentId();
+    subtle::MemoryBarrier();
+  }
 }
 
 std::string MessageLoop::GetThreadName() const {
-  DCHECK_NE(kInvalidThreadId, thread_id_)
-      << "GetThreadName() must only be called after BindToCurrentThread()'s "
-      << "side-effects have been synchronized with this thread.";
+  if (thread_id_ == kInvalidThreadId) {
+    // |thread_id_| may already have been initialized but this thread might not
+    // have received the update yet.
+    subtle::MemoryBarrier();
+    DCHECK_NE(kInvalidThreadId, thread_id_);
+  }
   return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
 }
 
 void MessageLoop::SetTaskRunner(
     scoped_refptr<SingleThreadTaskRunner> task_runner) {
   DCHECK_EQ(this, current());
-  DCHECK(task_runner);
   DCHECK(task_runner->BelongsToCurrentThread());
   DCHECK(!unbound_task_runner_);
   task_runner_ = std::move(task_runner);
   SetThreadTaskRunnerHandle();
 }
 
-void MessageLoop::ClearTaskRunnerForTesting() {
-  DCHECK_EQ(this, current());
-  DCHECK(!unbound_task_runner_);
-  task_runner_ = nullptr;
-  thread_task_runner_handle_.reset();
-}
-
 void MessageLoop::SetThreadTaskRunnerHandle() {
   DCHECK_EQ(this, current());
   // Clear the previous thread task runner first, because only one can exist at
@@ -382,8 +453,7 @@
 
 void MessageLoop::RunHandler() {
   DCHECK_EQ(this, current());
-  DCHECK(run_loop_);
-  CHECK(allow_nesting_ || run_loop_->run_depth_ == 1);
+  StartHistogrammer();
   pump_->Run(this);
 }
 
@@ -398,16 +468,15 @@
       std::move(deferred_non_nestable_work_queue_.front());
   deferred_non_nestable_work_queue_.pop();
 
-  RunTask(&pending_task);
+  RunTask(pending_task);
   return true;
 }
 
-void MessageLoop::RunTask(PendingTask* pending_task) {
+void MessageLoop::RunTask(const PendingTask& pending_task) {
   DCHECK(nestable_tasks_allowed_);
-  current_pending_task_ = pending_task;
 
 #if defined(OS_WIN)
-  if (pending_task->is_high_res) {
+  if (pending_task.is_high_res) {
     pending_high_res_tasks_--;
     CHECK_GE(pending_high_res_tasks_, 0);
   }
@@ -416,22 +485,22 @@
   // Execute the task and assume the worst: It is probably not reentrant.
   nestable_tasks_allowed_ = false;
 
-  TRACE_TASK_EXECUTION("MessageLoop::RunTask", *pending_task);
+  HistogramEvent(kTaskRunEvent);
 
-  for (auto& observer : task_observers_)
-    observer.WillProcessTask(*pending_task);
+  TRACE_TASK_EXECUTION("MessageLoop::RunTask", pending_task);
+
+  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
+                    WillProcessTask(pending_task));
   task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
-  for (auto& observer : task_observers_)
-    observer.DidProcessTask(*pending_task);
+  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
+                    DidProcessTask(pending_task));
 
   nestable_tasks_allowed_ = true;
-
-  current_pending_task_ = nullptr;
 }
 
 bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
   if (pending_task.nestable || run_loop_->run_depth_ == 1) {
-    RunTask(&pending_task);
+    RunTask(pending_task);
     // Show that we ran a task (Note: a new one might arrive as a
     // consequence!).
     return true;
@@ -496,9 +565,40 @@
   pump_->ScheduleWork();
 }
 
+#if defined(OS_WIN)
+bool MessageLoop::MessagePumpWasSignaled() {
+  return pump_->WasSignaled();
+}
+#endif
+
+//------------------------------------------------------------------------------
+// Method and data for histogramming events and actions taken by each instance
+// on each thread.
+
+void MessageLoop::StartHistogrammer() {
+#if !defined(OS_NACL)  // NaCl build has no metrics code.
+  if (enable_histogrammer_ && !message_histogram_
+      && StatisticsRecorder::IsActive()) {
+    std::string thread_name = GetThreadName();
+    DCHECK(!thread_name.empty());
+    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
+        "MsgLoop:" + thread_name, kLeastNonZeroMessageId, kMaxMessageId,
+        kNumberOfDistinctMessagesDisplayed,
+        HistogramBase::kHexRangePrintingFlag, event_descriptions_);
+  }
+#endif
+}
+
+void MessageLoop::HistogramEvent(int event) {
+#if !defined(OS_NACL)
+  if (message_histogram_)
+    message_histogram_->Add(event);
+#endif
+}
+
 void MessageLoop::NotifyBeginNestedLoop() {
-  for (auto& observer : nesting_observers_)
-    observer.OnBeginNestedMessageLoop();
+  FOR_EACH_OBSERVER(NestingObserver, nesting_observers_,
+                    OnBeginNestedMessageLoop());
 }
 
 bool MessageLoop::DoWork() {
@@ -588,6 +688,19 @@
   return false;
 }
 
+void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
+                                     void(*deleter)(const void*),
+                                     const void* object) {
+  task_runner()->PostNonNestableTask(from_here, Bind(deleter, object));
+}
+
+void MessageLoop::ReleaseSoonInternal(
+    const tracked_objects::Location& from_here,
+    void(*releaser)(const void*),
+    const void* object) {
+  task_runner()->PostNonNestableTask(from_here, Bind(releaser, object));
+}
+
 #if !defined(OS_NACL)
 //------------------------------------------------------------------------------
 // MessageLoopForUI
@@ -600,18 +713,6 @@
   // No Histogram support for UI message loop as it is managed by Java side
   static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
 }
-
-void MessageLoopForUI::StartForTesting(
-    base::android::JavaMessageHandlerFactory* factory,
-    WaitableEvent* test_done_event) {
-  // No Histogram support for UI message loop as it is managed by Java side
-  static_cast<MessagePumpForUI*>(pump_.get())
-      ->StartForUnitTest(this, factory, test_done_event);
-}
-
-void MessageLoopForUI::Abort() {
-  static_cast<MessagePumpForUI*>(pump_.get())->Abort();
-}
 #endif
 
 #if defined(OS_IOS)
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index bfef261..ac522cf 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -13,6 +13,7 @@
 #include "base/callback_forward.h"
 #include "base/debug/task_annotator.h"
 #include "base/gtest_prod_util.h"
+#include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/message_loop/incoming_task_queue.h"
@@ -21,8 +22,10 @@
 #include "base/message_loop/timer_slack.h"
 #include "base/observer_list.h"
 #include "base/pending_task.h"
+#include "base/sequenced_task_runner_helpers.h"
 #include "base/synchronization/lock.h"
 #include "base/time/time.h"
+#include "base/tracking_info.h"
 #include "build/build_config.h"
 
 // TODO(sky): these includes should not be necessary. Nuke them.
@@ -34,18 +37,9 @@
 #include "base/message_loop/message_pump_libevent.h"
 #endif
 
-#if defined(OS_ANDROID)
-namespace base {
-namespace android {
-
-class JavaMessageHandlerFactory;
-
-}  // namespace android
-}  // namespace base
-#endif  // defined(OS_ANDROID)
-
 namespace base {
 
+class HistogramBase;
 class RunLoop;
 class ThreadTaskRunnerHandle;
 class WaitableEvent;
@@ -53,8 +47,8 @@
 // A MessageLoop is used to process events for a particular thread.  There is
 // at most one MessageLoop instance per thread.
 //
-// Events include at a minimum Task instances submitted to the MessageLoop's
-// TaskRunner. Depending on the type of message pump used by the MessageLoop
+// Events include at a minimum Task instances submitted to PostTask and its
+// variants.  Depending on the type of message pump used by the MessageLoop
 // other events such as UI messages may be processed.  On Windows APC calls (as
 // time permits) and signals sent to a registered set of HANDLEs may also be
 // processed.
@@ -128,6 +122,8 @@
   // Returns the MessageLoop object for the current thread, or null if none.
   static MessageLoop* current();
 
+  static void EnableHistogrammer(bool enable_histogrammer);
+
   typedef std::unique_ptr<MessagePump>(MessagePumpFactory)();
   // Uses the given base::MessagePumpForUIFactory to override the default
   // MessagePump implementation for 'TYPE_UI'. Returns true if the factory
@@ -175,6 +171,86 @@
   void AddNestingObserver(NestingObserver* observer);
   void RemoveNestingObserver(NestingObserver* observer);
 
+  // NOTE: Deprecated; prefer task_runner() and the TaskRunner interfaces.
+  // TODO(skyostil): Remove these functions (crbug.com/465354).
+  //
+  // The "PostTask" family of methods call the task's Run method asynchronously
+  // from within a message loop at some point in the future.
+  //
+  // With the PostTask variant, tasks are invoked in FIFO order, inter-mixed
+  // with normal UI or IO event processing.  With the PostDelayedTask variant,
+  // tasks are called after at least approximately 'delay_ms' have elapsed.
+  //
+  // The NonNestable variants work similarly except that they promise never to
+  // dispatch the task from a nested invocation of MessageLoop::Run.  Instead,
+  // such tasks get deferred until the top-most MessageLoop::Run is executing.
+  //
+  // The MessageLoop takes ownership of the Task, and deletes it after it has
+  // been Run().
+  //
+  // PostTask(from_here, task) is equivalent to
+  // PostDelayedTask(from_here, task, 0).
+  //
+  // NOTE: These methods may be called on any thread.  The Task will be invoked
+  // on the thread that executes MessageLoop::Run().
+  void PostTask(const tracked_objects::Location& from_here,
+                const Closure& task);
+
+  void PostDelayedTask(const tracked_objects::Location& from_here,
+                       const Closure& task,
+                       TimeDelta delay);
+
+  // A variant on PostTask that deletes the given object.  This is useful
+  // if the object needs to live until the next run of the MessageLoop (for
+  // example, deleting a RenderProcessHost from within an IPC callback is not
+  // good).
+  //
+  // NOTE: This method may be called on any thread.  The object will be deleted
+  // on the thread that executes MessageLoop::Run().
+  template <class T>
+  void DeleteSoon(const tracked_objects::Location& from_here, const T* object) {
+    base::subtle::DeleteHelperInternal<T, void>::DeleteViaSequencedTaskRunner(
+        this, from_here, object);
+  }
+
+  // A variant on PostTask that releases the given reference counted object
+  // (by calling its Release method).  This is useful if the object needs to
+  // live until the next run of the MessageLoop, or if the object needs to be
+  // released on a particular thread.
+  //
+  // A common pattern is to manually increment the object's reference count
+  // (AddRef), clear the pointer, then issue a ReleaseSoon.  The reference count
+  // is incremented manually to ensure clearing the pointer does not trigger a
+  // delete and to account for the upcoming decrement (ReleaseSoon).  For
+  // example:
+  //
+  // scoped_refptr<Foo> foo = ...
+  // foo->AddRef();
+  // Foo* raw_foo = foo.get();
+  // foo = NULL;
+  // message_loop->ReleaseSoon(raw_foo);
+  //
+  // NOTE: This method may be called on any thread.  The object will be
+  // released (and thus possibly deleted) on the thread that executes
+  // MessageLoop::Run().  If this is not the same as the thread that calls
+  // ReleaseSoon(FROM_HERE, ), then T MUST inherit from
+  // RefCountedThreadSafe<T>!
+  template <class T>
+  void ReleaseSoon(const tracked_objects::Location& from_here,
+                   const T* object) {
+    base::subtle::ReleaseHelperInternal<T, void>::ReleaseViaSequencedTaskRunner(
+        this, from_here, object);
+  }
+
+  // Deprecated: use RunLoop instead.
+  // Run the message loop.
+  void Run();
+
+  // Deprecated: use RunLoop instead.
+  // Process all pending tasks, windows messages, etc., but don't wait/sleep.
+  // Return as soon as all items that can be run are taken care of.
+  void RunUntilIdle();
+
   // Deprecated: use RunLoop instead.
   //
   // Signals the Run method to return when it becomes idle. It will continue to
@@ -215,11 +291,9 @@
   // Returns the type passed to the constructor.
   Type type() const { return type_; }
 
-  // Returns the name of the thread this message loop is bound to. This function
-  // is only valid when this message loop is running, BindToCurrentThread has
-  // already been called and has an "happens-before" relationship with this call
-  // (this relationship is obtained implicitly by the MessageLoop's task posting
-  // system unless calling this very early).
+  // Returns the name of the thread this message loop is bound to.
+  // This function is only valid when this message loop is running and
+  // BindToCurrentThread has already been called.
   std::string GetThreadName() const;
 
   // Gets the TaskRunner associated with this message loop.
@@ -234,10 +308,6 @@
   // thread to which the message loop is bound.
   void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
 
-  // Clears task_runner() and the ThreadTaskRunnerHandle for the target thread.
-  // Must be called on the thread to which the message loop is bound.
-  void ClearTaskRunnerForTesting();
-
   // Enables or disables the recursive task processing. This happens in the case
   // of recursive message loops. Some unwanted message loops may occur when
   // using common controls or printer functions. By default, recursive task
@@ -318,15 +388,16 @@
   debug::TaskAnnotator* task_annotator() { return &task_annotator_; }
 
   // Runs the specified PendingTask.
-  void RunTask(PendingTask* pending_task);
+  void RunTask(const PendingTask& pending_task);
 
-  // Disallow nesting. After this is called, running a nested RunLoop or calling
-  // Add/RemoveNestingObserver() on this MessageLoop will crash.
-  void DisallowNesting() { allow_nesting_ = false; }
-
-  // Disallow task observers. After this is called, calling
-  // Add/RemoveTaskObserver() on this MessageLoop will crash.
-  void DisallowTaskObservers() { allow_task_observers_ = false; }
+#if defined(OS_WIN)
+  // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+  // has been investigated.
+  // This should be used for diagnostic only. If message pump wake-up mechanism
+  // is based on auto-reset event this call would reset the event to unset
+  // state.
+  bool MessagePumpWasSignaled();
+#endif
 
   //----------------------------------------------------------------------------
  protected:
@@ -346,13 +417,11 @@
   void BindToCurrentThread();
 
  private:
-  friend class internal::IncomingTaskQueue;
   friend class RunLoop;
+  friend class internal::IncomingTaskQueue;
   friend class ScheduleWorkTest;
   friend class Thread;
-  friend struct PendingTask;
   FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
-  friend class PendingTaskTest;
 
   // Creates a MessageLoop without binding to a thread.
   // If |type| is TYPE_CUSTOM non-null |pump_factory| must be also given
@@ -399,6 +468,15 @@
   // responsible for synchronizing ScheduleWork() calls.
   void ScheduleWork();
 
+  // Start recording histogram info about events and action IF it was enabled
+  // and IF the statistics recorder can accept a registration of our histogram.
+  void StartHistogrammer();
+
+  // Add occurrence of event to our histogram, so that we can see what is being
+  // done in a specific MessageLoop instance (i.e., specific thread).
+  // If message_histogram_ is NULL, this is a no-op.
+  void HistogramEvent(int event);
+
   // Notify observers that a nested message loop is starting.
   void NotifyBeginNestedLoop();
 
@@ -446,19 +524,15 @@
   // if type_ is TYPE_CUSTOM and pump_ is null.
   MessagePumpFactoryCallback pump_factory_;
 
+  // A profiling histogram showing the counts of various messages and events.
+  HistogramBase* message_histogram_;
+
   RunLoop* run_loop_;
 
   ObserverList<TaskObserver> task_observers_;
 
   debug::TaskAnnotator task_annotator_;
 
-  // Used to allow creating a breadcrumb of program counters in PostTask.
-  // This variable is only initialized while a task is being executed and is
-  // meant only to store context for creating a backtrace breadcrumb. Do not
-  // attach other semantics to it without thinking through the use caes
-  // thoroughly.
-  const PendingTask* current_pending_task_;
-
   scoped_refptr<internal::IncomingTaskQueue> incoming_task_queue_;
 
   // A task runner which we haven't bound to a thread yet.
@@ -468,15 +542,18 @@
   scoped_refptr<SingleThreadTaskRunner> task_runner_;
   std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
 
-  // Id of the thread this message loop is bound to. Initialized once when the
-  // MessageLoop is bound to its thread and constant forever after.
+  // Id of the thread this message loop is bound to.
   PlatformThreadId thread_id_;
 
-  // Whether nesting is allowed.
-  bool allow_nesting_ = true;
+  template <class T, class R> friend class base::subtle::DeleteHelperInternal;
+  template <class T, class R> friend class base::subtle::ReleaseHelperInternal;
 
-  // Whether task observers are allowed.
-  bool allow_task_observers_ = true;
+  void DeleteSoonInternal(const tracked_objects::Location& from_here,
+                          void(*deleter)(const void*),
+                          const void* object);
+  void ReleaseSoonInternal(const tracked_objects::Location& from_here,
+                           void(*releaser)(const void*),
+                           const void* object);
 
   DISALLOW_COPY_AND_ASSIGN(MessageLoop);
 };
@@ -522,11 +599,6 @@
   // never be called. Instead use Start(), which will forward all the native UI
   // events to the Java message loop.
   void Start();
-  void StartForTesting(base::android::JavaMessageHandlerFactory* factory,
-                       WaitableEvent* test_done_event);
-  // In Android there are cases where we want to abort immediately without
-  // calling Quit(), in these cases we call Abort().
-  void Abort();
 #endif
 
 #if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
index 54551da..cabd250 100644
--- a/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -12,6 +12,7 @@
 #include "base/message_loop/message_loop.h"
 #include "base/message_loop/message_loop_task_runner.h"
 #include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread.h"
 #include "base/threading/thread_task_runner_handle.h"
@@ -37,7 +38,7 @@
     task_thread_.Start();
 
     // Allow us to pause the |task_thread_|'s MessageLoop.
-    task_thread_.task_runner()->PostTask(
+    task_thread_.message_loop()->task_runner()->PostTask(
         FROM_HERE, Bind(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
                         Unretained(this)));
   }
@@ -108,23 +109,23 @@
   MessageLoop* reply_deleted_on = NULL;
   int reply_delete_order = -1;
 
-  scoped_refptr<LoopRecorder> task_recorder =
+  scoped_refptr<LoopRecorder> task_recoder =
       new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
-  scoped_refptr<LoopRecorder> reply_recorder =
+  scoped_refptr<LoopRecorder> reply_recoder =
       new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
 
   ASSERT_TRUE(task_thread_.task_runner()->PostTaskAndReply(
-      FROM_HERE, Bind(&RecordLoop, task_recorder),
-      Bind(&RecordLoopAndQuit, reply_recorder)));
+      FROM_HERE, Bind(&RecordLoop, task_recoder),
+      Bind(&RecordLoopAndQuit, reply_recoder)));
 
   // Die if base::Bind doesn't retain a reference to the recorders.
-  task_recorder = NULL;
-  reply_recorder = NULL;
+  task_recoder = NULL;
+  reply_recoder = NULL;
   ASSERT_FALSE(task_deleted_on);
   ASSERT_FALSE(reply_deleted_on);
 
   UnblockTaskThread();
-  RunLoop().Run();
+  current_loop_->Run();
 
   EXPECT_EQ(task_thread_.message_loop(), task_run_on);
   EXPECT_EQ(current_loop_.get(), task_deleted_on);
@@ -141,9 +142,9 @@
   MessageLoop* reply_deleted_on = NULL;
   int reply_delete_order = -1;
 
-  scoped_refptr<LoopRecorder> task_recorder =
+  scoped_refptr<LoopRecorder> task_recoder =
       new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
-  scoped_refptr<LoopRecorder> reply_recorder =
+  scoped_refptr<LoopRecorder> reply_recoder =
       new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
 
   // Grab a task runner to a dead MessageLoop.
@@ -153,14 +154,14 @@
   task_thread_.Stop();
 
   ASSERT_FALSE(
-      task_runner->PostTaskAndReply(FROM_HERE, Bind(&RecordLoop, task_recorder),
-                                    Bind(&RecordLoopAndQuit, reply_recorder)));
+      task_runner->PostTaskAndReply(FROM_HERE, Bind(&RecordLoop, task_recoder),
+                                    Bind(&RecordLoopAndQuit, reply_recoder)));
 
   // The relay should have properly deleted its resources leaving us as the only
   // reference.
   EXPECT_EQ(task_delete_order, reply_delete_order);
-  ASSERT_TRUE(task_recorder->HasOneRef());
-  ASSERT_TRUE(reply_recorder->HasOneRef());
+  ASSERT_TRUE(task_recoder->HasOneRef());
+  ASSERT_TRUE(reply_recoder->HasOneRef());
 
   // Nothing should have run though.
   EXPECT_FALSE(task_run_on);
@@ -175,23 +176,23 @@
   MessageLoop* reply_deleted_on = NULL;
   int reply_delete_order = -1;
 
-  scoped_refptr<LoopRecorder> task_recorder =
+  scoped_refptr<LoopRecorder> task_recoder =
       new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
-  scoped_refptr<LoopRecorder> reply_recorder =
+  scoped_refptr<LoopRecorder> reply_recoder =
       new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
 
   // Enqueue the relay.
   ASSERT_TRUE(current_loop_->task_runner()->PostTaskAndReply(
-      FROM_HERE, Bind(&RecordLoop, task_recorder),
-      Bind(&RecordLoopAndQuit, reply_recorder)));
+      FROM_HERE, Bind(&RecordLoop, task_recoder),
+      Bind(&RecordLoopAndQuit, reply_recoder)));
 
   // Die if base::Bind doesn't retain a reference to the recorders.
-  task_recorder = NULL;
-  reply_recorder = NULL;
+  task_recoder = NULL;
+  reply_recoder = NULL;
   ASSERT_FALSE(task_deleted_on);
   ASSERT_FALSE(reply_deleted_on);
 
-  RunLoop().Run();
+  current_loop_->Run();
 
   EXPECT_EQ(current_loop_.get(), task_run_on);
   EXPECT_EQ(current_loop_.get(), task_deleted_on);
@@ -210,19 +211,19 @@
   MessageLoop* reply_deleted_on = NULL;
   int reply_delete_order = -1;
 
-  scoped_refptr<LoopRecorder> task_recorder =
+  scoped_refptr<LoopRecorder> task_recoder =
       new LoopRecorder(&task_run_on, &task_deleted_on, &task_delete_order);
-  scoped_refptr<LoopRecorder> reply_recorder =
+  scoped_refptr<LoopRecorder> reply_recoder =
       new LoopRecorder(&reply_run_on, &reply_deleted_on, &reply_delete_order);
 
   // Enqueue the relay.
   task_thread_.task_runner()->PostTaskAndReply(
-      FROM_HERE, Bind(&RecordLoop, task_recorder),
-      Bind(&RecordLoopAndQuit, reply_recorder));
+      FROM_HERE, Bind(&RecordLoop, task_recoder),
+      Bind(&RecordLoopAndQuit, reply_recoder));
 
   // Die if base::Bind doesn't retain a reference to the recorders.
-  task_recorder = NULL;
-  reply_recorder = NULL;
+  task_recoder = NULL;
+  reply_recoder = NULL;
   ASSERT_FALSE(task_deleted_on);
   ASSERT_FALSE(reply_deleted_on);
 
diff --git a/base/message_loop/message_loop_test.cc b/base/message_loop/message_loop_test.cc
index 6ffb16d..1ab946f 100644
--- a/base/message_loop/message_loop_test.cc
+++ b/base/message_loop/message_loop_test.cc
@@ -15,7 +15,6 @@
 #include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 namespace test {
@@ -98,19 +97,20 @@
   // Add tests to message loop
   scoped_refptr<Foo> foo(new Foo());
   std::string a("a"), b("b"), c("c"), d("d");
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&Foo::Test0, foo));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&Foo::Test1ConstRef, foo, a));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&Foo::Test1Ptr, foo, &b));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&Foo::Test1Int, foo, 100));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&Foo::Test2Ptr, foo, &a, &c));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&Foo::Test2Mixed, foo, a, &d));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&Foo::Test0, foo.get()));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test1ConstRef, foo.get(), a));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test1Ptr, foo.get(), &b));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test1Int, foo.get(), 100));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test2Ptr, foo.get(), &a, &c));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test2Mixed, foo.get(), a, &d));
   // After all tests, post a message that will shut down the message loop
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE,
       Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
 
@@ -302,8 +302,8 @@
   ~RecordDeletionProbe() {
     *was_deleted_ = true;
     if (post_on_delete_.get())
-      ThreadTaskRunnerHandle::Get()->PostTask(
-          FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_));
+      MessageLoop::current()->task_runner()->PostTask(
+          FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_.get()));
   }
 
   scoped_refptr<RecordDeletionProbe> post_on_delete_;
@@ -351,8 +351,8 @@
 void NestingFunc(int* depth) {
   if (*depth > 0) {
     *depth -= 1;
-    ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                            Bind(&NestingFunc, depth));
+    MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                    Bind(&NestingFunc, depth));
 
     MessageLoop::current()->SetNestableTasksAllowed(true);
     RunLoop().Run();
@@ -365,8 +365,8 @@
   MessageLoop loop(std::move(pump));
 
   int depth = 100;
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&NestingFunc, &depth));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&NestingFunc, &depth));
   RunLoop().Run();
   EXPECT_EQ(depth, 0);
 }
@@ -403,9 +403,10 @@
   RunLoop nested_loop;
   // Verify that by the time the first task is run the observer has seen the
   // message loop begin.
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&ExpectOneBeginNestedLoop, observer));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, nested_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop.QuitClosure());
   nested_loop.Run();
 
   // Quitting message loops doesn't change the begin count.
@@ -517,7 +518,7 @@
   if (depth > 0) {
     if (is_reentrant)
       MessageLoop::current()->SetNestableTasksAllowed(true);
-    ThreadTaskRunnerHandle::Get()->PostTask(
+    MessageLoop::current()->task_runner()->PostTask(
         FROM_HERE,
         Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
   }
@@ -535,12 +536,12 @@
 
   EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
   TaskList order;
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, false));
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, false));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&QuitFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&QuitFunc, &order, 3));
 
   RunLoop().Run();
 
@@ -579,13 +580,13 @@
 
   EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
   TaskList order;
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveSlowFunc, &order, 1, 2, false));
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveSlowFunc, &order, 2, 2, false));
-  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+  MessageLoop::current()->task_runner()->PostDelayedTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 3), TimeDelta::FromMilliseconds(5));
-  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+  MessageLoop::current()->task_runner()->PostDelayedTask(
       FROM_HERE, Bind(&QuitFunc, &order, 4), TimeDelta::FromMilliseconds(5));
 
   RunLoop().Run();
@@ -615,12 +616,12 @@
   MessageLoop loop(std::move(pump));
 
   TaskList order;
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, true));
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, true));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&QuitFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&QuitFunc, &order, 3));
 
   RunLoop().Run();
 
@@ -649,12 +650,14 @@
 
   TaskList order;
 
-  ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
-      FROM_HERE, Bind(&OrderedFunc, &order, 1));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 2));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&QuitFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 1));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&QuitFunc, &order, 3));
   RunLoop().Run();
 
   // FIFO order.
@@ -689,18 +692,24 @@
 
   TaskList order;
 
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&FuncThatPumps, &order, 1));
-  ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
-      FROM_HERE, Bind(&OrderedFunc, &order, 2));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 3));
-  ThreadTaskRunnerHandle::Get()->PostTask(
-      FROM_HERE, Bind(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 5));
-  ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
-      FROM_HERE, Bind(&QuitFunc, &order, 6));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&FuncThatPumps, &order, 1));
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 5));
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
+      FROM_HERE,
+      Bind(&QuitFunc, &order, 6));
 
   RunLoop().Run();
 
@@ -741,15 +750,17 @@
 
   RunLoop run_loop;
 
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 2));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 3));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&FuncThatQuitsNow));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&FuncThatQuitsNow));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 4));  // never runs
 
   RunLoop().Run();
@@ -775,14 +786,14 @@
   RunLoop outer_run_loop;
   RunLoop nested_run_loop;
 
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          outer_run_loop.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 2));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          nested_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  outer_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, nested_run_loop.QuitClosure());
 
   outer_run_loop.Run();
 
@@ -805,14 +816,14 @@
   RunLoop outer_run_loop;
   RunLoop nested_run_loop;
 
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          nested_run_loop.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 2));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          outer_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, nested_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  outer_run_loop.QuitClosure());
 
   outer_run_loop.Run();
 
@@ -836,16 +847,16 @@
   RunLoop nested_run_loop;
   RunLoop bogus_run_loop;
 
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          bogus_run_loop.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 2));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          outer_run_loop.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          nested_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  bogus_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  outer_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, nested_run_loop.QuitClosure());
 
   outer_run_loop.Run();
 
@@ -871,36 +882,36 @@
   RunLoop nested_loop3;
   RunLoop nested_loop4;
 
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 5));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          outer_run_loop.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 6));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          nested_loop1.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 7));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          nested_loop2.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 8));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          nested_loop3.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 9));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          nested_loop4.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 10));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 5));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  outer_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 6));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop1.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 7));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop2.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 8));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop3.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 9));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop4.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 10));
 
   outer_run_loop.Run();
 
@@ -938,9 +949,9 @@
 
   run_loop.Quit();
 
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 1));  // never runs
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatQuitsNow));  // never runs
 
   run_loop.Run();
@@ -957,12 +968,13 @@
 
   RunLoop run_loop;
 
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 1));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop.QuitClosure());
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 1));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 2));  // never runs
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatQuitsNow));  // never runs
 
   run_loop.Run();
@@ -983,18 +995,20 @@
 
   RunLoop run_loop;
 
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 2));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 3));
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&FuncThatQuitsNow));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, run_loop.QuitClosure());  // has no affect
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&OrderedFunc, &order, 4));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&FuncThatQuitsNow));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 4));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&FuncThatQuitsNow));
 
   RunLoop outer_run_loop;
   outer_run_loop.Run();
@@ -1014,7 +1028,7 @@
 
 void PostNTasksThenQuit(int posts_remaining) {
   if (posts_remaining > 1) {
-    ThreadTaskRunnerHandle::Get()->PostTask(
+    MessageLoop::current()->task_runner()->PostTask(
         FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
   } else {
     MessageLoop::current()->QuitWhenIdle();
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index 14fe1ee..52337e3 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -27,11 +27,6 @@
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-#if defined(OS_ANDROID)
-#include "base/android/jni_android.h"
-#include "base/test/android/java_handler_thread_for_testing.h"
-#endif
-
 #if defined(OS_WIN)
 #include "base/message_loop/message_pump_win.h"
 #include "base/process/memory.h"
@@ -81,53 +76,6 @@
   std::string result_;
 };
 
-#if defined(OS_ANDROID)
-void AbortMessagePump() {
-  JNIEnv* env = base::android::AttachCurrentThread();
-  jclass exception = env->FindClass(
-      "org/chromium/base/TestSystemMessageHandler$TestException");
-
-  env->ThrowNew(exception,
-                "This is a test exception that should be caught in "
-                "TestSystemMessageHandler.handleMessage");
-  static_cast<base::MessageLoopForUI*>(base::MessageLoop::current())->Abort();
-}
-
-void RunTest_AbortDontRunMoreTasks(bool delayed) {
-  MessageLoop loop(MessageLoop::TYPE_JAVA);
-
-  WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
-                                WaitableEvent::InitialState::NOT_SIGNALED);
-
-  std::unique_ptr<android::JavaHandlerThreadForTesting> java_thread;
-  java_thread.reset(new android::JavaHandlerThreadForTesting(
-      "JavaHandlerThreadForTesting from AbortDontRunMoreTasks",
-      &test_done_event));
-  java_thread->Start();
-
-  if (delayed) {
-    java_thread->message_loop()->task_runner()->PostDelayedTask(
-        FROM_HERE, Bind(&AbortMessagePump), TimeDelta::FromMilliseconds(10));
-  } else {
-    java_thread->message_loop()->task_runner()->PostTask(
-        FROM_HERE, Bind(&AbortMessagePump));
-  }
-
-  // Wait to ensure we catch the correct exception (and don't crash)
-  test_done_event.Wait();
-
-  java_thread->Stop();
-  java_thread.reset();
-}
-
-TEST(MessageLoopTest, JavaExceptionAbort) {
-  RunTest_AbortDontRunMoreTasks(false);
-}
-TEST(MessageLoopTest, DelayedJavaExceptionAbort) {
-  RunTest_AbortDontRunMoreTasks(true);
-}
-#endif  // defined(OS_ANDROID)
-
 #if defined(OS_WIN)
 
 // This function runs slowly to simulate a large amount of work being done.
@@ -159,7 +107,7 @@
 }
 
 void RunTest_PostDelayedTask_SharedTimer_SubPump() {
-  MessageLoop message_loop(MessageLoop::TYPE_UI);
+  MessageLoop loop(MessageLoop::TYPE_UI);
 
   // Test that the interval of the timer, used to run the next delayed task, is
   // set to a value corresponding to when the next delayed task should run.
@@ -169,20 +117,23 @@
   int num_tasks = 1;
   Time run_time;
 
-  message_loop.task_runner()->PostTask(FROM_HERE, Bind(&SubPumpFunc));
+  loop.PostTask(FROM_HERE, Bind(&SubPumpFunc));
 
   // This very delayed task should never run.
-  message_loop.task_runner()->PostDelayedTask(
-      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
+  loop.PostDelayedTask(
+      FROM_HERE,
+      Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
       TimeDelta::FromSeconds(1000));
 
   // This slightly delayed task should run from within SubPumpFunc.
-  message_loop.task_runner()->PostDelayedTask(
-      FROM_HERE, Bind(&PostQuitMessage, 0), TimeDelta::FromMilliseconds(10));
+  loop.PostDelayedTask(
+      FROM_HERE,
+      Bind(&PostQuitMessage, 0),
+      TimeDelta::FromMilliseconds(10));
 
   Time start_time = Time::Now();
 
-  RunLoop().Run();
+  loop.Run();
   EXPECT_EQ(1, num_tasks);
 
   // Ensure that we ran in far less time than the slower timer.
@@ -309,7 +260,7 @@
   if (depth > 0) {
     if (is_reentrant)
       MessageLoop::current()->SetNestableTasksAllowed(true);
-    ThreadTaskRunnerHandle::Get()->PostTask(
+    MessageLoop::current()->PostTask(
         FROM_HERE,
         Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
   }
@@ -322,25 +273,27 @@
   order->RecordEnd(QUITMESSAGELOOP, cookie);
 }
 
-void RecursiveFuncWin(scoped_refptr<SingleThreadTaskRunner> task_runner,
+void RecursiveFuncWin(MessageLoop* target,
                       HANDLE event,
                       bool expect_window,
                       TaskList* order,
                       bool is_reentrant) {
-  task_runner->PostTask(FROM_HERE,
-                        Bind(&RecursiveFunc, order, 1, 2, is_reentrant));
-  task_runner->PostTask(FROM_HERE,
-                        Bind(&MessageBoxFunc, order, 2, is_reentrant));
-  task_runner->PostTask(FROM_HERE,
-                        Bind(&RecursiveFunc, order, 3, 2, is_reentrant));
+  target->PostTask(FROM_HERE,
+                   Bind(&RecursiveFunc, order, 1, 2, is_reentrant));
+  target->PostTask(FROM_HERE,
+                   Bind(&MessageBoxFunc, order, 2, is_reentrant));
+  target->PostTask(FROM_HERE,
+                   Bind(&RecursiveFunc, order, 3, 2, is_reentrant));
   // The trick here is that for recursive task processing, this task will be
   // ran _inside_ the MessageBox message loop, dismissing the MessageBox
   // without a chance.
   // For non-recursive task processing, this will be executed _after_ the
   // MessageBox will have been dismissed by the code below, where
   // expect_window_ is true.
-  task_runner->PostTask(FROM_HERE, Bind(&EndDialogFunc, order, 4));
-  task_runner->PostTask(FROM_HERE, Bind(&QuitFunc, order, 5));
+  target->PostTask(FROM_HERE,
+                   Bind(&EndDialogFunc, order, 4));
+  target->PostTask(FROM_HERE,
+                   Bind(&QuitFunc, order, 5));
 
   // Enforce that every tasks are sent before starting to run the main thread
   // message loop.
@@ -378,12 +331,16 @@
   ASSERT_EQ(true, worker.StartWithOptions(options));
   TaskList order;
   win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
-  worker.task_runner()->PostTask(
-      FROM_HERE, Bind(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
-                      event.Get(), true, &order, false));
+  worker.message_loop()->PostTask(FROM_HERE,
+                                  Bind(&RecursiveFuncWin,
+                                             MessageLoop::current(),
+                                             event.Get(),
+                                             true,
+                                             &order,
+                                             false));
   // Let the other thread execute.
   WaitForSingleObject(event.Get(), INFINITE);
-  RunLoop().Run();
+  MessageLoop::current()->Run();
 
   ASSERT_EQ(17u, order.Size());
   EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
@@ -418,12 +375,16 @@
   ASSERT_EQ(true, worker.StartWithOptions(options));
   TaskList order;
   win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
-  worker.task_runner()->PostTask(
-      FROM_HERE, Bind(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
-                      event.Get(), false, &order, true));
+  worker.message_loop()->PostTask(FROM_HERE,
+                                  Bind(&RecursiveFuncWin,
+                                             MessageLoop::current(),
+                                             event.Get(),
+                                             false,
+                                             &order,
+                                             true));
   // Let the other thread execute.
   WaitForSingleObject(event.Get(), INFINITE);
-  RunLoop().Run();
+  MessageLoop::current()->Run();
 
   ASSERT_EQ(18u, order.Size());
   EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
@@ -456,7 +417,7 @@
 
 void PostNTasksThenQuit(int posts_remaining) {
   if (posts_remaining > 1) {
-    ThreadTaskRunnerHandle::Get()->PostTask(
+    MessageLoop::current()->task_runner()->PostTask(
         FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
   } else {
     MessageLoop::current()->QuitWhenIdle();
@@ -530,9 +491,12 @@
   options.message_loop_type = MessageLoop::TYPE_IO;
   ASSERT_TRUE(thread.StartWithOptions(options));
 
+  MessageLoop* thread_loop = thread.message_loop();
+  ASSERT_TRUE(NULL != thread_loop);
+
   TestIOHandler handler(kPipeName, callback_called.Get(), false);
-  thread.task_runner()->PostTask(
-      FROM_HERE, Bind(&TestIOHandler::Init, Unretained(&handler)));
+  thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
+                                              Unretained(&handler)));
   // Make sure the thread runs and sleeps for lack of work.
   PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
 
@@ -568,16 +532,19 @@
   options.message_loop_type = MessageLoop::TYPE_IO;
   ASSERT_TRUE(thread.StartWithOptions(options));
 
+  MessageLoop* thread_loop = thread.message_loop();
+  ASSERT_TRUE(NULL != thread_loop);
+
   TestIOHandler handler1(kPipeName1, callback1_called.Get(), false);
   TestIOHandler handler2(kPipeName2, callback2_called.Get(), true);
-  thread.task_runner()->PostTask(
-      FROM_HERE, Bind(&TestIOHandler::Init, Unretained(&handler1)));
+  thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
+                                              Unretained(&handler1)));
   // TODO(ajwong): Do we really need such long Sleeps in this function?
   // Make sure the thread runs and sleeps for lack of work.
   TimeDelta delay = TimeDelta::FromMilliseconds(100);
   PlatformThread::Sleep(delay);
-  thread.task_runner()->PostTask(
-      FROM_HERE, Bind(&TestIOHandler::Init, Unretained(&handler2)));
+  thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
+                                              Unretained(&handler2)));
   PlatformThread::Sleep(delay);
 
   // At this time handler1 is waiting to be called, and the thread is waiting
@@ -614,6 +581,9 @@
 RUN_MESSAGE_LOOP_TESTS(IO, &TypeIOMessagePumpFactory);
 
 #if defined(OS_WIN)
+// Additional set of tests for GPU version of UI message loop.
+RUN_MESSAGE_LOOP_TESTS(GPU, &MessagePumpForGpu::CreateMessagePumpForGpu);
+
 TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
   RunTest_PostDelayedTask_SharedTimer_SubPump();
 }
@@ -687,26 +657,26 @@
 }
 
 TEST(MessageLoopTest, HighResolutionTimer) {
-  MessageLoop message_loop;
+  MessageLoop loop;
   Time::EnableHighResolutionTimer(true);
 
   const TimeDelta kFastTimer = TimeDelta::FromMilliseconds(5);
   const TimeDelta kSlowTimer = TimeDelta::FromMilliseconds(100);
 
-  EXPECT_FALSE(message_loop.HasHighResolutionTasks());
+  EXPECT_FALSE(loop.HasHighResolutionTasks());
   // Post a fast task to enable the high resolution timers.
-  message_loop.task_runner()->PostDelayedTask(
-      FROM_HERE, Bind(&PostNTasksThenQuit, 1), kFastTimer);
-  EXPECT_TRUE(message_loop.HasHighResolutionTasks());
-  RunLoop().Run();
-  EXPECT_FALSE(message_loop.HasHighResolutionTasks());
+  loop.PostDelayedTask(FROM_HERE, Bind(&PostNTasksThenQuit, 1),
+                       kFastTimer);
+  EXPECT_TRUE(loop.HasHighResolutionTasks());
+  loop.Run();
+  EXPECT_FALSE(loop.HasHighResolutionTasks());
   EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
   // Check that a slow task does not trigger the high resolution logic.
-  message_loop.task_runner()->PostDelayedTask(
-      FROM_HERE, Bind(&PostNTasksThenQuit, 1), kSlowTimer);
-  EXPECT_FALSE(message_loop.HasHighResolutionTasks());
-  RunLoop().Run();
-  EXPECT_FALSE(message_loop.HasHighResolutionTasks());
+  loop.PostDelayedTask(FROM_HERE, Bind(&PostNTasksThenQuit, 1),
+                       kSlowTimer);
+  EXPECT_FALSE(loop.HasHighResolutionTasks());
+  loop.Run();
+  EXPECT_FALSE(loop.HasHighResolutionTasks());
   Time::EnableHighResolutionTimer(false);
 }
 
@@ -739,7 +709,7 @@
   int fd = pipefds[1];
   {
     // Arrange for controller to live longer than message loop.
-    MessageLoopForIO::FileDescriptorWatcher controller(FROM_HERE);
+    MessageLoopForIO::FileDescriptorWatcher controller;
     {
       MessageLoopForIO message_loop;
 
@@ -766,7 +736,7 @@
     // Arrange for message loop to live longer than controller.
     MessageLoopForIO message_loop;
     {
-      MessageLoopForIO::FileDescriptorWatcher controller(FROM_HERE);
+      MessageLoopForIO::FileDescriptorWatcher controller;
 
       QuitDelegate delegate;
       message_loop.WatchFileDescriptor(fd,
@@ -866,10 +836,10 @@
   scoped_refptr<Foo> foo(new Foo());
   std::string a("a");
   ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(
-      &Foo::Test1ConstRef, foo, a));
+      &Foo::Test1ConstRef, foo.get(), a));
 
   // Post quit task;
-  ThreadTaskRunnerHandle::Get()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE,
       Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
 
@@ -891,10 +861,8 @@
 void EmptyFunction() {}
 
 void PostMultipleTasks() {
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          base::Bind(&EmptyFunction));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          base::Bind(&EmptyFunction));
+  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&EmptyFunction));
+  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&EmptyFunction));
 }
 
 static const int kSignalMsg = WM_USER + 2;
@@ -922,20 +890,19 @@
     // First, we post a task that will post multiple no-op tasks to make sure
     // that the pump's incoming task queue does not become empty during the
     // test.
-    ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                            base::Bind(&PostMultipleTasks));
+    MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&PostMultipleTasks));
     // Next, we post a task that posts a windows message to trigger the second
     // stage of the test.
-    ThreadTaskRunnerHandle::Get()->PostTask(
-        FROM_HERE, base::Bind(&PostWindowsMessage, hwnd));
+    MessageLoop::current()->PostTask(FROM_HERE,
+                                     base::Bind(&PostWindowsMessage, hwnd));
     break;
   case 2:
     // Since we're about to enter a modal loop, tell the message loop that we
     // intend to nest tasks.
     MessageLoop::current()->SetNestableTasksAllowed(true);
     bool did_run = false;
-    ThreadTaskRunnerHandle::Get()->PostTask(
-        FROM_HERE, base::Bind(&EndTest, &did_run, hwnd));
+    MessageLoop::current()->PostTask(FROM_HERE,
+                                     base::Bind(&EndTest, &did_run, hwnd));
     // Run a nested windows-style message loop and verify that our task runs. If
     // it doesn't, then we'll loop here until the test times out.
     MSG msg;
@@ -972,7 +939,7 @@
 
   ASSERT_TRUE(PostMessage(message_hwnd, kSignalMsg, 0, 1));
 
-  RunLoop().Run();
+  loop.Run();
 
   ASSERT_TRUE(UnregisterClass(MAKEINTATOM(atom), instance));
 }
@@ -995,7 +962,7 @@
 
   scoped_refptr<Foo> foo(new Foo());
   original_runner->PostTask(FROM_HERE,
-                            Bind(&Foo::Test1ConstRef, foo, "a"));
+                            Bind(&Foo::Test1ConstRef, foo.get(), "a"));
   RunLoop().RunUntilIdle();
   EXPECT_EQ(1, foo->test_count());
 }
diff --git a/base/message_loop/message_pump.cc b/base/message_loop/message_pump.cc
index 3d85b9b..2f740f2 100644
--- a/base/message_loop/message_pump.cc
+++ b/base/message_loop/message_pump.cc
@@ -15,4 +15,11 @@
 void MessagePump::SetTimerSlack(TimerSlack) {
 }
 
+#if defined(OS_WIN)
+bool MessagePump::WasSignaled() {
+  NOTREACHED();
+  return false;
+}
+#endif
+
 }  // namespace base
diff --git a/base/message_loop/message_pump.h b/base/message_loop/message_pump.h
index c53be80..af8ed41 100644
--- a/base/message_loop/message_pump.h
+++ b/base/message_loop/message_pump.h
@@ -124,6 +124,15 @@
 
   // Sets the timer slack to the specified value.
   virtual void SetTimerSlack(TimerSlack timer_slack);
+
+#if defined(OS_WIN)
+  // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+  // has been investigated.
+  // This should be used for diagnostic only. If message pump wake-up mechanism
+  // is based on auto-reset event this call would reset the event to unset
+  // state.
+  virtual bool WasSignaled();
+#endif
 };
 
 }  // namespace base
diff --git a/base/message_loop/message_pump_default.cc b/base/message_loop/message_pump_default.cc
index cf68270..3449aec 100644
--- a/base/message_loop/message_pump_default.cc
+++ b/base/message_loop/message_pump_default.cc
@@ -4,6 +4,8 @@
 
 #include "base/message_loop/message_pump_default.h"
 
+#include <algorithm>
+
 #include "base/logging.h"
 #include "base/threading/thread_restrictions.h"
 #include "build/build_config.h"
@@ -52,11 +54,38 @@
     if (delayed_work_time_.is_null()) {
       event_.Wait();
     } else {
-      // No need to handle already expired |delayed_work_time_| in any special
-      // way. When |delayed_work_time_| is in the past TimeWaitUntil returns
-      // promptly and |delayed_work_time_| will re-initialized on a next
-      // DoDelayedWork call which has to be called in order to get here again.
-      event_.TimedWaitUntil(delayed_work_time_);
+      TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
+      if (delay > TimeDelta()) {
+#if defined(OS_WIN)
+        // TODO(stanisc): crbug.com/623223: Consider moving the OS_WIN specific
+        // logic into TimedWait implementation in waitable_event_win.cc.
+
+        // crbug.com/487724: on Windows, waiting for less than 1 ms results in
+        // returning from TimedWait promptly and spinning
+        // MessagePumpDefault::Run loop for up to 1 ms - until it is time to
+        // run a delayed task. |min_delay| is the minimum possible wait to
+        // to avoid the spinning.
+        constexpr TimeDelta min_delay = TimeDelta::FromMilliseconds(1);
+        do {
+          delay = std::max(delay, min_delay);
+          if (event_.TimedWait(delay))
+            break;
+
+          // TimedWait can time out earlier than the specified |delay| on
+          // Windows. It doesn't make sense to run the outer loop in that case
+          // because there isn't going to be any new work. It is less overhead
+          // to just go back to wait.
+          // In practice this inner wait loop might have up to 3 iterations.
+          delay = delayed_work_time_ - TimeTicks::Now();
+        } while (delay > TimeDelta());
+#else
+        event_.TimedWait(delay);
+#endif
+      } else {
+        // It looks like delayed_work_time_ indicates a time in the past, so we
+        // need to call DoDelayedWork now.
+        delayed_work_time_ = TimeTicks();
+      }
     }
     // Since event_ is auto-reset, we don't need to do anything special here
     // other than service each delegate method.
diff --git a/base/message_loop/message_pump_glib.h b/base/message_loop/message_pump_glib.h
index d79dba5..a2b54d8 100644
--- a/base/message_loop/message_pump_glib.h
+++ b/base/message_loop/message_pump_glib.h
@@ -69,7 +69,7 @@
   // Dispatch() will be called.
   int wakeup_pipe_read_;
   int wakeup_pipe_write_;
-  // Use a unique_ptr to avoid needing the definition of GPollFD in the header.
+  // Use a scoped_ptr to avoid needing the definition of GPollFD in the header.
   std::unique_ptr<GPollFD> wakeup_gpollfd_;
 
   DISALLOW_COPY_AND_ASSIGN(MessagePumpGlib);
diff --git a/base/message_loop/message_pump_glib_unittest.cc b/base/message_loop/message_pump_glib_unittest.cc
index a89ccb9..7ddd4f0 100644
--- a/base/message_loop/message_pump_glib_unittest.cc
+++ b/base/message_loop/message_pump_glib_unittest.cc
@@ -16,9 +16,7 @@
 #include "base/memory/ref_counted.h"
 #include "base/message_loop/message_loop.h"
 #include "base/run_loop.h"
-#include "base/single_thread_task_runner.h"
 #include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -154,7 +152,7 @@
 // Posts a task on the current message loop.
 void PostMessageLoopTask(const tracked_objects::Location& from_here,
                          const Closure& task) {
-  ThreadTaskRunnerHandle::Get()->PostTask(from_here, task);
+  MessageLoop::current()->PostTask(from_here, task);
 }
 
 // Test fixture.
@@ -195,7 +193,7 @@
   injector()->Reset();
   // Quit from an event
   injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
-  RunLoop().Run();
+  loop()->Run();
   EXPECT_EQ(1, injector()->processed_events());
 }
 
@@ -215,7 +213,7 @@
   injector()->AddEventAsTask(0, posted_task);
   injector()->AddEventAsTask(0, Bind(&DoNothing));
   injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
-  RunLoop().Run();
+  loop()->Run();
   EXPECT_EQ(4, injector()->processed_events());
 
   injector()->Reset();
@@ -226,7 +224,7 @@
   injector()->AddEventAsTask(0, posted_task);
   injector()->AddEventAsTask(10, Bind(&DoNothing));
   injector()->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
-  RunLoop().Run();
+  loop()->Run();
   EXPECT_EQ(4, injector()->processed_events());
 }
 
@@ -235,15 +233,15 @@
   // Tests that we process tasks while waiting for new events.
   // The event queue is empty at first.
   for (int i = 0; i < 10; ++i) {
-    loop()->task_runner()->PostTask(FROM_HERE,
-                                    Bind(&IncrementInt, &task_count));
+    loop()->PostTask(FROM_HERE, Bind(&IncrementInt, &task_count));
   }
   // After all the previous tasks have executed, enqueue an event that will
   // quit.
-  loop()->task_runner()->PostTask(
-      FROM_HERE, Bind(&EventInjector::AddEvent, Unretained(injector()), 0,
-                      MessageLoop::QuitWhenIdleClosure()));
-  RunLoop().Run();
+  loop()->PostTask(
+      FROM_HERE,
+      Bind(&EventInjector::AddEvent, Unretained(injector()), 0,
+                 MessageLoop::QuitWhenIdleClosure()));
+  loop()->Run();
   ASSERT_EQ(10, task_count);
   EXPECT_EQ(1, injector()->processed_events());
 
@@ -251,19 +249,21 @@
   injector()->Reset();
   task_count = 0;
   for (int i = 0; i < 10; ++i) {
-    loop()->task_runner()->PostDelayedTask(FROM_HERE,
-                                           Bind(&IncrementInt, &task_count),
-                                           TimeDelta::FromMilliseconds(10 * i));
+    loop()->PostDelayedTask(
+        FROM_HERE,
+        Bind(&IncrementInt, &task_count),
+        TimeDelta::FromMilliseconds(10*i));
   }
   // After all the previous tasks have executed, enqueue an event that will
   // quit.
   // This relies on the fact that delayed tasks are executed in delay order.
   // That is verified in message_loop_unittest.cc.
-  loop()->task_runner()->PostDelayedTask(
-      FROM_HERE, Bind(&EventInjector::AddEvent, Unretained(injector()), 10,
-                      MessageLoop::QuitWhenIdleClosure()),
+  loop()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&EventInjector::AddEvent, Unretained(injector()), 10,
+                 MessageLoop::QuitWhenIdleClosure()),
       TimeDelta::FromMilliseconds(150));
-  RunLoop().Run();
+  loop()->Run();
   ASSERT_EQ(10, task_count);
   EXPECT_EQ(1, injector()->processed_events());
 }
@@ -285,7 +285,7 @@
 
   // And then quit (relies on the condition tested by TestEventTaskInterleave).
   injector()->AddEvent(10, MessageLoop::QuitWhenIdleClosure());
-  RunLoop().Run();
+  loop()->Run();
 
   EXPECT_EQ(12, injector()->processed_events());
 }
@@ -310,7 +310,7 @@
     if (task_count_ == 0 && event_count_ == 0) {
         MessageLoop::current()->QuitWhenIdle();
     } else {
-      ThreadTaskRunnerHandle::Get()->PostTask(
+      MessageLoop::current()->PostTask(
           FROM_HERE, Bind(&ConcurrentHelper::FromTask, this));
     }
   }
@@ -356,17 +356,17 @@
   // Add 2 events to the queue to make sure it is always full (when we remove
   // the event before processing it).
   injector()->AddEventAsTask(
-      0, Bind(&ConcurrentHelper::FromEvent, helper));
+      0, Bind(&ConcurrentHelper::FromEvent, helper.get()));
   injector()->AddEventAsTask(
-      0, Bind(&ConcurrentHelper::FromEvent, helper));
+      0, Bind(&ConcurrentHelper::FromEvent, helper.get()));
 
   // Similarly post 2 tasks.
-  loop()->task_runner()->PostTask(
-      FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper));
-  loop()->task_runner()->PostTask(
-      FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper));
+  loop()->PostTask(
+      FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper.get()));
+  loop()->PostTask(
+      FROM_HERE, Bind(&ConcurrentHelper::FromTask, helper.get()));
 
-  RunLoop().Run();
+  loop()->Run();
   EXPECT_EQ(0, helper->event_count());
   EXPECT_EQ(0, helper->task_count());
 }
@@ -381,8 +381,8 @@
   injector->AddEvent(0, MessageLoop::QuitWhenIdleClosure());
 
   // Post a couple of dummy tasks
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&DoNothing));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, Bind(&DoNothing));
+  MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoNothing));
+  MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoNothing));
 
   // Drain the events
   while (g_main_context_pending(NULL)) {
@@ -394,9 +394,10 @@
 
 TEST_F(MessagePumpGLibTest, TestDrainingGLib) {
   // Tests that draining events using GLib works.
-  loop()->task_runner()->PostTask(
-      FROM_HERE, Bind(&AddEventsAndDrainGLib, Unretained(injector())));
-  RunLoop().Run();
+  loop()->PostTask(
+      FROM_HERE,
+      Bind(&AddEventsAndDrainGLib, Unretained(injector())));
+  loop()->Run();
 
   EXPECT_EQ(3, injector()->processed_events());
 }
@@ -446,19 +447,21 @@
   injector->AddDummyEvent(0);
   injector->AddDummyEvent(0);
   // Post a couple of dummy tasks
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&IncrementInt, &task_count));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&IncrementInt, &task_count));
+  MessageLoop::current()->PostTask(
+      FROM_HERE, Bind(&IncrementInt, &task_count));
+  MessageLoop::current()->PostTask(
+      FROM_HERE, Bind(&IncrementInt, &task_count));
   // Delayed events
   injector->AddDummyEvent(10);
   injector->AddDummyEvent(10);
   // Delayed work
-  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE, Bind(&IncrementInt, &task_count),
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&IncrementInt, &task_count),
       TimeDelta::FromMilliseconds(30));
-  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE, Bind(&GLibLoopRunner::Quit, runner),
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&GLibLoopRunner::Quit, runner.get()),
       TimeDelta::FromMilliseconds(40));
 
   // Run a nested, straight GLib message loop.
@@ -479,19 +482,21 @@
   injector->AddDummyEvent(0);
   injector->AddDummyEvent(0);
   // Post a couple of dummy tasks
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&IncrementInt, &task_count));
-  ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                          Bind(&IncrementInt, &task_count));
+  MessageLoop::current()->PostTask(
+      FROM_HERE, Bind(&IncrementInt, &task_count));
+  MessageLoop::current()->PostTask(
+      FROM_HERE, Bind(&IncrementInt, &task_count));
   // Delayed events
   injector->AddDummyEvent(10);
   injector->AddDummyEvent(10);
   // Delayed work
-  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE, Bind(&IncrementInt, &task_count),
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&IncrementInt, &task_count),
       TimeDelta::FromMilliseconds(30));
-  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE, Bind(&GLibLoopRunner::Quit, runner),
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&GLibLoopRunner::Quit, runner.get()),
       TimeDelta::FromMilliseconds(40));
 
   // Run a nested, straight Gtk message loop.
@@ -509,9 +514,10 @@
   // loop is not run by MessageLoop::Run() but by a straight GLib loop.
   // Note that in this case we don't make strong guarantees about niceness
   // between events and posted tasks.
-  loop()->task_runner()->PostTask(
-      FROM_HERE, Bind(&TestGLibLoopInternal, Unretained(injector())));
-  RunLoop().Run();
+  loop()->PostTask(
+      FROM_HERE,
+      Bind(&TestGLibLoopInternal, Unretained(injector())));
+  loop()->Run();
 }
 
 TEST_F(MessagePumpGLibTest, TestGtkLoop) {
@@ -519,9 +525,10 @@
   // loop is not run by MessageLoop::Run() but by a straight Gtk loop.
   // Note that in this case we don't make strong guarantees about niceness
   // between events and posted tasks.
-  loop()->task_runner()->PostTask(
-      FROM_HERE, Bind(&TestGtkLoopInternal, Unretained(injector())));
-  RunLoop().Run();
+  loop()->PostTask(
+      FROM_HERE,
+      Bind(&TestGtkLoopInternal, Unretained(injector())));
+  loop()->Run();
 }
 
 }  // namespace base
diff --git a/base/message_loop/message_pump_libevent.cc b/base/message_loop/message_pump_libevent.cc
index 86f5faa..5aa5567 100644
--- a/base/message_loop/message_pump_libevent.cc
+++ b/base/message_loop/message_pump_libevent.cc
@@ -44,13 +44,12 @@
 
 namespace base {
 
-MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher(
-    const tracked_objects::Location& from_here)
+MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher()
     : event_(NULL),
       pump_(NULL),
       watcher_(NULL),
-      was_destroyed_(NULL),
-      created_from_location_(from_here) {}
+      was_destroyed_(NULL) {
+}
 
 MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
   if (event_) {
@@ -75,15 +74,15 @@
   return (rv == 0);
 }
 
-void MessagePumpLibevent::FileDescriptorWatcher::Init(event* e) {
+void MessagePumpLibevent::FileDescriptorWatcher::Init(event *e) {
   DCHECK(e);
   DCHECK(!event_);
 
   event_ = e;
 }
 
-event* MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
-  struct event* e = event_;
+event *MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
+  struct event *e = event_;
   event_ = NULL;
   return e;
 }
@@ -113,7 +112,7 @@
       wakeup_pipe_in_(-1),
       wakeup_pipe_out_(-1) {
   if (!Init())
-    NOTREACHED();
+     NOTREACHED();
 }
 
 MessagePumpLibevent::~MessagePumpLibevent() {
@@ -135,8 +134,8 @@
 bool MessagePumpLibevent::WatchFileDescriptor(int fd,
                                               bool persistent,
                                               int mode,
-                                              FileDescriptorWatcher* controller,
-                                              Watcher* delegate) {
+                                              FileDescriptorWatcher *controller,
+                                              Watcher *delegate) {
   DCHECK_GE(fd, 0);
   DCHECK(controller);
   DCHECK(delegate);
@@ -293,8 +292,16 @@
 
 bool MessagePumpLibevent::Init() {
   int fds[2];
-  if (!CreateLocalNonBlockingPipe(fds)) {
-    DPLOG(ERROR) << "pipe creation failed";
+  if (pipe(fds)) {
+    DLOG(ERROR) << "pipe() failed, errno: " << errno;
+    return false;
+  }
+  if (!SetNonBlocking(fds[0])) {
+    DLOG(ERROR) << "SetNonBlocking for pipe fd[0] failed, errno: " << errno;
+    return false;
+  }
+  if (!SetNonBlocking(fds[1])) {
+    DLOG(ERROR) << "SetNonBlocking for pipe fd[1] failed, errno: " << errno;
     return false;
   }
   wakeup_pipe_out_ = fds[0];
@@ -317,11 +324,8 @@
   FileDescriptorWatcher* controller =
       static_cast<FileDescriptorWatcher*>(context);
   DCHECK(controller);
-  TRACE_EVENT2("toplevel", "MessagePumpLibevent::OnLibeventNotification",
-               "src_file", controller->created_from_location().file_name(),
-               "src_func", controller->created_from_location().function_name());
-  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION heap_profiler_scope(
-      controller->created_from_location().file_name());
+  TRACE_EVENT1("toplevel", "MessagePumpLibevent::OnLibeventNotification",
+               "fd", fd);
 
   MessagePumpLibevent* pump = controller->pump();
   pump->processed_io_events_ = true;
diff --git a/base/message_loop/message_pump_libevent.h b/base/message_loop/message_pump_libevent.h
index 1124560..76f882f 100644
--- a/base/message_loop/message_pump_libevent.h
+++ b/base/message_loop/message_pump_libevent.h
@@ -6,7 +6,6 @@
 #define BASE_MESSAGE_LOOP_MESSAGE_PUMP_LIBEVENT_H_
 
 #include "base/compiler_specific.h"
-#include "base/location.h"
 #include "base/macros.h"
 #include "base/message_loop/message_pump.h"
 #include "base/threading/thread_checker.h"
@@ -38,7 +37,7 @@
   // Object returned by WatchFileDescriptor to manage further watching.
   class FileDescriptorWatcher {
    public:
-    explicit FileDescriptorWatcher(const tracked_objects::Location& from_here);
+    FileDescriptorWatcher();
     ~FileDescriptorWatcher();  // Implicitly calls StopWatchingFileDescriptor.
 
     // NOTE: These methods aren't called StartWatching()/StopWatching() to
@@ -48,10 +47,6 @@
     // to do.
     bool StopWatchingFileDescriptor();
 
-    const tracked_objects::Location& created_from_location() {
-      return created_from_location_;
-    }
-
    private:
     friend class MessagePumpLibevent;
     friend class MessagePumpLibeventTest;
@@ -78,8 +73,6 @@
     // destructor.
     bool* was_destroyed_;
 
-    const tracked_objects::Location created_from_location_;
-
     DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
   };
 
@@ -107,8 +100,8 @@
   bool WatchFileDescriptor(int fd,
                            bool persistent,
                            int mode,
-                           FileDescriptorWatcher* controller,
-                           Watcher* delegate);
+                           FileDescriptorWatcher *controller,
+                           Watcher *delegate);
 
   // MessagePump methods:
   void Run(Delegate* delegate) override;
@@ -119,11 +112,15 @@
  private:
   friend class MessagePumpLibeventTest;
 
+  void WillProcessIOEvent();
+  void DidProcessIOEvent();
+
   // Risky part of constructor.  Returns true on success.
   bool Init();
 
   // Called by libevent to tell us a registered FD can be read/written to.
-  static void OnLibeventNotification(int fd, short flags, void* context);
+  static void OnLibeventNotification(int fd, short flags,
+                                     void* context);
 
   // Unix pipe used to implement ScheduleWork()
   // ... callback; called by libevent inside Run() when pipe is ready to read
diff --git a/base/message_loop/message_pump_mac.h b/base/message_loop/message_pump_mac.h
index f0766eb..14b8377 100644
--- a/base/message_loop/message_pump_mac.h
+++ b/base/message_loop/message_pump_mac.h
@@ -78,11 +78,9 @@
 typedef NSAutoreleasePool AutoreleasePoolType;
 #endif  // !defined(__OBJC__) || __has_feature(objc_arc)
 
-class BASE_EXPORT MessagePumpCFRunLoopBase : public MessagePump {
+class MessagePumpCFRunLoopBase : public MessagePump {
   // Needs access to CreateAutoreleasePool.
   friend class MessagePumpScopedAutoreleasePool;
-  friend class TestMessagePumpCFRunLoopBase;
-
  public:
   MessagePumpCFRunLoopBase();
   ~MessagePumpCFRunLoopBase() override;
@@ -115,21 +113,6 @@
   virtual AutoreleasePoolType* CreateAutoreleasePool();
 
  private:
-  // Marking timers as invalid at the right time helps significantly reduce
-  // power use (see the comment in RunDelayedWorkTimer()), however there is no
-  // public API for doing so. CFRuntime.h states that CFRuntimeBase, upon which
-  // the above timer invalidation functions are based, can change from release
-  // to release and should not be accessed directly (this struct last changed at
-  // least in 2008 in CF-476).
-  //
-  // This function uses private API to modify a test timer's valid state and
-  // uses public API to confirm that the private API changed the right bit.
-  static bool CanInvalidateCFRunLoopTimers();
-
-  // Sets a Core Foundation object's "invalid" bit to |valid|. Based on code
-  // from CFRunLoop.c.
-  static void ChromeCFRunLoopTimerSetValid(CFRunLoopTimerRef timer, bool valid);
-
   // Timer callback scheduled by ScheduleDelayedWork.  This does not do any
   // work, but it signals work_source_ so that delayed work can be performed
   // within the appropriate priority constraints.
diff --git a/base/message_loop/message_pump_mac.mm b/base/message_loop/message_pump_mac.mm
index a3accee..95d1c5f 100644
--- a/base/message_loop/message_pump_mac.mm
+++ b/base/message_loop/message_pump_mac.mm
@@ -4,18 +4,16 @@
 
 #import "base/message_loop/message_pump_mac.h"
 
+#include <dlfcn.h>
 #import <Foundation/Foundation.h>
 
 #include <limits>
 
 #include "base/logging.h"
-#include "base/mac/call_with_eh_frame.h"
 #include "base/mac/scoped_cftyperef.h"
-#include "base/macros.h"
 #include "base/message_loop/timer_slack.h"
 #include "base/run_loop.h"
 #include "base/time/time.h"
-#include "build/build_config.h"
 
 #if !defined(OS_IOS)
 #import <AppKit/AppKit.h>
@@ -69,48 +67,34 @@
 // Set to true if MessagePumpMac::Create() is called before NSApp is
 // initialized.  Only accessed from the main thread.
 bool g_not_using_cr_app = false;
-
-// Various CoreFoundation definitions.
-typedef struct __CFRuntimeBase {
-  uintptr_t _cfisa;
-  uint8_t _cfinfo[4];
-#if __LP64__
-  uint32_t _rc;
-#endif
-} CFRuntimeBase;
-
-#if defined(__BIG_ENDIAN__)
-#define __CF_BIG_ENDIAN__ 1
-#define __CF_LITTLE_ENDIAN__ 0
 #endif
 
-#if defined(__LITTLE_ENDIAN__)
-#define __CF_LITTLE_ENDIAN__ 1
-#define __CF_BIG_ENDIAN__ 0
-#endif
+// Call through to CFRunLoopTimerSetTolerance(), which is only available on
+// OS X 10.9.
+void SetTimerTolerance(CFRunLoopTimerRef timer, CFTimeInterval tolerance) {
+  typedef void (*CFRunLoopTimerSetTolerancePtr)(CFRunLoopTimerRef timer,
+      CFTimeInterval tolerance);
 
-#define CF_INFO_BITS (!!(__CF_BIG_ENDIAN__)*3)
+  static CFRunLoopTimerSetTolerancePtr settimertolerance_function_ptr;
 
-#define __CFBitfieldMask(N1, N2) \
-  ((((UInt32)~0UL) << (31UL - (N1) + (N2))) >> (31UL - N1))
-#define __CFBitfieldSetValue(V, N1, N2, X)   \
-  ((V) = ((V) & ~__CFBitfieldMask(N1, N2)) | \
-         (((X) << (N2)) & __CFBitfieldMask(N1, N2)))
+  static dispatch_once_t get_timer_tolerance_function_ptr_once;
+  dispatch_once(&get_timer_tolerance_function_ptr_once, ^{
+      NSBundle* bundle =[NSBundle
+        bundleWithPath:@"/System/Library/Frameworks/CoreFoundation.framework"];
+      const char* path = [[bundle executablePath] fileSystemRepresentation];
+      CHECK(path);
+      void* library_handle = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
+      CHECK(library_handle) << dlerror();
+      settimertolerance_function_ptr =
+          reinterpret_cast<CFRunLoopTimerSetTolerancePtr>(
+              dlsym(library_handle, "CFRunLoopTimerSetTolerance"));
 
-// Marking timers as invalid at the right time by flipping their valid bit helps
-// significantly reduce power use (see the explanation in
-// RunDelayedWorkTimer()), however there is no public API for doing so.
-// CFRuntime.h states that CFRuntimeBase can change from release to release
-// and should not be accessed directly. The last known change of this struct
-// occurred in 2008 in CF-476 / 10.5; unfortunately the source for 10.11 and
-// 10.12 is not available for inspection at this time.
-// CanInvalidateCFRunLoopTimers() will at least prevent us from invalidating
-// timers if this function starts flipping the wrong bit on a future OS release.
-void __ChromeCFRunLoopTimerSetValid(CFRunLoopTimerRef timer, bool valid) {
-  __CFBitfieldSetValue(((CFRuntimeBase*)timer)->_cfinfo[CF_INFO_BITS], 3, 3,
-                       valid);
+      dlclose(library_handle);
+  });
+
+  if (settimertolerance_function_ptr)
+    settimertolerance_function_ptr(timer, tolerance);
 }
-#endif  // !defined(OS_IOS)
 
 }  // namespace
 
@@ -135,47 +119,6 @@
   DISALLOW_COPY_AND_ASSIGN(MessagePumpScopedAutoreleasePool);
 };
 
-#if !defined(OS_IOS)
-// This function uses private API to modify a test timer's valid state and
-// uses public API to confirm that the private API changed the correct bit.
-// static
-bool MessagePumpCFRunLoopBase::CanInvalidateCFRunLoopTimers() {
-  CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
-  timer_context.info = nullptr;
-  ScopedCFTypeRef<CFRunLoopTimerRef> test_timer(
-      CFRunLoopTimerCreate(NULL,                // allocator
-                           kCFTimeIntervalMax,  // fire time
-                           kCFTimeIntervalMax,  // interval
-                           0,                   // flags
-                           0,                   // priority
-                           nullptr, &timer_context));
-  // Should be valid from the start.
-  if (!CFRunLoopTimerIsValid(test_timer)) {
-    return false;
-  }
-  // Confirm that the private API can mark the timer invalid.
-  __ChromeCFRunLoopTimerSetValid(test_timer, false);
-  if (CFRunLoopTimerIsValid(test_timer)) {
-    return false;
-  }
-  // Confirm that the private API can mark the timer valid.
-  __ChromeCFRunLoopTimerSetValid(test_timer, true);
-  return CFRunLoopTimerIsValid(test_timer);
-}
-#endif  // !defined(OS_IOS)
-
-// static
-void MessagePumpCFRunLoopBase::ChromeCFRunLoopTimerSetValid(
-    CFRunLoopTimerRef timer,
-    bool valid) {
-#if !defined(OS_IOS)
-  static bool can_invalidate_timers = CanInvalidateCFRunLoopTimers();
-  if (can_invalidate_timers) {
-    __ChromeCFRunLoopTimerSetValid(timer, valid);
-  }
-#endif  // !defined(OS_IOS)
-}
-
 // Must be called on the run loop thread.
 MessagePumpCFRunLoopBase::MessagePumpCFRunLoopBase()
     : delegate_(NULL),
@@ -325,22 +268,11 @@
     const TimeTicks& delayed_work_time) {
   TimeDelta delta = delayed_work_time - TimeTicks::Now();
   delayed_work_fire_time_ = CFAbsoluteTimeGetCurrent() + delta.InSecondsF();
-
-  // Flip the timer's validation bit just before setting the new fire time. Do
-  // this now because CFRunLoopTimerSetNextFireDate() likely checks the validity
-  // of a timer before proceeding to set its fire date. Making the timer valid
-  // now won't have any side effects (such as a premature firing of the timer)
-  // because we're only flipping a bit.
-  //
-  // Please see the comment in RunDelayedWorkTimer() for more info on the whys
-  // of invalidation.
-  ChromeCFRunLoopTimerSetValid(delayed_work_timer_, true);
-
   CFRunLoopTimerSetNextFireDate(delayed_work_timer_, delayed_work_fire_time_);
   if (timer_slack_ == TIMER_SLACK_MAXIMUM) {
-    CFRunLoopTimerSetTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
+    SetTimerTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
   } else {
-    CFRunLoopTimerSetTolerance(delayed_work_timer_, 0);
+    SetTimerTolerance(delayed_work_timer_, 0);
   }
 }
 
@@ -358,31 +290,6 @@
   // The timer won't fire again until it's reset.
   self->delayed_work_fire_time_ = kCFTimeIntervalMax;
 
-  // The message pump's timer needs to fire at changing and unpredictable
-  // intervals. Creating a new timer for each firing time is very expensive, so
-  // the message pump instead uses a repeating timer with a very large repeat
-  // rate. After each firing of the timer, the run loop sets the timer's next
-  // firing time to the distant future, essentially pausing the timer until the
-  // pump sets the next firing time. This is the solution recommended by Apple.
-  //
-  // It turns out, however, that scheduling timers is also quite expensive, and
-  // that every one of the message pump's timer firings incurs two
-  // reschedulings. The first rescheduling occurs in ScheduleDelayedWork(),
-  // which sets the desired next firing time. The second comes after exiting
-  // this method (the timer's callback method), when the run loop sets the
-  // timer's next firing time to far in the future.
-  //
-  // The code in __CFRunLoopDoTimer() inside CFRunLoop.c calls the timer's
-  // callback, confirms that the timer is valid, and then sets its future
-  // firing time based on its repeat frequency. Flipping the valid bit here
-  // causes the __CFRunLoopDoTimer() to skip setting the future firing time.
-  // Note that there's public API to invalidate a timer but it goes beyond
-  // flipping the valid bit, making the timer unusable in the future.
-  //
-  // ScheduleDelayedWork() flips the valid bit back just before setting the
-  // timer's new firing time.
-  ChromeCFRunLoopTimerSetValid(self->delayed_work_timer_, false);
-
   // CFRunLoopTimers fire outside of the priority scheme for CFRunLoopSources.
   // In order to establish the proper priority in which work and delayed work
   // are processed one for one, the timer used to schedule delayed work must
@@ -394,9 +301,7 @@
 // static
 void MessagePumpCFRunLoopBase::RunWorkSource(void* info) {
   MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-  base::mac::CallWithEHFrame(^{
-    self->RunWork();
-  });
+  self->RunWork();
 }
 
 // Called by MessagePumpCFRunLoopBase::RunWorkSource.
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index 6b38d55..600b94e 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -5,26 +5,15 @@
 #include "base/metrics/field_trial.h"
 
 #include <algorithm>
-#include <utility>
 
-#include "base/base_switches.h"
 #include "base/build_time.h"
-#include "base/command_line.h"
-#include "base/debug/activity_tracker.h"
 #include "base/logging.h"
-#include "base/metrics/field_trial_param_associator.h"
-#include "base/process/memory.h"
 #include "base/rand_util.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/strings/utf_string_conversions.h"
 
-// On POSIX, the fd is shared using the mapping in GlobalDescriptors.
-#if defined(OS_POSIX) && !defined(OS_NACL)
-#include "base/posix/global_descriptors.h"
-#endif
-
 namespace base {
 
 namespace {
@@ -38,62 +27,6 @@
 // command line which forces its activation.
 const char kActivationMarker = '*';
 
-// Use shared memory to communicate field trial (experiment) state. Set to false
-// for now while the implementation is fleshed out (e.g. data format, single
-// shared memory segment). See https://codereview.chromium.org/2365273004/ and
-// crbug.com/653874
-// The browser is the only process that has write access to the shared memory.
-// This is safe from race conditions because MakeIterable is a release operation
-// and GetNextOfType is an acquire operation, so memory writes before
-// MakeIterable happen before memory reads after GetNextOfType.
-const bool kUseSharedMemoryForFieldTrials = true;
-
-// Constants for the field trial allocator.
-const char kAllocatorName[] = "FieldTrialAllocator";
-
-// We allocate 128 KiB to hold all the field trial data. This should be enough,
-// as most people use 3 - 25 KiB for field trials (as of 11/25/2016).
-// This also doesn't allocate all 128 KiB at once -- the pages only get mapped
-// to physical memory when they are touched. If the size of the allocated field
-// trials does get larger than 128 KiB, then we will drop some field trials in
-// child processes, leading to an inconsistent view between browser and child
-// processes and possibly causing crashes (see crbug.com/661617).
-const size_t kFieldTrialAllocationSize = 128 << 10;  // 128 KiB
-
-// Writes out string1 and then string2 to pickle.
-bool WriteStringPair(Pickle* pickle,
-                     const StringPiece& string1,
-                     const StringPiece& string2) {
-  if (!pickle->WriteString(string1))
-    return false;
-  if (!pickle->WriteString(string2))
-    return false;
-  return true;
-}
-
-// Writes out the field trial's contents (via trial_state) to the pickle. The
-// format of the pickle looks like:
-// TrialName, GroupName, ParamKey1, ParamValue1, ParamKey2, ParamValue2, ...
-// If there are no parameters, then it just ends at GroupName.
-bool PickleFieldTrial(const FieldTrial::State& trial_state, Pickle* pickle) {
-  if (!WriteStringPair(pickle, *trial_state.trial_name,
-                       *trial_state.group_name)) {
-    return false;
-  }
-
-  // Get field trial params.
-  std::map<std::string, std::string> params;
-  FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
-      *trial_state.trial_name, *trial_state.group_name, &params);
-
-  // Write params to pickle.
-  for (const auto& param : params) {
-    if (!WriteStringPair(pickle, param.first, param.second))
-      return false;
-  }
-  return true;
-}
-
 // Created a time value based on |year|, |month| and |day_of_month| parameters.
 Time CreateTimeFromParams(int year, int month, int day_of_month) {
   DCHECK_GT(year, 1970);
@@ -140,18 +73,11 @@
   return std::min(result, divisor - 1);
 }
 
-// Separate type from FieldTrial::State so that it can use StringPieces.
-struct FieldTrialStringEntry {
-  StringPiece trial_name;
-  StringPiece group_name;
-  bool activated = false;
-};
-
 // Parses the --force-fieldtrials string |trials_string| into |entries|.
 // Returns true if the string was parsed correctly. On failure, the |entries|
 // array may end up being partially filled.
 bool ParseFieldTrialsString(const std::string& trials_string,
-                            std::vector<FieldTrialStringEntry>* entries) {
+                            std::vector<FieldTrial::State>* entries) {
   const StringPiece trials_string_piece(trials_string);
 
   size_t next_item = 0;
@@ -166,7 +92,7 @@
     if (group_name_end == trials_string.npos)
       group_name_end = trials_string.length();
 
-    FieldTrialStringEntry entry;
+    FieldTrial::State entry;
     // Verify if the trial should be activated or not.
     if (trials_string[next_item] == kActivationMarker) {
       // Name cannot be only the indicator.
@@ -181,61 +107,11 @@
         trials_string_piece.substr(name_end + 1, group_name_end - name_end - 1);
     next_item = group_name_end + 1;
 
-    entries->push_back(std::move(entry));
+    entries->push_back(entry);
   }
   return true;
 }
 
-void AddFeatureAndFieldTrialFlags(const char* enable_features_switch,
-                                  const char* disable_features_switch,
-                                  CommandLine* cmd_line) {
-  std::string enabled_features;
-  std::string disabled_features;
-  FeatureList::GetInstance()->GetFeatureOverrides(&enabled_features,
-                                                  &disabled_features);
-
-  if (!enabled_features.empty())
-    cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
-  if (!disabled_features.empty())
-    cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
-
-  std::string field_trial_states;
-  FieldTrialList::AllStatesToString(&field_trial_states);
-  if (!field_trial_states.empty()) {
-    cmd_line->AppendSwitchASCII(switches::kForceFieldTrials,
-                                field_trial_states);
-  }
-}
-
-#if defined(OS_WIN)
-HANDLE CreateReadOnlyHandle(FieldTrialList::FieldTrialAllocator* allocator) {
-  HANDLE src = allocator->shared_memory()->handle().GetHandle();
-  ProcessHandle process = GetCurrentProcess();
-  DWORD access = SECTION_MAP_READ | SECTION_QUERY;
-  HANDLE dst;
-  if (!::DuplicateHandle(process, src, process, &dst, access, true, 0))
-    return kInvalidPlatformFile;
-  return dst;
-}
-#endif
-
-#if defined(OS_POSIX) && !defined(OS_NACL)
-int CreateReadOnlyHandle(FieldTrialList::FieldTrialAllocator* allocator) {
-  SharedMemoryHandle new_handle;
-  allocator->shared_memory()->ShareReadOnlyToProcess(GetCurrentProcessHandle(),
-                                                     &new_handle);
-  return SharedMemory::GetFdFromSharedMemoryHandle(new_handle);
-}
-#endif
-
-void OnOutOfMemory(size_t size) {
-#if defined(OS_NACL)
-  NOTREACHED();
-#else
-  TerminateBecauseOutOfMemory(size);
-#endif
-}
-
 }  // namespace
 
 // statics
@@ -251,55 +127,12 @@
 FieldTrial::EntropyProvider::~EntropyProvider() {
 }
 
-FieldTrial::State::State() {}
+FieldTrial::State::State() : activated(false) {}
 
 FieldTrial::State::State(const State& other) = default;
 
 FieldTrial::State::~State() {}
 
-bool FieldTrial::FieldTrialEntry::GetTrialAndGroupName(
-    StringPiece* trial_name,
-    StringPiece* group_name) const {
-  PickleIterator iter = GetPickleIterator();
-  return ReadStringPair(&iter, trial_name, group_name);
-}
-
-bool FieldTrial::FieldTrialEntry::GetParams(
-    std::map<std::string, std::string>* params) const {
-  PickleIterator iter = GetPickleIterator();
-  StringPiece tmp;
-  // Skip reading trial and group name.
-  if (!ReadStringPair(&iter, &tmp, &tmp))
-    return false;
-
-  while (true) {
-    StringPiece key;
-    StringPiece value;
-    if (!ReadStringPair(&iter, &key, &value))
-      return key.empty();  // Non-empty is bad: got one of a pair.
-    (*params)[key.as_string()] = value.as_string();
-  }
-}
-
-PickleIterator FieldTrial::FieldTrialEntry::GetPickleIterator() const {
-  const char* src =
-      reinterpret_cast<const char*>(this) + sizeof(FieldTrialEntry);
-
-  Pickle pickle(src, pickle_size);
-  return PickleIterator(pickle);
-}
-
-bool FieldTrial::FieldTrialEntry::ReadStringPair(
-    PickleIterator* iter,
-    StringPiece* trial_name,
-    StringPiece* group_name) const {
-  if (!iter->ReadStringPiece(trial_name))
-    return false;
-  if (!iter->ReadStringPiece(group_name))
-    return false;
-  return true;
-}
-
 void FieldTrial::Disable() {
   DCHECK(!group_reported_);
   enable_field_trial_ = false;
@@ -410,8 +243,7 @@
       enable_field_trial_(true),
       forced_(false),
       group_reported_(false),
-      trial_registered_(false),
-      ref_(FieldTrialList::FieldTrialAllocator::kReferenceNull) {
+      trial_registered_(false) {
   DCHECK_GT(total_probability, 0);
   DCHECK(!trial_name_.empty());
   DCHECK(!default_group_name_.empty());
@@ -435,10 +267,6 @@
 }
 
 void FieldTrial::FinalizeGroupChoice() {
-  FinalizeGroupChoiceImpl(false);
-}
-
-void FieldTrial::FinalizeGroupChoiceImpl(bool is_locked) {
   if (group_ != kNotFinalized)
     return;
   accumulated_group_probability_ = divisor_;
@@ -446,10 +274,6 @@
   // finalized.
   DCHECK(!forced_);
   SetGroupChoice(default_group_name_, kDefaultGroupNumber);
-
-  // Add the field trial to shared memory.
-  if (kUseSharedMemoryForFieldTrials && trial_registered_)
-    FieldTrialList::OnGroupFinalized(is_locked, this);
 }
 
 bool FieldTrial::GetActiveGroup(ActiveGroup* active_group) const {
@@ -465,18 +289,8 @@
   if (!enable_field_trial_)
     return false;
   FinalizeGroupChoice();
-  field_trial_state->trial_name = &trial_name_;
-  field_trial_state->group_name = &group_name_;
-  field_trial_state->activated = group_reported_;
-  return true;
-}
-
-bool FieldTrial::GetStateWhileLocked(State* field_trial_state) {
-  if (!enable_field_trial_)
-    return false;
-  FinalizeGroupChoiceImpl(true);
-  field_trial_state->trial_name = &trial_name_;
-  field_trial_state->group_name = &group_name_;
+  field_trial_state->trial_name = trial_name_;
+  field_trial_state->group_name = group_name_;
   field_trial_state->activated = group_reported_;
   return true;
 }
@@ -494,8 +308,8 @@
 }
 
 FieldTrialList::FieldTrialList(
-    std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider)
-    : entropy_provider_(std::move(entropy_provider)),
+    const FieldTrial::EntropyProvider* entropy_provider)
+    : entropy_provider_(entropy_provider),
       observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
           ObserverListBase<FieldTrialList::Observer>::NOTIFY_EXISTING_ONLY)) {
   DCHECK(!global_);
@@ -661,17 +475,17 @@
 
   for (const auto& registered : global_->registered_) {
     FieldTrial::State trial;
-    if (!registered.second->GetStateWhileLocked(&trial))
+    if (!registered.second->GetState(&trial))
       continue;
     DCHECK_EQ(std::string::npos,
-              trial.trial_name->find(kPersistentStringSeparator));
+              trial.trial_name.find(kPersistentStringSeparator));
     DCHECK_EQ(std::string::npos,
-              trial.group_name->find(kPersistentStringSeparator));
+              trial.group_name.find(kPersistentStringSeparator));
     if (trial.activated)
       output->append(1, kActivationMarker);
-    output->append(*trial.trial_name);
+    trial.trial_name.AppendToString(output);
     output->append(1, kPersistentStringSeparator);
-    output->append(*trial.group_name);
+    trial.group_name.AppendToString(output);
     output->append(1, kPersistentStringSeparator);
   }
 }
@@ -696,7 +510,7 @@
 void FieldTrialList::GetActiveFieldTrialGroupsFromString(
     const std::string& trials_string,
     FieldTrial::ActiveGroups* active_groups) {
-  std::vector<FieldTrialStringEntry> entries;
+  std::vector<FieldTrial::State> entries;
   if (!ParseFieldTrialsString(trials_string, &entries))
     return;
 
@@ -711,36 +525,6 @@
 }
 
 // static
-void FieldTrialList::GetInitiallyActiveFieldTrials(
-    const base::CommandLine& command_line,
-    FieldTrial::ActiveGroups* active_groups) {
-  DCHECK(global_->create_trials_from_command_line_called_);
-
-  if (!global_->field_trial_allocator_) {
-    GetActiveFieldTrialGroupsFromString(
-        command_line.GetSwitchValueASCII(switches::kForceFieldTrials),
-        active_groups);
-    return;
-  }
-
-  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
-  FieldTrialAllocator::Iterator mem_iter(allocator);
-  const FieldTrial::FieldTrialEntry* entry;
-  while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
-         nullptr) {
-    StringPiece trial_name;
-    StringPiece group_name;
-    if (subtle::NoBarrier_Load(&entry->activated) &&
-        entry->GetTrialAndGroupName(&trial_name, &group_name)) {
-      FieldTrial::ActiveGroup group;
-      group.trial_name = trial_name.as_string();
-      group.group_name = group_name.as_string();
-      active_groups->push_back(group);
-    }
-  }
-}
-
-// static
 bool FieldTrialList::CreateTrialsFromString(
     const std::string& trials_string,
     const std::set<std::string>& ignored_trial_names) {
@@ -748,7 +532,7 @@
   if (trials_string.empty() || !global_)
     return true;
 
-  std::vector<FieldTrialStringEntry> entries;
+  std::vector<FieldTrial::State> entries;
   if (!ParseFieldTrialsString(trials_string, &entries))
     return false;
 
@@ -773,145 +557,6 @@
 }
 
 // static
-void FieldTrialList::CreateTrialsFromCommandLine(
-    const CommandLine& cmd_line,
-    const char* field_trial_handle_switch,
-    int fd_key) {
-  global_->create_trials_from_command_line_called_ = true;
-
-#if defined(OS_WIN)
-  if (cmd_line.HasSwitch(field_trial_handle_switch)) {
-    std::string handle_switch =
-        cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
-    bool result = CreateTrialsFromHandleSwitch(handle_switch);
-    DCHECK(result);
-  }
-#endif
-
-#if defined(OS_POSIX) && !defined(OS_NACL)
-  // On POSIX, we check if the handle is valid by seeing if the browser process
-  // sent over the switch (we don't care about the value). Invalid handles
-  // occur in some browser tests which don't initialize the allocator.
-  if (cmd_line.HasSwitch(field_trial_handle_switch)) {
-    bool result = CreateTrialsFromDescriptor(fd_key);
-    DCHECK(result);
-  }
-#endif
-
-  if (cmd_line.HasSwitch(switches::kForceFieldTrials)) {
-    bool result = FieldTrialList::CreateTrialsFromString(
-        cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials),
-        std::set<std::string>());
-    DCHECK(result);
-  }
-}
-
-// static
-void FieldTrialList::CreateFeaturesFromCommandLine(
-    const base::CommandLine& command_line,
-    const char* enable_features_switch,
-    const char* disable_features_switch,
-    FeatureList* feature_list) {
-  // Fallback to command line if not using shared memory.
-  if (!kUseSharedMemoryForFieldTrials ||
-      !global_->field_trial_allocator_.get()) {
-    return feature_list->InitializeFromCommandLine(
-        command_line.GetSwitchValueASCII(enable_features_switch),
-        command_line.GetSwitchValueASCII(disable_features_switch));
-  }
-
-  feature_list->InitializeFromSharedMemory(
-      global_->field_trial_allocator_.get());
-}
-
-#if defined(OS_WIN)
-// static
-void FieldTrialList::AppendFieldTrialHandleIfNeeded(
-    HandlesToInheritVector* handles) {
-  if (!global_)
-    return;
-  if (kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    if (global_->readonly_allocator_handle_)
-      handles->push_back(global_->readonly_allocator_handle_);
-  }
-}
-#endif
-
-#if defined(OS_POSIX) && !defined(OS_NACL)
-// static
-int FieldTrialList::GetFieldTrialHandle() {
-  if (global_ && kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    // We check for an invalid handle where this gets called.
-    return global_->readonly_allocator_handle_;
-  }
-  return kInvalidPlatformFile;
-}
-#endif
-
-// static
-void FieldTrialList::CopyFieldTrialStateToFlags(
-    const char* field_trial_handle_switch,
-    const char* enable_features_switch,
-    const char* disable_features_switch,
-    CommandLine* cmd_line) {
-  // TODO(lawrencewu): Ideally, having the global would be guaranteed. However,
-  // content browser tests currently don't create a FieldTrialList because they
-  // don't run ChromeBrowserMainParts code where it's done for Chrome.
-  // Some tests depend on the enable and disable features flag switch, though,
-  // so we can still add those even though AllStatesToString() will be a no-op.
-  if (!global_) {
-    AddFeatureAndFieldTrialFlags(enable_features_switch,
-                                 disable_features_switch, cmd_line);
-    return;
-  }
-
-  // Use shared memory to pass the state if the feature is enabled, otherwise
-  // fallback to passing it via the command line as a string.
-  if (kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    // If the readonly handle didn't get duplicated properly, then fallback to
-    // original behavior.
-    if (global_->readonly_allocator_handle_ == kInvalidPlatformFile) {
-      AddFeatureAndFieldTrialFlags(enable_features_switch,
-                                   disable_features_switch, cmd_line);
-      return;
-    }
-
-    global_->field_trial_allocator_->UpdateTrackingHistograms();
-
-#if defined(OS_WIN)
-    // We need to pass a named anonymous handle to shared memory over the
-    // command line on Windows, since the child doesn't know which of the
-    // handles it inherited it should open.
-    // PlatformFile is typedef'd to HANDLE which is typedef'd to void *. We
-    // basically cast the handle into an int (uintptr_t, to be exact), stringify
-    // the int, and pass it as a command-line flag. The child process will do
-    // the reverse conversions to retrieve the handle. See
-    // http://stackoverflow.com/a/153077
-    auto uintptr_handle =
-        reinterpret_cast<uintptr_t>(global_->readonly_allocator_handle_);
-    std::string field_trial_handle = std::to_string(uintptr_handle);
-    cmd_line->AppendSwitchASCII(field_trial_handle_switch, field_trial_handle);
-#elif defined(OS_POSIX)
-    // On POSIX, we dup the fd into a fixed fd kFieldTrialDescriptor, so we
-    // don't have to pass over the handle (it's not even the right handle
-    // anyways). But some browser tests don't create the allocator, so we need
-    // to be able to distinguish valid and invalid handles. We do that by just
-    // checking that the flag is set with a dummy value.
-    cmd_line->AppendSwitchASCII(field_trial_handle_switch, "1");
-#else
-#error Unsupported OS
-#endif
-    return;
-  }
-
-  AddFeatureAndFieldTrialFlags(enable_features_switch, disable_features_switch,
-                               cmd_line);
-}
-
-// static
 FieldTrial* FieldTrialList::CreateFieldTrial(
     const std::string& name,
     const std::string& group_name) {
@@ -952,20 +597,6 @@
 }
 
 // static
-void FieldTrialList::OnGroupFinalized(bool is_locked, FieldTrial* field_trial) {
-  if (!global_)
-    return;
-  if (is_locked) {
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              field_trial);
-  } else {
-    AutoLock auto_lock(global_->lock_);
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              field_trial);
-  }
-}
-
-// static
 void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
   if (!global_)
     return;
@@ -975,22 +606,10 @@
     if (field_trial->group_reported_)
       return;
     field_trial->group_reported_ = true;
-
-    if (!field_trial->enable_field_trial_)
-      return;
-
-    if (kUseSharedMemoryForFieldTrials)
-      ActivateFieldTrialEntryWhileLocked(field_trial);
   }
 
-  // Recording for stability debugging has to be done inline as a task posted
-  // to an observer may not get executed before a crash.
-  base::debug::GlobalActivityTracker* tracker =
-      base::debug::GlobalActivityTracker::Get();
-  if (tracker) {
-    tracker->RecordFieldTrial(field_trial->trial_name(),
-                              field_trial->group_name_internal());
-  }
+  if (!field_trial->enable_field_trial_)
+    return;
 
   global_->observer_list_->Notify(
       FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
@@ -1006,334 +625,6 @@
 }
 
 // static
-bool FieldTrialList::GetParamsFromSharedMemory(
-    FieldTrial* field_trial,
-    std::map<std::string, std::string>* params) {
-  DCHECK(global_);
-  // If the field trial allocator is not set up yet, then there are several
-  // cases:
-  //   - We are in the browser process and the allocator has not been set up
-  //   yet. If we got here, then we couldn't find the params in
-  //   FieldTrialParamAssociator, so it's definitely not here. Return false.
-  //   - Using shared memory for field trials is not enabled. If we got here,
-  //   then there's nothing in shared memory. Return false.
-  //   - We are in the child process and the allocator has not been set up yet.
-  //   If this is the case, then you are calling this too early. The field trial
-  //   allocator should get set up very early in the lifecycle. Try to see if
-  //   you can call it after it's been set up.
-  AutoLock auto_lock(global_->lock_);
-  if (!global_->field_trial_allocator_)
-    return false;
-
-  // If ref_ isn't set, then the field trial data can't be in shared memory.
-  if (!field_trial->ref_)
-    return false;
-
-  const FieldTrial::FieldTrialEntry* entry =
-      global_->field_trial_allocator_->GetAsObject<FieldTrial::FieldTrialEntry>(
-          field_trial->ref_);
-
-  size_t allocated_size =
-      global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
-  size_t actual_size = sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size;
-  if (allocated_size < actual_size)
-    return false;
-
-  return entry->GetParams(params);
-}
-
-// static
-void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
-  if (!global_)
-    return;
-
-  AutoLock auto_lock(global_->lock_);
-  if (!global_->field_trial_allocator_)
-    return;
-
-  // To clear the params, we iterate through every item in the allocator, copy
-  // just the trial and group name into a newly-allocated segment and then clear
-  // the existing item.
-  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
-  FieldTrialAllocator::Iterator mem_iter(allocator);
-
-  // List of refs to eventually be made iterable. We can't make it in the loop,
-  // since it would go on forever.
-  std::vector<FieldTrial::FieldTrialRef> new_refs;
-
-  FieldTrial::FieldTrialRef prev_ref;
-  while ((prev_ref = mem_iter.GetNextOfType<FieldTrial::FieldTrialEntry>()) !=
-         FieldTrialAllocator::kReferenceNull) {
-    // Get the existing field trial entry in shared memory.
-    const FieldTrial::FieldTrialEntry* prev_entry =
-        allocator->GetAsObject<FieldTrial::FieldTrialEntry>(prev_ref);
-    StringPiece trial_name;
-    StringPiece group_name;
-    if (!prev_entry->GetTrialAndGroupName(&trial_name, &group_name))
-      continue;
-
-    // Write a new entry, minus the params.
-    Pickle pickle;
-    pickle.WriteString(trial_name);
-    pickle.WriteString(group_name);
-    size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
-    FieldTrial::FieldTrialEntry* new_entry =
-        allocator->New<FieldTrial::FieldTrialEntry>(total_size);
-    subtle::NoBarrier_Store(&new_entry->activated,
-                            subtle::NoBarrier_Load(&prev_entry->activated));
-    new_entry->pickle_size = pickle.size();
-
-    // TODO(lawrencewu): Modify base::Pickle to be able to write over a section
-    // in memory, so we can avoid this memcpy.
-    char* dst = reinterpret_cast<char*>(new_entry) +
-                sizeof(FieldTrial::FieldTrialEntry);
-    memcpy(dst, pickle.data(), pickle.size());
-
-    // Update the ref on the field trial and add it to the list to be made
-    // iterable.
-    FieldTrial::FieldTrialRef new_ref = allocator->GetAsReference(new_entry);
-    FieldTrial* trial = global_->PreLockedFind(trial_name.as_string());
-    trial->ref_ = new_ref;
-    new_refs.push_back(new_ref);
-
-    // Mark the existing entry as unused.
-    allocator->ChangeType(prev_ref, 0,
-                          FieldTrial::FieldTrialEntry::kPersistentTypeId,
-                          /*clear=*/false);
-  }
-
-  for (const auto& ref : new_refs) {
-    allocator->MakeIterable(ref);
-  }
-}
-
-// static
-void FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(
-    PersistentMemoryAllocator* allocator) {
-  if (!global_)
-    return;
-  AutoLock auto_lock(global_->lock_);
-  for (const auto& registered : global_->registered_) {
-    AddToAllocatorWhileLocked(allocator, registered.second);
-  }
-}
-
-// static
-std::vector<const FieldTrial::FieldTrialEntry*>
-FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(
-    PersistentMemoryAllocator const& allocator) {
-  std::vector<const FieldTrial::FieldTrialEntry*> entries;
-  FieldTrialAllocator::Iterator iter(&allocator);
-  const FieldTrial::FieldTrialEntry* entry;
-  while ((entry = iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
-         nullptr) {
-    entries.push_back(entry);
-  }
-  return entries;
-}
-
-#if defined(OS_WIN)
-// static
-bool FieldTrialList::CreateTrialsFromHandleSwitch(
-    const std::string& handle_switch) {
-  int field_trial_handle = std::stoi(handle_switch);
-  HANDLE handle = reinterpret_cast<HANDLE>(field_trial_handle);
-  SharedMemoryHandle shm_handle(handle, GetCurrentProcId());
-  return FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm_handle);
-}
-#endif
-
-#if defined(OS_POSIX) && !defined(OS_NACL)
-// static
-bool FieldTrialList::CreateTrialsFromDescriptor(int fd_key) {
-  if (!kUseSharedMemoryForFieldTrials)
-    return false;
-
-  if (fd_key == -1)
-    return false;
-
-  int fd = GlobalDescriptors::GetInstance()->MaybeGet(fd_key);
-  if (fd == -1)
-    return false;
-
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  SharedMemoryHandle shm_handle(FileDescriptor(fd, true));
-#else
-  SharedMemoryHandle shm_handle(fd, true);
-#endif
-
-  bool result = FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm_handle);
-  DCHECK(result);
-  return true;
-}
-#endif
-
-// static
-bool FieldTrialList::CreateTrialsFromSharedMemoryHandle(
-    SharedMemoryHandle shm_handle) {
-  // shm gets deleted when it gets out of scope, but that's OK because we need
-  // it only for the duration of this method.
-  std::unique_ptr<SharedMemory> shm(new SharedMemory(shm_handle, true));
-  if (!shm.get()->Map(kFieldTrialAllocationSize))
-    OnOutOfMemory(kFieldTrialAllocationSize);
-
-  return FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
-}
-
-// static
-bool FieldTrialList::CreateTrialsFromSharedMemory(
-    std::unique_ptr<SharedMemory> shm) {
-  global_->field_trial_allocator_.reset(
-      new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, true));
-  FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get();
-  FieldTrialAllocator::Iterator mem_iter(shalloc);
-
-  const FieldTrial::FieldTrialEntry* entry;
-  while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
-         nullptr) {
-    StringPiece trial_name;
-    StringPiece group_name;
-    if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
-      return false;
-
-    // TODO(lawrencewu): Convert the API for CreateFieldTrial to take
-    // StringPieces.
-    FieldTrial* trial =
-        CreateFieldTrial(trial_name.as_string(), group_name.as_string());
-
-    trial->ref_ = mem_iter.GetAsReference(entry);
-    if (subtle::NoBarrier_Load(&entry->activated)) {
-      // Call |group()| to mark the trial as "used" and notify observers, if
-      // any. This is useful to ensure that field trials created in child
-      // processes are properly reported in crash reports.
-      trial->group();
-    }
-  }
-  return true;
-}
-
-// static
-void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
-  if (!global_)
-    return;
-  AutoLock auto_lock(global_->lock_);
-  // Create the allocator if not already created and add all existing trials.
-  if (global_->field_trial_allocator_ != nullptr)
-    return;
-
-  SharedMemoryCreateOptions options;
-  options.size = kFieldTrialAllocationSize;
-  options.share_read_only = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  options.type = SharedMemoryHandle::POSIX;
-#endif
-
-  std::unique_ptr<SharedMemory> shm(new SharedMemory());
-  if (!shm->Create(options))
-    OnOutOfMemory(kFieldTrialAllocationSize);
-
-  if (!shm->Map(kFieldTrialAllocationSize))
-    OnOutOfMemory(kFieldTrialAllocationSize);
-
-  global_->field_trial_allocator_.reset(
-      new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, false));
-  global_->field_trial_allocator_->CreateTrackingHistograms(kAllocatorName);
-
-  // Add all existing field trials.
-  for (const auto& registered : global_->registered_) {
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              registered.second);
-  }
-
-  // Add all existing features.
-  FeatureList::GetInstance()->AddFeaturesToAllocator(
-      global_->field_trial_allocator_.get());
-
-#if !defined(OS_NACL)
-  // Set |readonly_allocator_handle_| so we can pass it to be inherited and
-  // via the command line.
-  global_->readonly_allocator_handle_ =
-      CreateReadOnlyHandle(global_->field_trial_allocator_.get());
-#endif
-}
-
-// static
-void FieldTrialList::AddToAllocatorWhileLocked(
-    PersistentMemoryAllocator* allocator,
-    FieldTrial* field_trial) {
-  // Don't do anything if the allocator hasn't been instantiated yet.
-  if (allocator == nullptr)
-    return;
-
-  // Or if the allocator is read only, which means we are in a child process and
-  // shouldn't be writing to it.
-  if (allocator->IsReadonly())
-    return;
-
-  FieldTrial::State trial_state;
-  if (!field_trial->GetStateWhileLocked(&trial_state))
-    return;
-
-  // Or if we've already added it. We must check after GetState since it can
-  // also add to the allocator.
-  if (field_trial->ref_)
-    return;
-
-  Pickle pickle;
-  if (!PickleFieldTrial(trial_state, &pickle)) {
-    NOTREACHED();
-    return;
-  }
-
-  size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
-  FieldTrial::FieldTrialRef ref = allocator->Allocate(
-      total_size, FieldTrial::FieldTrialEntry::kPersistentTypeId);
-  if (ref == FieldTrialAllocator::kReferenceNull) {
-    NOTREACHED();
-    return;
-  }
-
-  FieldTrial::FieldTrialEntry* entry =
-      allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
-  subtle::NoBarrier_Store(&entry->activated, trial_state.activated);
-  entry->pickle_size = pickle.size();
-
-  // TODO(lawrencewu): Modify base::Pickle to be able to write over a section in
-  // memory, so we can avoid this memcpy.
-  char* dst =
-      reinterpret_cast<char*>(entry) + sizeof(FieldTrial::FieldTrialEntry);
-  memcpy(dst, pickle.data(), pickle.size());
-
-  allocator->MakeIterable(ref);
-  field_trial->ref_ = ref;
-}
-
-// static
-void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
-    FieldTrial* field_trial) {
-  FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
-
-  // Check if we're in the child process and return early if so.
-  if (allocator && allocator->IsReadonly())
-    return;
-
-  FieldTrial::FieldTrialRef ref = field_trial->ref_;
-  if (ref == FieldTrialAllocator::kReferenceNull) {
-    // It's fine to do this even if the allocator hasn't been instantiated
-    // yet -- it'll just return early.
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              field_trial);
-  } else {
-    // It's also okay to do this even though the callee doesn't have a lock --
-    // the only thing that happens on a stale read here is a slight performance
-    // hit from the child re-synchronizing activation state.
-    FieldTrial::FieldTrialEntry* entry =
-        allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
-    subtle::NoBarrier_Store(&entry->activated, 1);
-  }
-}
-
-// static
 const FieldTrial::EntropyProvider*
     FieldTrialList::GetEntropyProviderForOneTimeRandomization() {
   if (!global_) {
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
index 60a6592..28a4606 100644
--- a/base/metrics/field_trial.h
+++ b/base/metrics/field_trial.h
@@ -58,24 +58,15 @@
 #include <stdint.h>
 
 #include <map>
-#include <memory>
 #include <set>
 #include <string>
 #include <vector>
 
-#include "base/atomicops.h"
 #include "base/base_export.h"
-#include "base/command_line.h"
-#include "base/feature_list.h"
-#include "base/files/file.h"
 #include "base/gtest_prod_util.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/shared_memory.h"
-#include "base/metrics/persistent_memory_allocator.h"
 #include "base/observer_list_threadsafe.h"
-#include "base/pickle.h"
-#include "base/process/launch.h"
 #include "base/strings/string_piece.h"
 #include "base/synchronization/lock.h"
 #include "base/time/time.h"
@@ -88,9 +79,6 @@
  public:
   typedef int Probability;  // Probability type for being selected in a trial.
 
-  // TODO(665129): Make private again after crash has been resolved.
-  typedef SharedPersistentMemoryAllocator::Reference FieldTrialRef;
-
   // Specifies the persistence of the field trial group choice.
   enum RandomizationType {
     // One time randomized trials will persist the group choice between
@@ -124,61 +112,17 @@
   };
 
   // A triplet representing a FieldTrial, its selected group and whether it's
-  // active. String members are pointers to the underlying strings owned by the
-  // FieldTrial object. Does not use StringPiece to avoid conversions back to
-  // std::string.
+  // active.
   struct BASE_EXPORT State {
-    const std::string* trial_name = nullptr;
-    const std::string* group_name = nullptr;
-    bool activated = false;
+    StringPiece trial_name;
+    StringPiece group_name;
+    bool activated;
 
     State();
     State(const State& other);
     ~State();
   };
 
-  // We create one FieldTrialEntry per field trial in shared memory, via
-  // AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a
-  // base::Pickle object that we unpickle and read from.
-  struct BASE_EXPORT FieldTrialEntry {
-    // SHA1(FieldTrialEntry): Increment this if structure changes!
-    static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
-
-    // Expected size for 32/64-bit check.
-    static constexpr size_t kExpectedInstanceSize = 8;
-
-    // Whether or not this field trial is activated. This is really just a
-    // boolean but using a 32 bit value for portability reasons. It should be
-    // accessed via NoBarrier_Load()/NoBarrier_Store() to prevent the compiler
-    // from doing unexpected optimizations because it thinks that only one
-    // thread is accessing the memory location.
-    subtle::Atomic32 activated;
-
-    // Size of the pickled structure, NOT the total size of this entry.
-    uint32_t pickle_size;
-
-    // Calling this is only valid when the entry is initialized. That is, it
-    // resides in shared memory and has a pickle containing the trial name and
-    // group name following it.
-    bool GetTrialAndGroupName(StringPiece* trial_name,
-                              StringPiece* group_name) const;
-
-    // Calling this is only valid when the entry is initialized as well. Reads
-    // the parameters following the trial and group name and stores them as
-    // key-value mappings in |params|.
-    bool GetParams(std::map<std::string, std::string>* params) const;
-
-   private:
-    // Returns an iterator over the data containing names and params.
-    PickleIterator GetPickleIterator() const;
-
-    // Takes the iterator and writes out the first two items into |trial_name|
-    // and |group_name|.
-    bool ReadStringPair(PickleIterator* iter,
-                        StringPiece* trial_name,
-                        StringPiece* group_name) const;
-  };
-
   typedef std::vector<ActiveGroup> ActiveGroups;
 
   // A return value to indicate that a given instance has not yet had a group
@@ -269,9 +213,6 @@
   FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_NonDefault);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DoesNotSurpassTotalProbability);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
-                           DoNotAddSimulatedFieldTrialsToAllocator);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
 
   friend class base::FieldTrialList;
 
@@ -305,10 +246,6 @@
   // status.
   void FinalizeGroupChoice();
 
-  // Implements FinalizeGroupChoice() with the added flexibility of being
-  // deadlock-free if |is_locked| is true and the caller is holding a lock.
-  void FinalizeGroupChoiceImpl(bool is_locked);
-
   // Returns the trial name and selected group name for this field trial via
   // the output parameter |active_group|, but only if the group has already
   // been chosen and has been externally observed via |group()| and the trial
@@ -324,10 +261,6 @@
   // untouched.
   bool GetState(State* field_trial_state);
 
-  // Does the same thing as above, but is deadlock-free if the caller is holding
-  // a lock.
-  bool GetStateWhileLocked(State* field_trial_state);
-
   // Returns the group_name. A winner need not have been chosen.
   std::string group_name_internal() const { return group_name_; }
 
@@ -375,9 +308,6 @@
   // should notify it when its group is queried.
   bool trial_registered_;
 
-  // Reference to related field trial struct and data in shared memory.
-  FieldTrialRef ref_;
-
   // When benchmarking is enabled, field trials all revert to the 'default'
   // group.
   static bool enable_benchmarking_;
@@ -391,8 +321,6 @@
 // Only one instance of this class exists.
 class BASE_EXPORT FieldTrialList {
  public:
-  typedef SharedPersistentMemoryAllocator FieldTrialAllocator;
-
   // Year that is guaranteed to not be expired when instantiating a field trial
   // via |FactoryGetFieldTrial()|.  Set to two years from the build date.
   static int kNoExpirationYear;
@@ -410,12 +338,11 @@
 
   // This singleton holds the global list of registered FieldTrials.
   //
-  // To support one-time randomized field trials, specify a non-null
+  // To support one-time randomized field trials, specify a non-NULL
   // |entropy_provider| which should be a source of uniformly distributed
-  // entropy values. If one time randomization is not desired, pass in null for
-  // |entropy_provider|.
-  explicit FieldTrialList(
-      std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider);
+  // entropy values. Takes ownership of |entropy_provider|. If one time
+  // randomization is not desired, pass in NULL for |entropy_provider|.
+  explicit FieldTrialList(const FieldTrial::EntropyProvider* entropy_provider);
 
   // Destructor Release()'s references to all registered FieldTrial instances.
   ~FieldTrialList();
@@ -457,7 +384,7 @@
   // PermutedEntropyProvider (which is used when UMA is not enabled). If
   // |override_entropy_provider| is not null, then it will be used for
   // randomization instead of the provider given when the FieldTrialList was
-  // instantiated.
+  // instanciated.
   static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
       const std::string& trial_name,
       FieldTrial::Probability total_probability,
@@ -522,14 +449,6 @@
       const std::string& trials_string,
       FieldTrial::ActiveGroups* active_groups);
 
-  // Returns the field trials that were active when the process was
-  // created. Either parses the field trial string or the shared memory
-  // holding field trial information.
-  // Must be called only after a call to CreateTrialsFromCommandLine().
-  static void GetInitiallyActiveFieldTrials(
-      const base::CommandLine& command_line,
-      FieldTrial::ActiveGroups* active_groups);
-
   // Use a state string (re: StatesToString()) to augment the current list of
   // field trials to include the supplied trials, and using a 100% probability
   // for each trial, force them to have the same group string. This is commonly
@@ -543,51 +462,6 @@
       const std::string& trials_string,
       const std::set<std::string>& ignored_trial_names);
 
-  // Achieves the same thing as CreateTrialsFromString, except wraps the logic
-  // by taking in the trials from the command line, either via shared memory
-  // handle or command line argument. A bit of a misnomer since on POSIX we
-  // simply get the trials from opening |fd_key| if using shared memory. On
-  // Windows, we expect the |cmd_line| switch for |field_trial_handle_switch| to
-  // contain the shared memory handle that contains the field trial allocator.
-  // We need the |field_trial_handle_switch| and |fd_key| arguments to be passed
-  // in since base/ can't depend on content/.
-  static void CreateTrialsFromCommandLine(const base::CommandLine& cmd_line,
-                                          const char* field_trial_handle_switch,
-                                          int fd_key);
-
-  // Creates base::Feature overrides from the command line by first trying to
-  // use shared memory and then falling back to the command line if it fails.
-  static void CreateFeaturesFromCommandLine(
-      const base::CommandLine& command_line,
-      const char* enable_features_switch,
-      const char* disable_features_switch,
-      FeatureList* feature_list);
-
-#if defined(OS_WIN)
-  // On Windows, we need to explicitly pass down any handles to be inherited.
-  // This function adds the shared memory handle to field trial state to the
-  // list of handles to be inherited.
-  static void AppendFieldTrialHandleIfNeeded(
-      base::HandlesToInheritVector* handles);
-#endif
-
-#if defined(OS_POSIX) && !defined(OS_NACL)
-  // On POSIX, we also need to explicitly pass down this file descriptor that
-  // should be shared with the child process. Returns kInvalidPlatformFile if no
-  // handle exists or was not initialized properly.
-  static PlatformFile GetFieldTrialHandle();
-#endif
-
-  // Adds a switch to the command line containing the field trial state as a
-  // string (if not using shared memory to share field trial state), or the
-  // shared memory handle + length.
-  // Needs the |field_trial_handle_switch| argument to be passed in since base/
-  // can't depend on content/.
-  static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
-                                         const char* enable_features_switch,
-                                         const char* disable_features_switch,
-                                         base::CommandLine* cmd_line);
-
   // Create a FieldTrial with the given |name| and using 100% probability for
   // the FieldTrial, force FieldTrial to have the same group string as
   // |group_name|. This is commonly used in a non-browser process, to carry
@@ -605,86 +479,13 @@
   // Remove an observer.
   static void RemoveObserver(Observer* observer);
 
-  // Grabs the lock if necessary and adds the field trial to the allocator. This
-  // should only be called from FinalizeGroupChoice().
-  static void OnGroupFinalized(bool is_locked, FieldTrial* field_trial);
-
   // Notify all observers that a group has been finalized for |field_trial|.
   static void NotifyFieldTrialGroupSelection(FieldTrial* field_trial);
 
   // Return the number of active field trials.
   static size_t GetFieldTrialCount();
 
-  // Gets the parameters for |field_trial| from shared memory and stores them in
-  // |params|. This is only exposed for use by FieldTrialParamAssociator and
-  // shouldn't be used by anything else.
-  static bool GetParamsFromSharedMemory(
-      FieldTrial* field_trial,
-      std::map<std::string, std::string>* params);
-
-  // Clears all the params in the allocator.
-  static void ClearParamsFromSharedMemoryForTesting();
-
-  // Dumps field trial state to an allocator so that it can be analyzed after a
-  // crash.
-  static void DumpAllFieldTrialsToPersistentAllocator(
-      PersistentMemoryAllocator* allocator);
-
-  // Retrieves field trial state from an allocator so that it can be analyzed
-  // after a crash. The pointers in the returned vector are into the persistent
-  // memory segment and so are only valid as long as the allocator is valid.
-  static std::vector<const FieldTrial::FieldTrialEntry*>
-  GetAllFieldTrialsFromPersistentAllocator(
-      PersistentMemoryAllocator const& allocator);
-
  private:
-  // Allow tests to access our innards for testing purposes.
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, InstantiateAllocator);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AddTrialsToAllocator);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
-                           DoNotAddSimulatedFieldTrialsToAllocator);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AssociateFieldTrialParams);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
-
-#if defined(OS_WIN)
-  // Takes in |handle_switch| from the command line which represents the shared
-  // memory handle for field trials, parses it, and creates the field trials.
-  // Returns true on success, false on failure.
-  static bool CreateTrialsFromHandleSwitch(const std::string& handle_switch);
-#endif
-
-#if defined(OS_POSIX) && !defined(OS_NACL)
-  // On POSIX systems that use the zygote, we look up the correct fd that backs
-  // the shared memory segment containing the field trials by looking it up via
-  // an fd key in GlobalDescriptors. Returns true on success, false on failure.
-  static bool CreateTrialsFromDescriptor(int fd_key);
-#endif
-
-  // Takes an unmapped SharedMemoryHandle, creates a SharedMemory object from it
-  // and maps it with the correct size.
-  static bool CreateTrialsFromSharedMemoryHandle(SharedMemoryHandle shm_handle);
-
-  // Expects a mapped piece of shared memory |shm| that was created from the
-  // browser process's field_trial_allocator and shared via the command line.
-  // This function recreates the allocator, iterates through all the field
-  // trials in it, and creates them via CreateFieldTrial(). Returns true if
-  // successful and false otherwise.
-  static bool CreateTrialsFromSharedMemory(
-      std::unique_ptr<base::SharedMemory> shm);
-
-  // Instantiate the field trial allocator, add all existing field trials to it,
-  // and duplicates its handle to a read-only handle, which gets stored in
-  // |readonly_allocator_handle|.
-  static void InstantiateFieldTrialAllocatorIfNeeded();
-
-  // Adds the field trial to the allocator. Caller must hold a lock before
-  // calling this.
-  static void AddToAllocatorWhileLocked(PersistentMemoryAllocator* allocator,
-                                        FieldTrial* field_trial);
-
-  // Activate the corresponding field trial entry struct in shared memory.
-  static void ActivateFieldTrialEntryWhileLocked(FieldTrial* field_trial);
-
   // A map from FieldTrial names to the actual instances.
   typedef std::map<std::string, FieldTrial*> RegistrationMap;
 
@@ -709,8 +510,8 @@
   // FieldTrialList is created after that.
   static bool used_without_global_;
 
-  // Lock for access to registered_ and field_trial_allocator_.
-  Lock lock_;
+  // Lock for access to registered_.
+  base::Lock lock_;
   RegistrationMap registered_;
 
   std::map<std::string, std::string> seen_states_;
@@ -722,20 +523,6 @@
   // List of observers to be notified when a group is selected for a FieldTrial.
   scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
 
-  // Allocator in shared memory containing field trial data. Used in both
-  // browser and child processes, but readonly in the child.
-  // In the future, we may want to move this to a more generic place if we want
-  // to start passing more data other than field trials.
-  std::unique_ptr<FieldTrialAllocator> field_trial_allocator_ = nullptr;
-
-  // Readonly copy of the handle to the allocator. Needs to be a member variable
-  // because it's needed from both CopyFieldTrialStateToFlags() and
-  // AppendFieldTrialHandleIfNeeded().
-  PlatformFile readonly_allocator_handle_ = kInvalidPlatformFile;
-
-  // Tracks whether CreateTrialsFromCommandLine() has been called.
-  bool create_trials_from_command_line_called_ = false;
-
   DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
 };
 
diff --git a/base/metrics/field_trial_param_associator.cc b/base/metrics/field_trial_param_associator.cc
deleted file mode 100644
index 3bac18d..0000000
--- a/base/metrics/field_trial_param_associator.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/field_trial_param_associator.h"
-
-#include "base/metrics/field_trial.h"
-
-namespace base {
-
-FieldTrialParamAssociator::FieldTrialParamAssociator() {}
-FieldTrialParamAssociator::~FieldTrialParamAssociator() {}
-
-// static
-FieldTrialParamAssociator* FieldTrialParamAssociator::GetInstance() {
-  return Singleton<FieldTrialParamAssociator,
-                   LeakySingletonTraits<FieldTrialParamAssociator>>::get();
-}
-
-bool FieldTrialParamAssociator::AssociateFieldTrialParams(
-    const std::string& trial_name,
-    const std::string& group_name,
-    const FieldTrialParams& params) {
-  if (FieldTrialList::IsTrialActive(trial_name))
-    return false;
-
-  AutoLock scoped_lock(lock_);
-  const FieldTrialKey key(trial_name, group_name);
-  if (ContainsKey(field_trial_params_, key))
-    return false;
-
-  field_trial_params_[key] = params;
-  return true;
-}
-
-bool FieldTrialParamAssociator::GetFieldTrialParams(
-    const std::string& trial_name,
-    FieldTrialParams* params) {
-  FieldTrial* field_trial = FieldTrialList::Find(trial_name);
-  if (!field_trial)
-    return false;
-
-  // First try the local map, falling back to getting it from shared memory.
-  if (GetFieldTrialParamsWithoutFallback(trial_name, field_trial->group_name(),
-                                         params)) {
-    return true;
-  }
-
-  // TODO(lawrencewu): add the params to field_trial_params_ for next time.
-  return FieldTrialList::GetParamsFromSharedMemory(field_trial, params);
-}
-
-bool FieldTrialParamAssociator::GetFieldTrialParamsWithoutFallback(
-    const std::string& trial_name,
-    const std::string& group_name,
-    FieldTrialParams* params) {
-  AutoLock scoped_lock(lock_);
-
-  const FieldTrialKey key(trial_name, group_name);
-  if (!ContainsKey(field_trial_params_, key))
-    return false;
-
-  *params = field_trial_params_[key];
-  return true;
-}
-
-void FieldTrialParamAssociator::ClearAllParamsForTesting() {
-  {
-    AutoLock scoped_lock(lock_);
-    field_trial_params_.clear();
-  }
-  FieldTrialList::ClearParamsFromSharedMemoryForTesting();
-}
-
-void FieldTrialParamAssociator::ClearAllCachedParamsForTesting() {
-  AutoLock scoped_lock(lock_);
-  field_trial_params_.clear();
-}
-
-}  // namespace base
diff --git a/base/metrics/field_trial_param_associator.h b/base/metrics/field_trial_param_associator.h
deleted file mode 100644
index b19c666..0000000
--- a/base/metrics/field_trial_param_associator.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
-#define BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
-
-#include <map>
-#include <string>
-#include <utility>
-
-#include "base/base_export.h"
-#include "base/memory/singleton.h"
-#include "base/metrics/field_trial.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-// Keeps track of the parameters of all field trials and ensures access to them
-// is thread-safe.
-class BASE_EXPORT FieldTrialParamAssociator {
- public:
-  FieldTrialParamAssociator();
-  ~FieldTrialParamAssociator();
-
-  // Key-value mapping type for field trial parameters.
-  typedef std::map<std::string, std::string> FieldTrialParams;
-
-  // Retrieve the singleton.
-  static FieldTrialParamAssociator* GetInstance();
-
-  // Sets parameters for the given field trial name and group.
-  bool AssociateFieldTrialParams(const std::string& trial_name,
-                                 const std::string& group_name,
-                                 const FieldTrialParams& params);
-
-  // Gets the parameters for a field trial and its chosen group. If not found in
-  // field_trial_params_, then tries to looks it up in shared memory.
-  bool GetFieldTrialParams(const std::string& trial_name,
-                           FieldTrialParams* params);
-
-  // Gets the parameters for a field trial and its chosen group. Does not
-  // fallback to looking it up in shared memory. This should only be used if you
-  // know for sure the params are in the mapping, like if you're in the browser
-  // process, and even then you should probably just use GetFieldTrialParams().
-  bool GetFieldTrialParamsWithoutFallback(const std::string& trial_name,
-                                          const std::string& group_name,
-                                          FieldTrialParams* params);
-
-  // Clears the internal field_trial_params_ mapping, plus removes all params in
-  // shared memory.
-  void ClearAllParamsForTesting();
-
-  // Clears the internal field_trial_params_ mapping.
-  void ClearAllCachedParamsForTesting();
-
- private:
-  friend struct DefaultSingletonTraits<FieldTrialParamAssociator>;
-
-  // (field_trial_name, field_trial_group)
-  typedef std::pair<std::string, std::string> FieldTrialKey;
-
-  Lock lock_;
-  std::map<FieldTrialKey, FieldTrialParams> field_trial_params_;
-
-  DISALLOW_COPY_AND_ASSIGN(FieldTrialParamAssociator);
-};
-
-}  // namespace base
-
-#endif  // BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc
index 54672e6..00f351f 100644
--- a/base/metrics/field_trial_unittest.cc
+++ b/base/metrics/field_trial_unittest.cc
@@ -6,20 +6,13 @@
 
 #include <stddef.h>
 
-#include "base/base_switches.h"
 #include "base/build_time.h"
-#include "base/feature_list.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
 #include "base/message_loop/message_loop.h"
-#include "base/metrics/field_trial_param_associator.h"
 #include "base/rand_util.h"
 #include "base/run_loop.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/stringprintf.h"
-#include "base/test/gtest_util.h"
-#include "base/test/mock_entropy_provider.h"
-#include "base/test/scoped_feature_list.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -342,12 +335,12 @@
   std::string winner("Winner");
   trial->AppendGroup(winner, 10);
   EXPECT_TRUE(trial->GetState(&field_trial_state));
-  EXPECT_EQ(one_winner, *field_trial_state.trial_name);
-  EXPECT_EQ(winner, *field_trial_state.group_name);
+  EXPECT_EQ(one_winner, field_trial_state.trial_name);
+  EXPECT_EQ(winner, field_trial_state.group_name);
   trial->group();
   EXPECT_TRUE(trial->GetState(&field_trial_state));
-  EXPECT_EQ(one_winner, *field_trial_state.trial_name);
-  EXPECT_EQ(winner, *field_trial_state.group_name);
+  EXPECT_EQ(one_winner, field_trial_state.trial_name);
+  EXPECT_EQ(winner, field_trial_state.group_name);
 
   std::string multi_group("MultiGroup");
   scoped_refptr<FieldTrial> multi_group_trial =
@@ -360,8 +353,8 @@
   // Finalize the group selection by accessing the selected group.
   multi_group_trial->group();
   EXPECT_TRUE(multi_group_trial->GetState(&field_trial_state));
-  EXPECT_EQ(multi_group, *field_trial_state.trial_name);
-  EXPECT_EQ(multi_group_trial->group_name(), *field_trial_state.group_name);
+  EXPECT_EQ(multi_group, field_trial_state.trial_name);
+  EXPECT_EQ(multi_group_trial->group_name(), field_trial_state.group_name);
 }
 
 TEST_F(FieldTrialTest, ActiveGroupsNotFinalized) {
@@ -1127,246 +1120,15 @@
   EXPECT_TRUE(field_trial_list.TrialExists("zzz"));
 }
 
+#if GTEST_HAS_DEATH_TEST
 TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
   // Trying to instantiate a one-time randomized field trial before the
   // FieldTrialList is created should crash.
-  EXPECT_DEATH_IF_SUPPORTED(
-      FieldTrialList::FactoryGetFieldTrial(
-          "OneTimeRandomizedTrialWithoutFieldTrialList", 100, kDefaultGroupName,
-          base::FieldTrialList::kNoExpirationYear, 1, 1,
-          base::FieldTrial::ONE_TIME_RANDOMIZED, NULL),
-      "");
-}
-
-#if defined(OS_WIN)
-TEST(FieldTrialListTest, TestCopyFieldTrialStateToFlags) {
-  base::FieldTrialList field_trial_list(
-      base::MakeUnique<base::MockEntropyProvider>());
-  base::FieldTrialList::CreateFieldTrial("Trial1", "Group1");
-  base::FilePath test_file_path = base::FilePath(FILE_PATH_LITERAL("Program"));
-  base::CommandLine cmd_line = base::CommandLine(test_file_path);
-  const char field_trial_handle[] = "test-field-trial-handle";
-  const char enable_features_switch[] = "test-enable-features";
-  const char disable_features_switch[] = "test-disable-features";
-
-  base::FieldTrialList::CopyFieldTrialStateToFlags(
-      field_trial_handle, enable_features_switch, disable_features_switch,
-      &cmd_line);
-  EXPECT_TRUE(cmd_line.HasSwitch(field_trial_handle) ||
-              cmd_line.HasSwitch(switches::kForceFieldTrials));
+  EXPECT_DEATH(FieldTrialList::FactoryGetFieldTrial(
+      "OneTimeRandomizedTrialWithoutFieldTrialList", 100, kDefaultGroupName,
+      base::FieldTrialList::kNoExpirationYear, 1, 1,
+      base::FieldTrial::ONE_TIME_RANDOMIZED, NULL), "");
 }
 #endif
 
-TEST(FieldTrialListTest, InstantiateAllocator) {
-  test::ScopedFeatureList scoped_feature_list;
-  scoped_feature_list.Init();
-
-  FieldTrialList field_trial_list(nullptr);
-  FieldTrialList::CreateFieldTrial("Trial1", "Group1");
-
-  FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-  void* memory = field_trial_list.field_trial_allocator_->shared_memory();
-  size_t used = field_trial_list.field_trial_allocator_->used();
-
-  // Ensure that the function is idempotent.
-  FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-  void* new_memory = field_trial_list.field_trial_allocator_->shared_memory();
-  size_t new_used = field_trial_list.field_trial_allocator_->used();
-  EXPECT_EQ(memory, new_memory);
-  EXPECT_EQ(used, new_used);
-}
-
-TEST(FieldTrialListTest, AddTrialsToAllocator) {
-  std::string save_string;
-  base::SharedMemoryHandle handle;
-
-  // Scoping the first FieldTrialList, as we need another one to test that it
-  // matches.
-  {
-    test::ScopedFeatureList scoped_feature_list;
-    scoped_feature_list.Init();
-
-    FieldTrialList field_trial_list(nullptr);
-    FieldTrialList::CreateFieldTrial("Trial1", "Group1");
-    FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-    FieldTrialList::AllStatesToString(&save_string);
-    handle = base::SharedMemory::DuplicateHandle(
-        field_trial_list.field_trial_allocator_->shared_memory()->handle());
-  }
-
-  FieldTrialList field_trial_list2(nullptr);
-  std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
-  // 4 KiB is enough to hold the trials only created for this test.
-  shm.get()->Map(4 << 10);
-  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
-  std::string check_string;
-  FieldTrialList::AllStatesToString(&check_string);
-  EXPECT_EQ(save_string, check_string);
-}
-
-TEST(FieldTrialListTest, DoNotAddSimulatedFieldTrialsToAllocator) {
-  constexpr char kTrialName[] = "trial";
-  base::SharedMemoryHandle handle;
-  {
-    test::ScopedFeatureList scoped_feature_list;
-    scoped_feature_list.Init();
-
-    // Create a simulated trial and a real trial and call group() on them, which
-    // should only add the real trial to the field trial allocator.
-    FieldTrialList field_trial_list(nullptr);
-    FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-
-    // This shouldn't add to the allocator.
-    scoped_refptr<FieldTrial> simulated_trial =
-        FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, "Simulated",
-                                              0.95);
-    simulated_trial->group();
-
-    // This should add to the allocator.
-    FieldTrial* real_trial =
-        FieldTrialList::CreateFieldTrial(kTrialName, "Real");
-    real_trial->group();
-
-    handle = base::SharedMemory::DuplicateHandle(
-        field_trial_list.field_trial_allocator_->shared_memory()->handle());
-  }
-
-  // Check that there's only one entry in the allocator.
-  FieldTrialList field_trial_list2(nullptr);
-  std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
-  // 4 KiB is enough to hold the trials only created for this test.
-  shm.get()->Map(4 << 10);
-  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
-  std::string check_string;
-  FieldTrialList::AllStatesToString(&check_string);
-  ASSERT_EQ(check_string.find("Simulated"), std::string::npos);
-}
-
-TEST(FieldTrialListTest, AssociateFieldTrialParams) {
-  test::ScopedFeatureList scoped_feature_list;
-  scoped_feature_list.Init();
-
-  std::string trial_name("Trial1");
-  std::string group_name("Group1");
-
-  // Create a field trial with some params.
-  FieldTrialList field_trial_list(nullptr);
-  FieldTrialList::CreateFieldTrial(trial_name, group_name);
-  std::map<std::string, std::string> params;
-  params["key1"] = "value1";
-  params["key2"] = "value2";
-  FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
-      trial_name, group_name, params);
-  FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-
-  // Clear all cached params from the associator.
-  FieldTrialParamAssociator::GetInstance()->ClearAllCachedParamsForTesting();
-  // Check that the params have been cleared from the cache.
-  std::map<std::string, std::string> cached_params;
-  FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
-      trial_name, group_name, &cached_params);
-  EXPECT_EQ(0U, cached_params.size());
-
-  // Check that we fetch the param from shared memory properly.
-  std::map<std::string, std::string> new_params;
-  FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
-                                                                &new_params);
-  EXPECT_EQ("value1", new_params["key1"]);
-  EXPECT_EQ("value2", new_params["key2"]);
-  EXPECT_EQ(2U, new_params.size());
-}
-
-TEST(FieldTrialListTest, ClearParamsFromSharedMemory) {
-  std::string trial_name("Trial1");
-  std::string group_name("Group1");
-
-  base::SharedMemoryHandle handle;
-  {
-    test::ScopedFeatureList scoped_feature_list;
-    scoped_feature_list.Init();
-
-    // Create a field trial with some params.
-    FieldTrialList field_trial_list(nullptr);
-    FieldTrial* trial =
-        FieldTrialList::CreateFieldTrial(trial_name, group_name);
-    std::map<std::string, std::string> params;
-    params["key1"] = "value1";
-    params["key2"] = "value2";
-    FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
-        trial_name, group_name, params);
-    FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-
-    // Clear all params from the associator AND shared memory. The allocated
-    // segments should be different.
-    FieldTrial::FieldTrialRef old_ref = trial->ref_;
-    FieldTrialParamAssociator::GetInstance()->ClearAllParamsForTesting();
-    FieldTrial::FieldTrialRef new_ref = trial->ref_;
-    EXPECT_NE(old_ref, new_ref);
-
-    // Check that there are no params associated with the field trial anymore.
-    std::map<std::string, std::string> new_params;
-    FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
-                                                                  &new_params);
-    EXPECT_EQ(0U, new_params.size());
-
-    // Now duplicate the handle so we can easily check that the trial is still
-    // in shared memory via AllStatesToString.
-    handle = base::SharedMemory::DuplicateHandle(
-        field_trial_list.field_trial_allocator_->shared_memory()->handle());
-  }
-
-  // Check that we have the trial.
-  FieldTrialList field_trial_list2(nullptr);
-  std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
-  // 4 KiB is enough to hold the trials only created for this test.
-  shm.get()->Map(4 << 10);
-  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
-  std::string check_string;
-  FieldTrialList::AllStatesToString(&check_string);
-  EXPECT_EQ("*Trial1/Group1/", check_string);
-}
-
-TEST(FieldTrialListTest, DumpAndFetchFromSharedMemory) {
-  std::string trial_name("Trial1");
-  std::string group_name("Group1");
-
-  // Create a field trial with some params.
-  FieldTrialList field_trial_list(nullptr);
-  FieldTrialList::CreateFieldTrial(trial_name, group_name);
-  std::map<std::string, std::string> params;
-  params["key1"] = "value1";
-  params["key2"] = "value2";
-  FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
-      trial_name, group_name, params);
-
-  std::unique_ptr<base::SharedMemory> shm(new SharedMemory());
-  // 4 KiB is enough to hold the trials only created for this test.
-  shm.get()->CreateAndMapAnonymous(4 << 10);
-  // We _could_ use PersistentMemoryAllocator, this just has less params.
-  SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
-
-  // Dump and subsequently retrieve the field trial to |allocator|.
-  FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(&allocator);
-  std::vector<const FieldTrial::FieldTrialEntry*> entries =
-      FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(allocator);
-
-  // Check that we have the entry we put in.
-  EXPECT_EQ(1u, entries.size());
-  const FieldTrial::FieldTrialEntry* entry = entries[0];
-
-  // Check that the trial and group names match.
-  StringPiece shm_trial_name;
-  StringPiece shm_group_name;
-  entry->GetTrialAndGroupName(&shm_trial_name, &shm_group_name);
-  EXPECT_EQ(trial_name, shm_trial_name);
-  EXPECT_EQ(group_name, shm_group_name);
-
-  // Check that the params match.
-  std::map<std::string, std::string> shm_params;
-  entry->GetParams(&shm_params);
-  EXPECT_EQ(2u, shm_params.size());
-  EXPECT_EQ("value1", shm_params["key1"]);
-  EXPECT_EQ("value2", shm_params["key2"]);
-}
-
 }  // namespace base
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index de2ac33..0d6287c 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -217,7 +217,7 @@
     ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
   }
 
-  CHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
+  DCHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
   if (bucket_count_ != 0 &&
       !histogram->HasConstructionArguments(minimum_, maximum_, bucket_count_)) {
     // The construction arguments do not match the existing histogram.  This can
@@ -533,8 +533,7 @@
 Histogram::~Histogram() {
 }
 
-bool Histogram::PrintEmptyBucket(uint32_t index) const {
-  ALLOW_UNUSED_PARAM(index);
+bool Histogram::PrintEmptyBucket(uint32_t /*index*/) const {
   return true;
 }
 
@@ -675,14 +674,15 @@
                 "Histogram: %s recorded %d samples",
                 histogram_name().c_str(),
                 sample_count);
-  if (sample_count == 0) {
+  if (0 == sample_count) {
     DCHECK_EQ(samples.sum(), 0);
   } else {
-    double mean = static_cast<float>(samples.sum()) / sample_count;
-    StringAppendF(output, ", mean = %.1f", mean);
+    double average = static_cast<float>(samples.sum()) / sample_count;
+
+    StringAppendF(output, ", average = %.1f", average);
   }
-  if (flags())
-    StringAppendF(output, " (flags = 0x%x)", flags());
+  if (flags() & ~kHexRangePrintingFlag)
+    StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag);
 }
 
 void Histogram::WriteAsciiBucketContext(const int64_t past,
@@ -754,7 +754,8 @@
 
   std::unique_ptr<HistogramBase> HeapAlloc(
       const BucketRanges* ranges) override {
-    return WrapUnique(new LinearHistogram(name_, minimum_, maximum_, ranges));
+    return WrapUnique(
+        new LinearHistogram(name_, minimum_, maximum_, ranges));
   }
 
   void FillHistogram(HistogramBase* base_histogram) override {
@@ -1138,11 +1139,8 @@
   return true;
 }
 
-double CustomHistogram::GetBucketSize(Count current, uint32_t i) const {
-  ALLOW_UNUSED_PARAM(i);
-  // If this is a histogram of enum values, normalizing the bucket count
-  // by the bucket range is not helpful, so just return the bucket count.
-  return current;
+double CustomHistogram::GetBucketSize(Count /*current*/, uint32_t /*i*/) const {
+  return 1;
 }
 
 // static
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
index a76dd63..2283a4d 100644
--- a/base/metrics/histogram.h
+++ b/base/metrics/histogram.h
@@ -9,11 +9,13 @@
 // It supports calls to accumulate either time intervals (which are processed
 // as integral number of milliseconds), or arbitrary integral units.
 
-// For Histogram (exponential histogram), LinearHistogram and CustomHistogram,
+// For Histogram(exponential histogram), LinearHistogram and CustomHistogram,
 // the minimum for a declared range is 1 (instead of 0), while the maximum is
-// (HistogramBase::kSampleType_MAX - 1). However, there will always be underflow
-// and overflow buckets added automatically, so a 0 bucket will always exist
-// even when a minimum value of 1 is specified.
+// (HistogramBase::kSampleType_MAX - 1). Currently you can declare histograms
+// with ranges exceeding those limits (e.g. 0 as minimal or
+// HistogramBase::kSampleType_MAX as maximal), but those excesses will be
+// silently clamped to those limits (for backwards compatibility with existing
+// code). Best practice is to not exceed the limits.
 
 // Each use of a histogram with the same name will reference the same underlying
 // data, so it is safe to record to the same histogram from multiple locations
@@ -39,7 +41,7 @@
 // are also counted by the constructor in the user supplied "bucket_count"
 // argument.
 // The above example has an exponential ratio of 2 (doubling the bucket width
-// in each consecutive bucket).  The Histogram class automatically calculates
+// in each consecutive bucket.  The Histogram class automatically calculates
 // the smallest ratio that it can use to construct the number of buckets
 // selected in the constructor.  An another example, if you had 50 buckets,
 // and millisecond time values from 1 to 10000, then the ratio between
@@ -79,6 +81,8 @@
 #include "base/macros.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_base.h"
+// TODO(asvitkine): Migrate callers to to include this directly and remove this.
+#include "base/metrics/histogram_macros.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/time/time.h"
 
@@ -88,6 +92,7 @@
 class CustomHistogram;
 class Histogram;
 class LinearHistogram;
+class PersistentMemoryAllocator;
 class Pickle;
 class PickleIterator;
 class SampleVector;
diff --git a/base/metrics/histogram_base.cc b/base/metrics/histogram_base.cc
index 396f297..8c4f1ec 100644
--- a/base/metrics/histogram_base.cc
+++ b/base/metrics/histogram_base.cc
@@ -97,7 +97,8 @@
   return SerializeInfoImpl(pickle);
 }
 
-uint32_t HistogramBase::FindCorruption(const HistogramSamples& /* samples */) const {
+uint32_t HistogramBase::FindCorruption(
+    const HistogramSamples& /*samples*/) const {
   // Not supported by default.
   return NO_INCONSISTENCIES;
 }
@@ -118,16 +119,14 @@
   root.SetInteger("flags", flags());
   root.Set("params", std::move(parameters));
   root.Set("buckets", std::move(buckets));
-  root.SetInteger("pid", GetUniqueIdForProcess());
+  root.SetInteger("pid", GetCurrentProcId());
   serializer.Serialize(root);
 }
 
 // static
 void HistogramBase::EnableActivityReportHistogram(
     const std::string& process_type) {
-  if (report_histogram_)
-    return;
-
+  DCHECK(!report_histogram_);
   size_t existing = StatisticsRecorder::GetHistogramCount();
   if (existing != 0) {
     DVLOG(1) << existing
@@ -175,7 +174,12 @@
 
 const std::string HistogramBase::GetSimpleAsciiBucketRange(
     Sample sample) const {
-  return StringPrintf("%d", sample);
+  std::string result;
+  if (kHexRangePrintingFlag & flags())
+    StringAppendF(&result, "%#x", sample);
+  else
+    StringAppendF(&result, "%d", sample);
+  return result;
 }
 
 void HistogramBase::WriteAsciiBucketValue(Count current,
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
index 4f5ba04..d240099 100644
--- a/base/metrics/histogram_base.h
+++ b/base/metrics/histogram_base.h
@@ -21,6 +21,7 @@
 
 namespace base {
 
+class BucketRanges;
 class DictionaryValue;
 class HistogramBase;
 class HistogramSamples;
@@ -91,7 +92,7 @@
   static const Sample kSampleType_MAX;  // INT_MAX
 
   enum Flags {
-    kNoFlags = 0x0,
+    kNoFlags = 0,
 
     // Histogram should be UMA uploaded.
     kUmaTargetedHistogramFlag = 0x1,
@@ -120,6 +121,9 @@
     // MemoryAllocator, and that loaded into the Histogram module before this
     // histogram is created.
     kIsPersistent = 0x40,
+
+    // Only for Histogram and its sub classes: fancy bucket-naming support.
+    kHexRangePrintingFlag = 0x8000,
   };
 
   // Histogram data inconsistency types.
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index 7847376..ce1811a 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -5,311 +5,294 @@
 #ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
 #define BASE_METRICS_HISTOGRAM_MACROS_H_
 
+#include "base/atomicops.h"
+#include "base/logging.h"
 #include "base/metrics/histogram.h"
-#include "base/metrics/histogram_macros_internal.h"
-#include "base/metrics/histogram_macros_local.h"
 #include "base/time/time.h"
 
-
-// Macros for efficient use of histograms.
+// Macros for efficient use of histograms. See documentation in histogram.h.
 //
-// For best practices on deciding when to emit to a histogram and what form
-// the histogram should take, see
-// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
-
-// TODO(rkaplow): Link to proper documentation on metric creation once we have
-// it in a good state.
-
-// All of these macros must be called with |name| as a runtime constant - it
-// doesn't have to literally be a constant, but it must be the same string on
-// all calls from a particular call site. If this rule is violated, it is
-// possible the data will be written to the wrong histogram.
+// UMA_HISTOGRAM_SPARSE_SLOWLY is defined in sparse_histogram.h as it has
+// different #include dependencies.
 
 //------------------------------------------------------------------------------
-// Enumeration histograms.
+// Histograms are often put in areas where they are called many many times, and
+// performance is critical.  As a result, they are designed to have a very low
+// recurring cost of executing (adding additional samples).  Toward that end,
+// the macros declare a static pointer to the histogram in question, and only
+// take a "slow path" to construct (or find) the histogram on the first run
+// through the macro.  We leak the histograms at shutdown time so that we don't
+// have to validate using the pointers at any time during the running of the
+// process.
 
-// These macros create histograms for enumerated data. Ideally, the data should
-// be of the form of "event occurs, log the result". We recommended not putting
-// related but not directly connected data as enums within the same histogram.
-// You should be defining an associated Enum, and the input sample should be
-// an element of the Enum.
-// All of these macros must be called with |name| as a runtime constant.
+// The following code is generally what a thread-safe static pointer
+// initialization looks like for a histogram (after a macro is expanded).  This
+// sample is an expansion (with comments) of the code for
+// LOCAL_HISTOGRAM_CUSTOM_COUNTS().
 
-// Sample usage:
-//   UMA_HISTOGRAM_ENUMERATION("My.Enumeration", VALUE, EVENT_MAX_VALUE);
-// New Enum values can be added, but existing enums must never be renumbered or
-// delete and reused. The value in |sample| must be strictly less than
-// |enum_max|.
+/*
+  do {
+    // The pointer's presence indicates the initialization is complete.
+    // Initialization is idempotent, so it can safely be atomically repeated.
+    static base::subtle::AtomicWord atomic_histogram_pointer = 0;
 
-#define UMA_HISTOGRAM_ENUMERATION(name, sample, enum_max)                      \
-    INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                  \
-        name, sample, enum_max,                                                \
-        base::HistogramBase::kUmaTargetedHistogramFlag)
+    // Acquire_Load() ensures that we acquire visibility to the pointed-to data
+    // in the histogram.
+    base::Histogram* histogram_pointer(reinterpret_cast<base::Histogram*>(
+        base::subtle::Acquire_Load(&atomic_histogram_pointer)));
 
-// Histogram for boolean values.
+    if (!histogram_pointer) {
+      // This is the slow path, which will construct OR find the matching
+      // histogram.  FactoryGet includes locks on a global histogram name map
+      // and is completely thread safe.
+      histogram_pointer = base::Histogram::FactoryGet(
+          name, min, max, bucket_count, base::HistogramBase::kNoFlags);
 
-// Sample usage:
-//   UMA_HISTOGRAM_BOOLEAN("Histogram.Boolean", bool);
-#define UMA_HISTOGRAM_BOOLEAN(name, sample)                                    \
-    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample),                   \
-        base::BooleanHistogram::FactoryGet(name,                               \
-            base::HistogramBase::kUmaTargetedHistogramFlag))
+      // Use Release_Store to ensure that the histogram data is made available
+      // globally before we make the pointer visible.
+      // Several threads may perform this store, but the same value will be
+      // stored in all cases (for a given named/spec'ed histogram).
+      // We could do this without any barrier, since FactoryGet entered and
+      // exited a lock after construction, but this barrier makes things clear.
+      base::subtle::Release_Store(&atomic_histogram_pointer,
+          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
+    }
 
-//------------------------------------------------------------------------------
-// Linear histograms.
+    // Ensure calling contract is upheld, and the name does NOT vary.
+    DCHECK(histogram_pointer->histogram_name() == constant_histogram_name);
 
-// All of these macros must be called with |name| as a runtime constant.
+    histogram_pointer->Add(sample);
+  } while (0);
+*/
 
-// Used for capturing integer data with a linear bucketing scheme. This can be
-// used when you want the exact value of some small numeric count, with a max of
-// 100 or less. If you need to capture a range of greater than 100, we recommend
-// the use of the COUNT histograms below.
+// The above pattern is repeated in several macros.  The only elements that
+// vary are the invocation of the Add(sample) vs AddTime(sample), and the choice
+// of which FactoryGet method to use.  The different FactoryGet methods have
+// various argument lists, so the function with its argument list is provided as
+// a macro argument here.  The name is only used in a DCHECK, to assure that
+// callers don't try to vary the name of the histogram (which would tend to be
+// ignored by the one-time initialization of the histogram_pointer).
 
-// Sample usage:
-//   UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
-#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
-  UMA_HISTOGRAM_ENUMERATION(name, sample, value_max)
+// In some cases (integration into 3rd party code), it's useful to separate the
+// definition of |atomic_histogram_pointer| from its use. To achieve this we
+// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
+// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
+// and forwards to HISTOGRAM_POINTER_USE.
+#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer,                   \
+                              constant_histogram_name,                    \
+                              histogram_add_method_invocation,            \
+                              histogram_factory_get_invocation)           \
+  do {                                                                    \
+    base::HistogramBase* histogram_pointer(                               \
+        reinterpret_cast<base::HistogramBase*>(                           \
+            base::subtle::Acquire_Load(atomic_histogram_pointer)));       \
+    if (!histogram_pointer) {                                             \
+      histogram_pointer = histogram_factory_get_invocation;               \
+      base::subtle::Release_Store(                                        \
+          atomic_histogram_pointer,                                       \
+          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
+    }                                                                     \
+    if (DCHECK_IS_ON())                                                   \
+      histogram_pointer->CheckName(constant_histogram_name);              \
+    histogram_pointer->histogram_add_method_invocation;                   \
+  } while (0)
 
-// Used for capturing basic percentages. This will be 100 buckets of size 1.
-
-// Sample usage:
-//   UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
-#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int)                         \
-    UMA_HISTOGRAM_ENUMERATION(name, percent_as_int, 101)
-
-//------------------------------------------------------------------------------
-// Count histograms. These are used for collecting numeric data. Note that we
-// have macros for more specialized use cases below (memory, time, percentages).
-
-// The number suffixes here refer to the max size of the sample, i.e. COUNT_1000
-// will be able to collect samples of counts up to 1000. The default number of
-// buckets in all default macros is 50. We recommend erring on the side of too
-// large a range versus too short a range.
-// These macros default to exponential histograms - i.e. the lengths of the
-// bucket ranges exponentially increase as the sample range increases.
-// These should *not* be used if you are interested in exact counts, i.e. a
-// bucket range of 1. In these cases, you should use the ENUMERATION macros
-// defined later. These should also not be used to capture the number of some
-// event, i.e. "button X was clicked N times". In this cases, an enum should be
-// used, ideally with an appropriate baseline enum entry included.
-// All of these macros must be called with |name| as a runtime constant.
-
-// Sample usage:
-//   UMA_HISTOGRAM_COUNTS_1M("My.Histogram", sample);
-
-#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(    \
-    name, sample, 1, 100, 50)
-
-#define UMA_HISTOGRAM_COUNTS_1000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(   \
-    name, sample, 1, 1000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(  \
-    name, sample, 1, 10000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_100000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
-    name, sample, 1, 100000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_1M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(     \
-    name, sample, 1, 1000000, 50)
-
-#define UMA_HISTOGRAM_COUNTS_10M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(    \
-    name, sample, 1, 10000000, 50)
-
-// This can be used when the default ranges are not sufficient. This macro lets
-// the metric developer customize the min and max of the sampled range, as well
-// as the number of buckets recorded.
-// Any data outside the range here will be put in underflow and overflow
-// buckets. Min values should be >=1 as emitted 0s will still go into the
-// underflow bucket.
-
-// Sample usage:
-//   UMA_HISTOGRAM_CUSTOM_COUNTS("My.Histogram", 1, 100000000, 100);
-#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)      \
-    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
-        name, sample, min, max, bucket_count,                                  \
-        base::HistogramBase::kUmaTargetedHistogramFlag)
-
-//------------------------------------------------------------------------------
-// Timing histograms. These are used for collecting timing data (generally
-// latencies).
-
-// These macros create exponentially sized histograms (lengths of the bucket
-// ranges exponentially increase as the sample range increases). The input
-// sample is a base::TimeDelta. The output data is measured in ms granularity.
-// All of these macros must be called with |name| as a runtime constant.
-
-// Sample usage:
-//   UMA_HISTOGRAM_TIMES("My.Timing.Histogram", time_delta);
-
-// Short timings - up to 10 seconds.
-#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(          \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromSeconds(10), 50)
-
-// Medium timings - up to 3 minutes. Note this starts at 10ms (no good reason,
-// but not worth changing).
-#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(   \
-    name, sample, base::TimeDelta::FromMilliseconds(10),                       \
-    base::TimeDelta::FromMinutes(3), 50)
-
-// Long timings - up to an hour.
-#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(     \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromHours(1), 50)
-
-// Long timings with higher granularity - up to an hour with 100 buckets.
-#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromHours(1), 100)
-
-// This can be used when the default ranges are not sufficient. This macro lets
-// the metric developer customize the min and max of the sampled range, as well
-// as the number of buckets recorded.
-
-// Sample usage:
-//   UMA_HISTOGRAM_CUSTOM_TIMES("Very.Long.Timing.Histogram", duration_in_ms,
-//       base::TimeDelta::FromSeconds(1), base::TimeDelta::FromDays(1), 100);
-#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count)       \
-    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample),                      \
-        base::Histogram::FactoryTimeGet(name, min, max, bucket_count,          \
-            base::HistogramBase::kUmaTargetedHistogramFlag))
-
-// Scoped class which logs its time on this earth as a UMA statistic. This is
-// recommended for when you want a histogram which measures the time it takes
-// for a method to execute. This measures up to 10 seconds. It uses
-// UMA_HISTOGRAM_TIMES under the hood.
-
-// Sample usage:
-//   void Function() {
-//     SCOPED_UMA_HISTOGRAM_TIMER("Component.FunctionTime");
-//     ...
-//   }
-#define SCOPED_UMA_HISTOGRAM_TIMER(name)                                       \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
-
-// Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
-// which measures up to an hour, and uses 100 buckets. This is more expensive
-// to store, so only use if this often takes >10 seconds.
-#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name)                                  \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
-
-
-//------------------------------------------------------------------------------
-// Memory histograms.
-
-// These macros create exponentially sized histograms (lengths of the bucket
-// ranges exponentially increase as the sample range increases). The input
-// sample must be a number measured in kilobytes.
-// All of these macros must be called with |name| as a runtime constant.
-
-// Sample usage:
-//   UMA_HISTOGRAM_MEMORY_KB("My.Memory.Histogram", memory_in_kb);
-
-// Used to measure common KB-granularity memory stats. Range is up to 500000KB -
-// approximately 500M.
-#define UMA_HISTOGRAM_MEMORY_KB(name, sample)                                  \
-    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1000, 500000, 50)
-
-// Used to measure common MB-granularity memory stats. Range is up to ~64G.
-#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample)                            \
-    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
-
-
-//------------------------------------------------------------------------------
-// Stability-specific histograms.
-
-// Histograms logged in as stability histograms will be included in the initial
-// stability log. See comments by declaration of
-// MetricsService::PrepareInitialStabilityLog().
-// All of these macros must be called with |name| as a runtime constant.
-
-// For details on usage, see the documentation on the non-stability equivalents.
-
-#define UMA_STABILITY_HISTOGRAM_COUNTS_100(name, sample)                       \
-    UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
-
-#define UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max,          \
-                                              bucket_count)                    \
-    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
-        name, sample, min, max, bucket_count,                                  \
-        base::HistogramBase::kUmaStabilityHistogramFlag)
-
-#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, enum_max)            \
-    INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                  \
-        name, sample, enum_max,                                                \
-        base::HistogramBase::kUmaStabilityHistogramFlag)
-
-//------------------------------------------------------------------------------
-// Sparse histograms.
-
-// Sparse histograms are well suited for recording counts of exact sample values
-// that are sparsely distributed over a large range.
-//
-// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and/or
-// infrequently recorded values since the implementation is slower
-// and takes more memory.
-//
-// For instance, Sqlite.Version.* are sparse because for any given database,
-// there's going to be exactly one version logged.
-// The |sample| can be a negative or non-negative number.
-#define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample)                              \
-    INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample)
-
-//------------------------------------------------------------------------------
-// Histogram instantiation helpers.
-
-// Support a collection of histograms, perhaps one for each entry in an
-// enumeration. This macro manages a block of pointers, adding to a specific
-// one by its index.
-//
-// A typical instantiation looks something like this:
-//  STATIC_HISTOGRAM_POINTER_GROUP(
-//      GetHistogramNameForIndex(histogram_index),
-//      histogram_index, MAXIMUM_HISTOGRAM_INDEX, Add(some_delta),
-//      base::Histogram::FactoryGet(
-//          GetHistogramNameForIndex(histogram_index),
-//          MINIMUM_SAMPLE, MAXIMUM_SAMPLE, BUCKET_COUNT,
-//          base::HistogramBase::kUmaTargetedHistogramFlag));
-//
-// Though it seems inefficient to generate the name twice, the first
-// instance will be used only for DCHECK builds and the second will
-// execute only during the first access to the given index, after which
-// the pointer is cached and the name never needed again.
-#define STATIC_HISTOGRAM_POINTER_GROUP(constant_histogram_name, index,        \
-                                       constant_maximum,                      \
+// Defines the static |atomic_histogram_pointer| and forwards to
+// HISTOGRAM_POINTER_USE.
+#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,               \
                                        histogram_add_method_invocation,       \
                                        histogram_factory_get_invocation)      \
   do {                                                                        \
-    static base::subtle::AtomicWord atomic_histograms[constant_maximum];      \
-    DCHECK_LE(0, index);                                                      \
-    DCHECK_LT(index, constant_maximum);                                       \
-    HISTOGRAM_POINTER_USE(&atomic_histograms[index], constant_histogram_name, \
+    static base::subtle::AtomicWord atomic_histogram_pointer = 0;             \
+    HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name, \
                           histogram_add_method_invocation,                    \
                           histogram_factory_get_invocation);                  \
   } while (0)
 
 //------------------------------------------------------------------------------
-// Deprecated histogram macros. Not recommended for current use.
+// Provide easy general purpose histogram in a macro, just like stats counters.
+// Most of these macros use 50 buckets, but check the definition for details.
+//
+// All of these macros must be called with |name| as a runtime constant --- it
+// doesn't have to literally be a constant, but it must be the same string on
+// all calls from a particular call site. If this rule is violated,
+// STATIC_HISTOGRAM_POINTER_BLOCK will DCHECK, and if DCHECKS are disabled, the
+// data will be written to the wrong histogram.
 
-// Legacy name for UMA_HISTOGRAM_COUNTS_1M. Suggest using explicit naming
-// and not using this macro going forward.
-#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(        \
+#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromSeconds(10), 50)
+
+// For folks that need real specific times, use this to select a precise range
+// of times you want plotted, and the number of buckets you want used.
+#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
+        base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+                                        base::HistogramBase::kNoFlags))
+
+#define LOCAL_HISTOGRAM_COUNTS(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
     name, sample, 1, 1000000, 50)
 
-// MB-granularity memory metric. This has a short max (1G).
-#define UMA_HISTOGRAM_MEMORY_MB(name, sample)                                  \
-    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000, 50)
+#define LOCAL_HISTOGRAM_COUNTS_100(name, sample) \
+    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
 
-// For an enum with customized range. In general, sparse histograms should be
-// used instead.
-// Samples should be one of the std::vector<int> list provided via
-// |custom_ranges|. See comments above CustomRanges::FactoryGet about the
-// requirement of |custom_ranges|. You can use the helper function
-// CustomHistogram::ArrayToCustomRanges to transform a C-style array of valid
-// sample values to a std::vector<int>.
-#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges)          \
-    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample),                          \
-        base::CustomHistogram::FactoryGet(name, custom_ranges,                 \
+#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample) \
+    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
+
+#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+        base::Histogram::FactoryGet(name, min, max, bucket_count, \
+                                    base::HistogramBase::kNoFlags))
+
+// This is a helper macro used by other macros and shouldn't be used directly.
+#define HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+        base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
+            flag))
+
+#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+    LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+#define LOCAL_HISTOGRAM_BOOLEAN(name, sample) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
+        base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
+
+// Support histograming of an enumerated value.  The samples should always be
+// strictly less than |boundary_value| -- this prevents you from running into
+// problems down the line if you add additional buckets to the histogram.  Note
+// also that, despite explicitly setting the minimum bucket value to |1| below,
+// it is fine for enumerated histograms to be 0-indexed -- this is because
+// enumerated histograms should never have underflow.
+#define LOCAL_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+        base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
+            boundary_value + 1, base::HistogramBase::kNoFlags))
+
+// Support histograming of an enumerated value. Samples should be one of the
+// std::vector<int> list provided via |custom_ranges|. See comments above
+// CustomRanges::FactoryGet about the requirement of |custom_ranges|.
+// You can use the helper function CustomHistogram::ArrayToCustomRanges to
+// transform a C-style array of valid sample values to a std::vector<int>.
+#define LOCAL_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+        base::CustomHistogram::FactoryGet(name, custom_ranges, \
+                                          base::HistogramBase::kNoFlags))
+
+#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1000, 500000, 50)
+
+//------------------------------------------------------------------------------
+// The following macros provide typical usage scenarios for callers that wish
+// to record histogram data, and have the data submitted/uploaded via UMA.
+// Not all systems support such UMA, but if they do, the following macros
+// should work with the service.
+
+#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromSeconds(10), 50)
+
+#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(10), \
+    base::TimeDelta::FromMinutes(3), 50)
+
+// Use this macro when times can routinely be much longer than 10 seconds.
+#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromHours(1), 50)
+
+// Use this macro when times can routinely be much longer than 10 seconds and
+// you want 100 buckets.
+#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromHours(1), 100)
+
+#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample), \
+        base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
             base::HistogramBase::kUmaTargetedHistogramFlag))
 
+#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 100, 50)
+
+#define UMA_HISTOGRAM_COUNTS_1000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 10000, 50)
+
+#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+        base::Histogram::FactoryGet(name, min, max, bucket_count, \
+            base::HistogramBase::kUmaTargetedHistogramFlag))
+
+#define UMA_HISTOGRAM_MEMORY_KB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1000, 500000, 50)
+
+#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample) \
+    UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
+
+#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+    UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+#define UMA_HISTOGRAM_BOOLEAN(name, sample) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
+        base::BooleanHistogram::FactoryGet(name, \
+            base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// The samples should always be strictly less than |boundary_value|.  For more
+// details, see the comment for the |LOCAL_HISTOGRAM_ENUMERATION| macro, above.
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+    HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
+        base::HistogramBase::kUmaTargetedHistogramFlag)
+
+// Similar to UMA_HISTOGRAM_ENUMERATION, but used for recording stability
+// histograms.  Use this if recording a histogram that should be part of the
+// initial stability log.
+#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+    HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary_value, \
+        base::HistogramBase::kUmaStabilityHistogramFlag)
+
+#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+    STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
+        base::CustomHistogram::FactoryGet(name, custom_ranges, \
+            base::HistogramBase::kUmaTargetedHistogramFlag))
+
+// Scoped class which logs its time on this earth as a UMA statistic. This is
+// recommended for when you want a histogram which measures the time it takes
+// for a method to execute. This measures up to 10 seconds.
+#define SCOPED_UMA_HISTOGRAM_TIMER(name) \
+  SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
+
+// Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
+// which measures up to an hour, and uses 100 buckets. This is more expensive
+// to store, so only use if this often takes >10 seconds.
+#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name) \
+  SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
+
+// This nested macro is necessary to expand __COUNTER__ to an actual value.
+#define SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key) \
+  SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
+
+#define SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key) \
+  class ScopedHistogramTimer##key { \
+   public: \
+    ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {} \
+    ~ScopedHistogramTimer##key() { \
+      base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_; \
+      if (is_long) { \
+        UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed); \
+      } else { \
+        UMA_HISTOGRAM_TIMES(name, elapsed); \
+      } \
+    } \
+   private: \
+    base::TimeTicks constructed_; \
+  } scoped_histogram_timer_##key
+
 #endif  // BASE_METRICS_HISTOGRAM_MACROS_H_
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
deleted file mode 100644
index 53e4f11..0000000
--- a/base/metrics/histogram_macros_internal.h
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
-#define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
-
-#include "base/atomicops.h"
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/sparse_histogram.h"
-#include "base/time/time.h"
-
-// This is for macros internal to base/metrics. They should not be used outside
-// of this directory. For writing to UMA histograms, see histogram_macros.h.
-
-// TODO(rkaplow): Improve commenting of these methods.
-
-//------------------------------------------------------------------------------
-// Histograms are often put in areas where they are called many many times, and
-// performance is critical.  As a result, they are designed to have a very low
-// recurring cost of executing (adding additional samples). Toward that end,
-// the macros declare a static pointer to the histogram in question, and only
-// take a "slow path" to construct (or find) the histogram on the first run
-// through the macro. We leak the histograms at shutdown time so that we don't
-// have to validate using the pointers at any time during the running of the
-// process.
-
-
-// In some cases (integration into 3rd party code), it's useful to separate the
-// definition of |atomic_histogram_pointer| from its use. To achieve this we
-// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
-// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
-// and forwards to HISTOGRAM_POINTER_USE.
-#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer,                        \
-                              constant_histogram_name,                         \
-                              histogram_add_method_invocation,                 \
-                              histogram_factory_get_invocation)                \
-  do {                                                                         \
-    /*                                                                         \
-     * Acquire_Load() ensures that we acquire visibility to the                \
-     * pointed-to data in the histogram.                                       \
-     */                                                                        \
-    base::HistogramBase* histogram_pointer(                                    \
-        reinterpret_cast<base::HistogramBase*>(                                \
-            base::subtle::Acquire_Load(atomic_histogram_pointer)));            \
-    if (!histogram_pointer) {                                                  \
-      /*                                                                       \
-       * This is the slow path, which will construct OR find the               \
-       * matching histogram.  histogram_factory_get_invocation includes        \
-       * locks on a global histogram name map and is completely thread         \
-       * safe.                                                                 \
-       */                                                                      \
-      histogram_pointer = histogram_factory_get_invocation;                    \
-                                                                               \
-      /*                                                                       \
-       * Use Release_Store to ensure that the histogram data is made           \
-       * available globally before we make the pointer visible. Several        \
-       * threads may perform this store, but the same value will be            \
-       * stored in all cases (for a given named/spec'ed histogram).            \
-       * We could do this without any barrier, since FactoryGet entered        \
-       * and exited a lock after construction, but this barrier makes          \
-       * things clear.                                                         \
-       */                                                                      \
-      base::subtle::Release_Store(                                             \
-          atomic_histogram_pointer,                                            \
-          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));      \
-    }                                                                          \
-    if (DCHECK_IS_ON())                                                        \
-      histogram_pointer->CheckName(constant_histogram_name);                   \
-    histogram_pointer->histogram_add_method_invocation;                        \
-  } while (0)
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-// Defines the static |atomic_histogram_pointer| and forwards to
-// HISTOGRAM_POINTER_USE.
-#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,                \
-                                       histogram_add_method_invocation,        \
-                                       histogram_factory_get_invocation)       \
-  do {                                                                         \
-    /*                                                                         \
-     * The pointer's presence indicates that the initialization is complete.   \
-     * Initialization is idempotent, so it can safely be atomically repeated.  \
-     */                                                                        \
-    static base::subtle::AtomicWord atomic_histogram_pointer = 0;              \
-    HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name,  \
-                          histogram_add_method_invocation,                     \
-                          histogram_factory_get_invocation);                   \
-  } while (0)
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-#define INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(name, sample, min, max,     \
-                                                   bucket_count, flag)         \
-    STATIC_HISTOGRAM_POINTER_BLOCK(                                            \
-        name, Add(sample),                                                     \
-        base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-// For an enumeration with N items, recording values in the range [0, N - 1],
-// this macro creates a linear histogram with N + 1 buckets:
-//   [0, 1), [1, 2), ..., [N - 1, N), and an overflow bucket [N, infinity).
-// Code should never emit to the overflow bucket; only to the other N buckets.
-// This allows future versions of Chrome to safely append new entries to the
-// enumeration. Otherwise, the histogram would have [N - 1, infinity) as its
-// overflow bucket, and so the maximal value (N - 1) would be emitted to this
-// overflow bucket. But, if an additional enumerated value were later added, the
-// bucket label for the value (N - 1) would change to [N - 1, N), which would
-// result in different versions of Chrome using different bucket labels for
-// identical data.
-#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
-  do {                                                                         \
-    static_assert(                                                             \
-        !std::is_enum<decltype(sample)>::value ||                              \
-            !std::is_enum<decltype(boundary)>::value ||                        \
-            std::is_same<std::remove_const<decltype(sample)>::type,            \
-                         std::remove_const<decltype(boundary)>::type>::value,  \
-        "|sample| and |boundary| shouldn't be of different enums");            \
-    STATIC_HISTOGRAM_POINTER_BLOCK(                                            \
-        name, Add(sample), base::LinearHistogram::FactoryGet(                  \
-                               name, 1, boundary, boundary + 1, flag));        \
-  } while (0)
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-// This is necessary to expand __COUNTER__ to an actual value.
-#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key)       \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
-
-// This is a helper macro used by other macros and shouldn't be used directly.
-#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)         \
-  class ScopedHistogramTimer##key {                                            \
-   public:                                                                     \
-    ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {}      \
-    ~ScopedHistogramTimer##key() {                                             \
-      base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_;         \
-      if (is_long) {                                                           \
-        UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed);                           \
-      } else {                                                                 \
-        UMA_HISTOGRAM_TIMES(name, elapsed);                                    \
-      }                                                                        \
-    }                                                                          \
-   private:                                                                    \
-    base::TimeTicks constructed_;                                              \
-  } scoped_histogram_timer_##key
-
-// Macro for sparse histogram.
-// The implementation is more costly to add values to, and each value
-// stored has more overhead, compared to the other histogram types. However it
-// may be more efficient in memory if the total number of sample values is small
-// compared to the range of their values.
-#define INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample)                         \
-    do {                                                                       \
-      base::HistogramBase* histogram = base::SparseHistogram::FactoryGet(      \
-          name, base::HistogramBase::kUmaTargetedHistogramFlag);               \
-      histogram->Add(sample);                                                  \
-    } while (0)
-
-#endif  // BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
diff --git a/base/metrics/histogram_macros_local.h b/base/metrics/histogram_macros_local.h
deleted file mode 100644
index 7571a9c..0000000
--- a/base/metrics/histogram_macros_local.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
-#define BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
-
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "base/metrics/histogram_macros_internal.h"
-#include "base/time/time.h"
-
-// TODO(rkaplow): Migrate all LOCAL_* usage within Chromium to include this
-// file instead of the histogram_macros.h file.
-
-//------------------------------------------------------------------------------
-// Enumeration histograms.
-//
-// For usage details, see the equivalents in histogram_macros.h.
-
-#define LOCAL_HISTOGRAM_ENUMERATION(name, sample, enum_max)                    \
-   INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                   \
-        name, sample, enum_max,                                                \
-        base::HistogramBase::kNoFlags)
-
-#define LOCAL_HISTOGRAM_BOOLEAN(name, sample)                                  \
-    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample),                   \
-        base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
-
-//------------------------------------------------------------------------------
-// Percentage histograms.
-//
-// For usage details, see the equivalents in histogram_macros.h
-
-#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred)                    \
-    LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
-
-//------------------------------------------------------------------------------
-// Count histograms. These are used for collecting numeric data. Note that we
-// have macros for more specialized use cases below (memory, time, percentages).
-// For usage details, see the equivalents in histogram_macros.h.
-
-#define LOCAL_HISTOGRAM_COUNTS_100(name, sample)                               \
-    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
-
-#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample)                             \
-    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
-
-#define LOCAL_HISTOGRAM_COUNTS_1000000(name, sample)                           \
-    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
-
-#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)    \
-    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
-        name, sample, min, max, bucket_count, base::HistogramBase::kNoFlags)
-
-//------------------------------------------------------------------------------
-// Timing histograms. These are used for collecting timing data (generally
-// latencies).
-//
-// For usage details, see the equivalents in histogram_macros.h.
-
-#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES(      \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromSeconds(10), 50)
-
-#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count)     \
-    STATIC_HISTOGRAM_POINTER_BLOCK(name, AddTime(sample),                      \
-        base::Histogram::FactoryTimeGet(name, min, max, bucket_count,          \
-                                        base::HistogramBase::kNoFlags))
-
-//------------------------------------------------------------------------------
-// Memory histograms.
-//
-// For usage details, see the equivalents in histogram_macros.h.
-
-#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
-    name, sample, 1000, 500000, 50)
-
-//------------------------------------------------------------------------------
-// Deprecated histograms. Not recommended for current use.
-
-// TODO(rkaplow): See if we can clean up this macro and usage.
-// Legacy non-explicit version. We suggest using LOCAL_HISTOGRAM_COUNTS_1000000
-// instead.
-#define LOCAL_HISTOGRAM_COUNTS(name, sample)                                   \
-    LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
-
-#endif  // BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
index 93f6d21..e28573f 100644
--- a/base/metrics/histogram_samples.h
+++ b/base/metrics/histogram_samples.h
@@ -27,9 +27,6 @@
 class BASE_EXPORT HistogramSamples {
  public:
   struct Metadata {
-    // Expected size for 32/64-bit check.
-    static constexpr size_t kExpectedInstanceSize = 24;
-
     // Initialized when the sample-set is first created with a value provided
     // by the caller. It is generally used to identify the sample-set across
     // threads and processes, though not necessarily uniquely as it is possible
@@ -58,21 +55,7 @@
     // might mismatch even when no memory corruption has happened.
     HistogramBase::AtomicCount redundant_count;
 
-    // 4 bytes of padding to explicitly extend this structure to a multiple of
-    // 64-bits. This is required to ensure the structure is the same size on
-    // both 32-bit and 64-bit builds.
-    char padding[4];
-  };
-
-  // Because sturctures held in persistent memory must be POD, there can be no
-  // default constructor to clear the fields. This derived class exists just
-  // to clear them when being allocated on the heap.
-  struct LocalMetadata : Metadata {
-    LocalMetadata() {
-      id = 0;
-      sum = 0;
-      redundant_count = 0;
-    }
+    Metadata() : id(0), sum(0), redundant_count(0) {}
   };
 
   explicit HistogramSamples(uint64_t id);
@@ -119,7 +102,7 @@
   // In order to support histograms shared through an external memory segment,
   // meta values may be the local storage or external storage depending on the
   // wishes of the derived class.
-  LocalMetadata local_meta_;
+  Metadata local_meta_;
   Metadata* meta_;
 
   DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
index a774ea6..340505e 100644
--- a/base/metrics/histogram_snapshot_manager.cc
+++ b/base/metrics/histogram_snapshot_manager.cc
@@ -53,8 +53,6 @@
     for (size_t i = 0; i < ranges->size(); ++i)
       ranges_copy.push_back(ranges->range(i));
     HistogramBase::Sample* ranges_ptr = &ranges_copy[0];
-    uint32_t ranges_checksum = ranges->checksum();
-    uint32_t ranges_calc_checksum = ranges->CalculateChecksum();
     const char* histogram_name = histogram->histogram_name().c_str();
     int32_t flags = histogram->flags();
     // The checksum should have caught this, so crash separately if it didn't.
@@ -63,8 +61,6 @@
     // Ensure that compiler keeps around pointers to |histogram| and its
     // internal |bucket_ranges_| for any minidumps.
     base::debug::Alias(&ranges_ptr);
-    base::debug::Alias(&ranges_checksum);
-    base::debug::Alias(&ranges_calc_checksum);
     base::debug::Alias(&histogram_name);
     base::debug::Alias(&flags);
   }
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
index de4a2e1..26fb93f 100644
--- a/base/metrics/histogram_snapshot_manager.h
+++ b/base/metrics/histogram_snapshot_manager.h
@@ -24,7 +24,7 @@
 // histograms for recording either to disk or for transmission (such as from
 // renderer to browser, or from browser to UMA upload). Since histograms can sit
 // in memory for an extended period of time, and are vulnerable to memory
-// corruption, this class also validates as much redundancy as it can before
+// corruption, this class also validates as much rendundancy as it can before
 // calling for the marginal change (a.k.a., delta) in a histogram to be
 // recorded.
 class BASE_EXPORT HistogramSnapshotManager {
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index 02ed93b..5c2ca68 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -22,7 +22,6 @@
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
 #include "base/strings/stringprintf.h"
-#include "base/test/gtest_util.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -702,6 +701,7 @@
           << "ns each.";
 }
 
+#if GTEST_HAS_DEATH_TEST
 // For Histogram, LinearHistogram and CustomHistogram, the minimum for a
 // declared range is 1, while the maximum is (HistogramBase::kSampleType_MAX -
 // 1). But we accept ranges exceeding those limits, and silently clamped to
@@ -735,18 +735,17 @@
 
   // CustomHistogram does not accepts kSampleType_MAX as range.
   custom_ranges.push_back(HistogramBase::kSampleType_MAX);
-  EXPECT_DEATH_IF_SUPPORTED(
-      CustomHistogram::FactoryGet("BadRangesCustom2", custom_ranges,
-                                  HistogramBase::kNoFlags),
+  EXPECT_DEATH(CustomHistogram::FactoryGet("BadRangesCustom2", custom_ranges,
+                                           HistogramBase::kNoFlags),
                "");
 
   // CustomHistogram needs at least 1 valid range.
   custom_ranges.clear();
   custom_ranges.push_back(0);
-  EXPECT_DEATH_IF_SUPPORTED(
-      CustomHistogram::FactoryGet("BadRangesCustom3", custom_ranges,
-                                  HistogramBase::kNoFlags),
+  EXPECT_DEATH(CustomHistogram::FactoryGet("BadRangesCustom3", custom_ranges,
+                                           HistogramBase::kNoFlags),
                "");
 }
+#endif
 
 }  // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 2991003..5af3486 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -6,7 +6,6 @@
 
 #include <memory>
 
-#include "base/atomicops.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/files/important_file_writer.h"
@@ -36,6 +35,7 @@
 // so that, if the structure of that object changes, stored older versions
 // will be safely ignored.
 enum : uint32_t {
+  kTypeIdHistogram   = 0xF1645910 + 2,  // SHA1(Histogram)   v2
   kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
   kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
 };
@@ -45,10 +45,8 @@
 // but that's best since PersistentMemoryAllocator objects (that underlie
 // GlobalHistogramAllocator objects) are explicitly forbidden from doing
 // anything essential at exit anyway due to the fact that they depend on data
-// managed elsewhere and which could be destructed first. An AtomicWord is
-// used instead of std::atomic because the latter can create global ctors
-// and dtors.
-subtle::AtomicWord g_allocator = 0;
+// managed elsewhere and which could be destructed first.
+GlobalHistogramAllocator* g_allocator = nullptr;
 
 // Take an array of range boundaries and create a proper BucketRanges object
 // which is returned to the caller. A return of nullptr indicates that the
@@ -119,7 +117,7 @@
     return found->second.get();
 
   std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
-  samples = MakeUnique<PersistentSampleMapRecords>(this, id);
+  samples = WrapUnique(new PersistentSampleMapRecords(this, id));
   return samples.get();
 }
 
@@ -226,13 +224,6 @@
 // This data will be held in persistent memory in order for processes to
 // locate and use histograms created elsewhere.
 struct PersistentHistogramAllocator::PersistentHistogramData {
-  // SHA1(Histogram): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;
-
-  // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize =
-      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
-
   int32_t histogram_type;
   int32_t flags;
   int32_t minimum;
@@ -247,7 +238,7 @@
   // Space for the histogram name will be added during the actual allocation
   // request. This must be the last field of the structure. A zero-size array
   // or a "flexible" array would be preferred but is not (yet) valid C++.
-  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
+  char name[1];
 };
 
 PersistentHistogramAllocator::Iterator::Iterator(
@@ -257,7 +248,7 @@
 std::unique_ptr<HistogramBase>
 PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
   PersistentMemoryAllocator::Reference ref;
-  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
+  while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
     if (ref != ignore)
       return allocator_->GetHistogram(ref);
   }
@@ -280,17 +271,11 @@
   // add it to the local list of known histograms (while these may be simple
   // references to histograms in other processes).
   PersistentHistogramData* histogram_data =
-      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
+      memory_allocator_->GetAsObject<PersistentHistogramData>(
+          ref, kTypeIdHistogram);
   size_t length = memory_allocator_->GetAllocSize(ref);
-
-  // Check that metadata is reasonable: name is NUL terminated and non-empty,
-  // ID fields have been loaded with a hash of the name (0 is considered
-  // unset/invalid).
   if (!histogram_data ||
-      reinterpret_cast<char*>(histogram_data)[length - 1] != '\0' ||
-      histogram_data->name[0] == '\0' ||
-      histogram_data->samples_metadata.id == 0 ||
-      histogram_data->logged_metadata.id == 0) {
+      reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
     NOTREACHED();
     return nullptr;
@@ -317,13 +302,14 @@
 
   // Create the metadata necessary for a persistent sparse histogram. This
   // is done first because it is a small subset of what is required for
-  // other histograms. The type is "under construction" so that a crash
-  // during the datafill doesn't leave a bad record around that could cause
-  // confusion by another process trying to read it. It will be corrected
-  // once histogram construction is complete.
+  // other histograms.
+  PersistentMemoryAllocator::Reference histogram_ref =
+      memory_allocator_->Allocate(
+          offsetof(PersistentHistogramData, name) + name.length() + 1,
+          kTypeIdHistogram);
   PersistentHistogramData* histogram_data =
-      memory_allocator_->New<PersistentHistogramData>(
-          offsetof(PersistentHistogramData, name) + name.length() + 1);
+      memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
+                                                              kTypeIdHistogram);
   if (histogram_data) {
     memcpy(histogram_data->name, name.c_str(), name.size() + 1);
     histogram_data->histogram_type = histogram_type;
@@ -340,15 +326,14 @@
       return nullptr;
     }
 
-    size_t ranges_count = bucket_count + 1;
-    size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
+    size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
     PersistentMemoryAllocator::Reference counts_ref =
         memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
     PersistentMemoryAllocator::Reference ranges_ref =
         memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
     HistogramBase::Sample* ranges_data =
-        memory_allocator_->GetAsArray<HistogramBase::Sample>(
-            ranges_ref, kTypeIdRangesArray, ranges_count);
+        memory_allocator_->GetAsObject<HistogramBase::Sample>(
+            ranges_ref, kTypeIdRangesArray);
 
     // Only continue here if all allocations were successful. If they weren't,
     // there is no way to free the space but that's not really a problem since
@@ -380,11 +365,6 @@
     // correct before commiting the new histogram to persistent space.
     std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
     DCHECK(histogram);
-    DCHECK_NE(0U, histogram_data->samples_metadata.id);
-    DCHECK_NE(0U, histogram_data->logged_metadata.id);
-
-    PersistentMemoryAllocator::Reference histogram_ref =
-        memory_allocator_->GetAsReference(histogram_data);
     if (ref_ptr != nullptr)
       *ref_ptr = histogram_ref;
 
@@ -406,31 +386,22 @@
     result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
   }
   RecordCreateHistogramResult(result);
-
-  // Crash for failures caused by internal bugs but not "full" which is
-  // dependent on outside code.
-  if (result != CREATE_HISTOGRAM_ALLOCATOR_FULL)
-    NOTREACHED() << memory_allocator_->Name() << ", error=" << result;
+  NOTREACHED() << "error=" << result;
 
   return nullptr;
 }
 
 void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                      bool registered) {
-  if (registered) {
-    // If the created persistent histogram was registered then it needs to
-    // be marked as "iterable" in order to be found by other processes. This
-    // happens only after the histogram is fully formed so it's impossible for
-    // code iterating through the allocator to read a partially created record.
+  // If the created persistent histogram was registered then it needs to
+  // be marked as "iterable" in order to be found by other processes.
+  if (registered)
     memory_allocator_->MakeIterable(ref);
-  } else {
-    // If it wasn't registered then a race condition must have caused two to
-    // be created. The allocator does not support releasing the acquired memory
-    // so just change the type to be empty.
-    memory_allocator_->ChangeType(ref, 0,
-                                  PersistentHistogramData::kPersistentTypeId,
-                                  /*clear=*/false);
-  }
+  // If it wasn't registered then a race condition must have caused
+  // two to be created. The allocator does not support releasing the
+  // acquired memory so just change the type to be empty.
+  else
+    memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
 }
 
 void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
@@ -506,10 +477,15 @@
     static bool initialized = false;
     if (!initialized) {
       initialized = true;
-      if (GlobalHistogramAllocator::Get()) {
-        DVLOG(1) << "Creating the results-histogram inside persistent"
-                 << " memory can cause future allocations to crash if"
-                 << " that memory is ever released (for testing).";
+      if (g_allocator) {
+// Don't log in release-with-asserts builds, otherwise the test_installer step
+// fails because this code writes to a log file before the installer code had a
+// chance to set the log file's location.
+#if !defined(DCHECK_ALWAYS_ON)
+        DLOG(WARNING) << "Creating the results-histogram inside persistent"
+                      << " memory can cause future allocations to crash if"
+                      << " that memory is ever released (for testing).";
+#endif
       }
 
       histogram_pointer = LinearHistogram::FactoryGet(
@@ -551,9 +527,8 @@
   PersistentHistogramData histogram_data = *histogram_data_ptr;
 
   HistogramBase::Sample* ranges_data =
-      memory_allocator_->GetAsArray<HistogramBase::Sample>(
-          histogram_data.ranges_ref, kTypeIdRangesArray,
-          PersistentMemoryAllocator::kSizeAny);
+      memory_allocator_->GetAsObject<HistogramBase::Sample>(
+          histogram_data.ranges_ref, kTypeIdRangesArray);
 
   const uint32_t max_buckets =
       std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
@@ -582,9 +557,8 @@
           created_ranges.release());
 
   HistogramBase::AtomicCount* counts_data =
-      memory_allocator_->GetAsArray<HistogramBase::AtomicCount>(
-          histogram_data.counts_ref, kTypeIdCountsArray,
-          PersistentMemoryAllocator::kSizeAny);
+      memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
+          histogram_data.counts_ref, kTypeIdCountsArray);
   size_t counts_bytes =
       CalculateRequiredCountsBytes(histogram_data.bucket_count);
   if (!counts_data || counts_bytes == 0 ||
@@ -654,7 +628,7 @@
     const HistogramBase* histogram) {
   // This should never be called on the global histogram allocator as objects
   // created there are already within the global statistics recorder.
-  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
+  DCHECK_NE(g_allocator, this);
   DCHECK(histogram);
 
   HistogramBase* existing =
@@ -664,9 +638,7 @@
 
   // Adding the passed histogram to the SR would cause a problem if the
   // allocator that holds it eventually goes away. Instead, create a new
-  // one from a serialized version. Deserialization calls the appropriate
-  // FactoryGet() which will create the histogram in the global persistent-
-  // histogram allocator if such is set.
+  // one from a serialized version.
   base::Pickle pickle;
   if (!histogram->SerializeInfo(&pickle))
     return nullptr;
@@ -698,9 +670,9 @@
     size_t page_size,
     uint64_t id,
     StringPiece name) {
-  Set(WrapUnique(
-      new GlobalHistogramAllocator(MakeUnique<PersistentMemoryAllocator>(
-          base, size, page_size, id, name, false))));
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new PersistentMemoryAllocator(
+          base, size, page_size, id, name, false)))));
 }
 
 // static
@@ -709,12 +681,12 @@
     uint64_t id,
     StringPiece name) {
   Set(WrapUnique(new GlobalHistogramAllocator(
-      MakeUnique<LocalPersistentMemoryAllocator>(size, id, name))));
+      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)))));
 }
 
 #if !defined(OS_NACL)
 // static
-bool GlobalHistogramAllocator::CreateWithFile(
+void GlobalHistogramAllocator::CreateWithFile(
     const FilePath& file_path,
     size_t size,
     uint64_t id,
@@ -734,55 +706,14 @@
   if (!mmfile->IsValid() ||
       !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
     NOTREACHED();
-    return false;
+    return;
   }
 
-  Set(WrapUnique(
-      new GlobalHistogramAllocator(MakeUnique<FilePersistentMemoryAllocator>(
-          std::move(mmfile), size, id, name, false))));
-  Get()->SetPersistentLocation(file_path);
-  return true;
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new FilePersistentMemoryAllocator(
+          std::move(mmfile), size, id, name, false)))));
 }
-
-// static
-bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
-                                                    const FilePath& active_path,
-                                                    size_t size,
-                                                    uint64_t id,
-                                                    StringPiece name) {
-  if (!base::ReplaceFile(active_path, base_path, nullptr))
-    base::DeleteFile(base_path, /*recursive=*/false);
-
-  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
-                                                        name);
-}
-
-// static
-bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
-                                                         size_t size,
-                                                         uint64_t id,
-                                                         StringPiece name) {
-  FilePath base_path, active_path;
-  ConstructFilePaths(dir, name, &base_path, &active_path);
-  return CreateWithActiveFile(base_path, active_path, size, id, name);
-}
-
-// static
-void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
-                                                  StringPiece name,
-                                                  FilePath* out_base_path,
-                                                  FilePath* out_active_path) {
-  if (out_base_path) {
-    *out_base_path = dir.AppendASCII(name).AddExtension(
-        PersistentMemoryAllocator::kFileExtension);
-  }
-  if (out_active_path) {
-    *out_active_path =
-        dir.AppendASCII(name.as_string() + std::string("-active"))
-            .AddExtension(PersistentMemoryAllocator::kFileExtension);
-  }
-}
-#endif  // !defined(OS_NACL)
+#endif
 
 // static
 void GlobalHistogramAllocator::CreateWithSharedMemory(
@@ -797,9 +728,9 @@
   }
 
   DCHECK_LE(memory->mapped_size(), size);
-  Set(WrapUnique(
-      new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
-          std::move(memory), 0, StringPiece(), /*readonly=*/false))));
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new SharedPersistentMemoryAllocator(
+          std::move(memory), 0, StringPiece(), /*readonly=*/false)))));
 }
 
 // static
@@ -814,9 +745,9 @@
     return;
   }
 
-  Set(WrapUnique(
-      new GlobalHistogramAllocator(MakeUnique<SharedPersistentMemoryAllocator>(
-          std::move(shm), 0, StringPiece(), /*readonly=*/false))));
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new SharedPersistentMemoryAllocator(
+          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
 }
 
 // static
@@ -825,9 +756,8 @@
   // Releasing or changing an allocator is extremely dangerous because it
   // likely has histograms stored within it. If the backing memory is also
   // also released, future accesses to those histograms will seg-fault.
-  CHECK(!subtle::NoBarrier_Load(&g_allocator));
-  subtle::Release_Store(&g_allocator,
-                        reinterpret_cast<uintptr_t>(allocator.release()));
+  CHECK(!g_allocator);
+  g_allocator = allocator.release();
   size_t existing = StatisticsRecorder::GetHistogramCount();
 
   DVLOG_IF(1, existing)
@@ -836,14 +766,13 @@
 
 // static
 GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
-  return reinterpret_cast<GlobalHistogramAllocator*>(
-      subtle::Acquire_Load(&g_allocator));
+  return g_allocator;
 }
 
 // static
 std::unique_ptr<GlobalHistogramAllocator>
 GlobalHistogramAllocator::ReleaseForTesting() {
-  GlobalHistogramAllocator* histogram_allocator = Get();
+  GlobalHistogramAllocator* histogram_allocator = g_allocator;
   if (!histogram_allocator)
     return nullptr;
   PersistentMemoryAllocator* memory_allocator =
@@ -853,9 +782,13 @@
   // Recorder forget about the histograms contained therein; otherwise,
   // some operations will try to access them and the released memory.
   PersistentMemoryAllocator::Iterator iter(memory_allocator);
-  const PersistentHistogramData* data;
-  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
-    StatisticsRecorder::ForgetHistogramForTesting(data->name);
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) {
+    PersistentHistogramData* histogram_data =
+        memory_allocator->GetAsObject<PersistentHistogramData>(
+            ref, kTypeIdHistogram);
+    DCHECK(histogram_data);
+    StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
 
     // If a test breaks here then a memory region containing a histogram
     // actively used by this code is being released back to the test.
@@ -864,10 +797,10 @@
     // the method GetCreateHistogramResultHistogram() *before* setting
     // the (temporary) memory allocator via SetGlobalAllocator() so that
     // histogram is instead allocated from the process heap.
-    DCHECK_NE(kResultHistogram, data->name);
+    DCHECK_NE(kResultHistogram, histogram_data->name);
   }
 
-  subtle::Release_Store(&g_allocator, 0);
+  g_allocator = nullptr;
   return WrapUnique(histogram_allocator);
 };
 
@@ -904,30 +837,10 @@
 #endif
 }
 
-void GlobalHistogramAllocator::DeletePersistentLocation() {
-#if defined(OS_NACL)
-  NOTREACHED();
-#else
-  if (persistent_location_.empty())
-    return;
-
-  // Open (with delete) and then immediately close the file by going out of
-  // scope. This is the only cross-platform safe way to delete a file that may
-  // be open elsewhere. Open handles will continue to operate normally but
-  // new opens will not be possible.
-  File file(persistent_location_,
-            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
-#endif
-}
-
 GlobalHistogramAllocator::GlobalHistogramAllocator(
     std::unique_ptr<PersistentMemoryAllocator> memory)
     : PersistentHistogramAllocator(std::move(memory)),
-      import_iterator_(this) {
-  // Make sure the StatisticsRecorder is initialized to prevent duplicate
-  // histograms from being created. It's safe to call this multiple times.
-  StatisticsRecorder::Initialize();
-}
+      import_iterator_(this) {}
 
 void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
   // Skip the import if it's the histogram that was last created. Should a
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index 2eb28df..ee1fba5 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -19,7 +19,6 @@
 
 namespace base {
 
-class BucketRanges;
 class FilePath;
 class PersistentSampleMapRecords;
 class PersistentSparseHistogramDataManager;
@@ -56,8 +55,8 @@
   // Convenience method that gets the object for a given reference so callers
   // don't have to also keep their own pointer to the appropriate allocator.
   template <typename T>
-  T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
-    return allocator_->GetAsObject<T>(ref);
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
+    return allocator_->GetAsObject<T>(ref, type_id);
   }
 
  private:
@@ -131,8 +130,8 @@
   // cleanliness of the interface), a template is defined that will be
   // resolved when used inside that file.
   template <typename T>
-  T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
-    return data_manager_->GetAsObject<T>(ref);
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
+    return data_manager_->GetAsObject<T>(ref, type_id);
   }
 
  private:
@@ -395,40 +394,11 @@
   // Create a global allocator by memory-mapping a |file|. If the file does
   // not exist, it will be created with the specified |size|. If the file does
   // exist, the allocator will use and add to its contents, ignoring the passed
-  // size in favor of the existing size. Returns whether the global allocator
-  // was set.
-  static bool CreateWithFile(const FilePath& file_path,
+  // size in favor of the existing size.
+  static void CreateWithFile(const FilePath& file_path,
                              size_t size,
                              uint64_t id,
                              StringPiece name);
-
-  // Creates a new file at |active_path|. If it already exists, it will first be
-  // moved to |base_path|. In all cases, any old file at |base_path| will be
-  // removed. The file will be created using the given size, id, and name.
-  // Returns whether the global allocator was set.
-  static bool CreateWithActiveFile(const FilePath& base_path,
-                                   const FilePath& active_path,
-                                   size_t size,
-                                   uint64_t id,
-                                   StringPiece name);
-
-  // Uses ConstructBaseActivePairFilePaths() to build a pair of file names which
-  // are then used for CreateWithActiveFile(). |name| is used for both the
-  // internal name for the allocator and also for the name of the file inside
-  // |dir|.
-  static bool CreateWithActiveFileInDir(const FilePath& dir,
-                                        size_t size,
-                                        uint64_t id,
-                                        StringPiece name);
-
-  // Constructs a pair of names in |dir| based on name that can be used for a
-  // base + active persistent memory mapped location for CreateWithActiveFile().
-  // |name| will be used as the basename of the file inside |dir|.
-  // |out_base_path| or |out_active_path| may be null if not needed.
-  static void ConstructFilePaths(const FilePath& dir,
-                                 StringPiece name,
-                                 FilePath* out_base_path,
-                                 FilePath* out_active_path);
 #endif
 
   // Create a global allocator using a block of shared |memory| of the
@@ -479,10 +449,6 @@
   // indicates success.
   bool WriteToPersistentLocation();
 
-  // If there is a global metrics file being updated on disk, mark it to be
-  // deleted when the process exits.
-  void DeletePersistentLocation();
-
  private:
   friend class StatisticsRecorder;
 
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
index df250a3..b680662 100644
--- a/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -102,8 +102,9 @@
 
   // Create a second allocator and have it access the memory of the first.
   std::unique_ptr<HistogramBase> recovered;
-  PersistentHistogramAllocator recovery(MakeUnique<PersistentMemoryAllocator>(
-      allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false));
+  PersistentHistogramAllocator recovery(
+      WrapUnique(new PersistentMemoryAllocator(
+          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
   PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
 
   recovered = histogram_iter.GetNext();
@@ -130,7 +131,7 @@
   const char temp_name[] = "CreateWithFileTest";
   ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath temp_file = temp_dir.GetPath().AppendASCII(temp_name);
+  FilePath temp_file = temp_dir.path().AppendASCII(temp_name);
   const size_t temp_size = 64 << 10;  // 64 KiB
 
   // Test creation of a new file.
@@ -155,130 +156,54 @@
   GlobalHistogramAllocator::ReleaseForTesting();
 }
 
-TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderMergeTest) {
-  const char LinearHistogramName[] = "SRTLinearHistogram";
-  const char SparseHistogramName[] = "SRTSparseHistogram";
-  const size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
+TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderTest) {
+  size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
 
   // Create a local StatisticsRecorder in which the newly created histogram
-  // will be recorded. The global allocator must be replaced after because the
-  // act of releasing will cause the active SR to forget about all histograms
-  // in the relased memory.
+  // will be recorded.
   std::unique_ptr<StatisticsRecorder> local_sr =
       StatisticsRecorder::CreateTemporaryForTesting();
   EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
-  std::unique_ptr<GlobalHistogramAllocator> old_allocator =
-      GlobalHistogramAllocator::ReleaseForTesting();
-  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
-  ASSERT_TRUE(GlobalHistogramAllocator::Get());
 
-  // Create a linear histogram for merge testing.
-  HistogramBase* histogram1 =
-      LinearHistogram::FactoryGet(LinearHistogramName, 1, 10, 10, 0);
-  ASSERT_TRUE(histogram1);
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      "TestHistogram", 1, 10, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(histogram);
   EXPECT_EQ(1U, StatisticsRecorder::GetHistogramCount());
-  histogram1->Add(3);
-  histogram1->Add(1);
-  histogram1->Add(4);
-  histogram1->AddCount(1, 4);
-  histogram1->Add(6);
+  histogram->Add(3);
+  histogram->Add(1);
+  histogram->Add(4);
+  histogram->Add(1);
+  histogram->Add(6);
 
-  // Create a sparse histogram for merge testing.
-  HistogramBase* histogram2 =
-      SparseHistogram::FactoryGet(SparseHistogramName, 0);
-  ASSERT_TRUE(histogram2);
-  EXPECT_EQ(2U, StatisticsRecorder::GetHistogramCount());
-  histogram2->Add(3);
-  histogram2->Add(1);
-  histogram2->Add(4);
-  histogram2->AddCount(1, 4);
-  histogram2->Add(6);
-
-  // Destroy the local SR and ensure that we're back to the initial state and
-  // restore the global allocator. Histograms created in the local SR will
-  // become unmanaged.
-  std::unique_ptr<GlobalHistogramAllocator> new_allocator =
-      GlobalHistogramAllocator::ReleaseForTesting();
+  // Destroy the local SR and ensure that we're back to the initial state.
   local_sr.reset();
   EXPECT_EQ(starting_sr_count, StatisticsRecorder::GetHistogramCount());
-  GlobalHistogramAllocator::Set(std::move(old_allocator));
 
-  // Create a "recovery" allocator using the same memory as the local one.
-  PersistentHistogramAllocator recovery1(MakeUnique<PersistentMemoryAllocator>(
-      const_cast<void*>(new_allocator->memory_allocator()->data()),
-      new_allocator->memory_allocator()->size(), 0, 0, "", false));
-  PersistentHistogramAllocator::Iterator histogram_iter1(&recovery1);
-
-  // Get the histograms that were created locally (and forgotten) and merge
-  // them into the global SR. New objects will be created.
+  // Create a second allocator and have it access the memory of the first.
   std::unique_ptr<HistogramBase> recovered;
-  while (true) {
-    recovered = histogram_iter1.GetNext();
-    if (!recovered)
-      break;
+  PersistentHistogramAllocator recovery(
+      WrapUnique(new PersistentMemoryAllocator(
+          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+  PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
 
-    recovery1.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
-    HistogramBase* found =
-        StatisticsRecorder::FindHistogram(recovered->histogram_name());
-    EXPECT_NE(recovered.get(), found);
-  };
-  EXPECT_EQ(starting_sr_count + 2, StatisticsRecorder::GetHistogramCount());
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
 
-  // Check the merged histograms for accuracy.
-  HistogramBase* found = StatisticsRecorder::FindHistogram(LinearHistogramName);
+  // Merge the recovered histogram to the SR. It will always be a new object.
+  recovery.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+  EXPECT_EQ(starting_sr_count + 1, StatisticsRecorder::GetHistogramCount());
+  HistogramBase* found =
+      StatisticsRecorder::FindHistogram(recovered->histogram_name());
   ASSERT_TRUE(found);
+  EXPECT_NE(recovered.get(), found);
+
+  // Ensure that the data got merged, too.
   std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
-  EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+  EXPECT_EQ(recovered->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
   EXPECT_EQ(1, snapshot->GetCount(3));
-  EXPECT_EQ(5, snapshot->GetCount(1));
+  EXPECT_EQ(2, snapshot->GetCount(1));
   EXPECT_EQ(1, snapshot->GetCount(4));
   EXPECT_EQ(1, snapshot->GetCount(6));
-
-  found = StatisticsRecorder::FindHistogram(SparseHistogramName);
-  ASSERT_TRUE(found);
-  snapshot = found->SnapshotSamples();
-  EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
-  EXPECT_EQ(1, snapshot->GetCount(3));
-  EXPECT_EQ(5, snapshot->GetCount(1));
-  EXPECT_EQ(1, snapshot->GetCount(4));
-  EXPECT_EQ(1, snapshot->GetCount(6));
-
-  // Perform additional histogram increments.
-  histogram1->AddCount(1, 3);
-  histogram1->Add(6);
-  histogram2->AddCount(1, 3);
-  histogram2->Add(7);
-
-  // Do another merge.
-  PersistentHistogramAllocator recovery2(MakeUnique<PersistentMemoryAllocator>(
-      const_cast<void*>(new_allocator->memory_allocator()->data()),
-      new_allocator->memory_allocator()->size(), 0, 0, "", false));
-  PersistentHistogramAllocator::Iterator histogram_iter2(&recovery2);
-  while (true) {
-    recovered = histogram_iter2.GetNext();
-    if (!recovered)
-      break;
-    recovery2.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
-  };
-  EXPECT_EQ(starting_sr_count + 2, StatisticsRecorder::GetHistogramCount());
-
-  // And verify.
-  found = StatisticsRecorder::FindHistogram(LinearHistogramName);
-  snapshot = found->SnapshotSamples();
-  EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
-  EXPECT_EQ(1, snapshot->GetCount(3));
-  EXPECT_EQ(8, snapshot->GetCount(1));
-  EXPECT_EQ(1, snapshot->GetCount(4));
-  EXPECT_EQ(2, snapshot->GetCount(6));
-
-  found = StatisticsRecorder::FindHistogram(SparseHistogramName);
-  snapshot = found->SnapshotSamples();
-  EXPECT_EQ(found->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
-  EXPECT_EQ(1, snapshot->GetCount(3));
-  EXPECT_EQ(8, snapshot->GetCount(1));
-  EXPECT_EQ(1, snapshot->GetCount(4));
-  EXPECT_EQ(1, snapshot->GetCount(6));
-  EXPECT_EQ(1, snapshot->GetCount(7));
 }
 
 }  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index f70b396..dfa408f 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -17,7 +17,6 @@
 #include "base/logging.h"
 #include "base/memory/shared_memory.h"
 #include "base/metrics/histogram_macros.h"
-#include "base/metrics/sparse_histogram.h"
 
 namespace {
 
@@ -49,11 +48,6 @@
   kFlagFull    = 1 << 1
 };
 
-// Errors that are logged in "errors" histogram.
-enum AllocatorError : int {
-  kMemoryIsCorrupt = 1,
-};
-
 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
   uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
   return (loaded_flags & flag) != 0;
@@ -64,13 +58,8 @@
   for (;;) {
     uint32_t new_flags = (loaded_flags & ~flag) | flag;
     // In the failue case, actual "flags" value stored in loaded_flags.
-    // These access are "relaxed" because they are completely independent
-    // of all other values.
-    if (flags->compare_exchange_weak(loaded_flags, new_flags,
-                                     std::memory_order_relaxed,
-                                     std::memory_order_relaxed)) {
+    if (flags->compare_exchange_weak(loaded_flags, new_flags))
       break;
-    }
   }
 }
 
@@ -143,19 +132,7 @@
 PersistentMemoryAllocator::Iterator::Iterator(
     const PersistentMemoryAllocator* allocator,
     Reference starting_after)
-    : allocator_(allocator), last_record_(0), record_count_(0) {
-  Reset(starting_after);
-}
-
-void PersistentMemoryAllocator::Iterator::Reset() {
-  last_record_.store(kReferenceQueue, std::memory_order_relaxed);
-  record_count_.store(0, std::memory_order_relaxed);
-}
-
-void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
-  last_record_.store(starting_after, std::memory_order_relaxed);
-  record_count_.store(0, std::memory_order_relaxed);
-
+    : allocator_(allocator), last_record_(starting_after), record_count_(0) {
   // Ensure that the starting point is a valid, iterable block (meaning it can
   // be read and has a non-zero "next" pointer).
   const volatile BlockHeader* block =
@@ -167,14 +144,6 @@
 }
 
 PersistentMemoryAllocator::Reference
-PersistentMemoryAllocator::Iterator::GetLast() {
-  Reference last = last_record_.load(std::memory_order_relaxed);
-  if (last == kReferenceQueue)
-    return kReferenceNull;
-  return last;
-}
-
-PersistentMemoryAllocator::Reference
 PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
   // Make a copy of the existing count of found-records, acquiring all changes
   // made to the allocator, notably "freeptr" (see comment in loop for why
@@ -224,8 +193,7 @@
     // is no need to do another such load when the while-loop restarts. A
     // "strong" compare-exchange is used because failing unnecessarily would
     // mean repeating some fairly costly validations above.
-    if (last_record_.compare_exchange_strong(
-            last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
+    if (last_record_.compare_exchange_strong(last, next)) {
       *type_return = block->type_id.load(std::memory_order_relaxed);
       break;
     }
@@ -279,42 +247,20 @@
           (page_size == 0 || size % page_size == 0 || readonly));
 }
 
-PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
-                                                     size_t size,
-                                                     size_t page_size,
-                                                     uint64_t id,
-                                                     base::StringPiece name,
-                                                     bool readonly)
-    : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
-                                size,
-                                page_size,
-                                id,
-                                name,
-                                readonly) {}
-
-PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
-                                                     size_t size,
-                                                     size_t page_size,
-                                                     uint64_t id,
-                                                     base::StringPiece name,
-                                                     bool readonly)
-    : mem_base_(static_cast<char*>(memory.base)),
-      mem_type_(memory.type),
+PersistentMemoryAllocator::PersistentMemoryAllocator(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    base::StringPiece name,
+    bool readonly)
+    : mem_base_(static_cast<char*>(base)),
       mem_size_(static_cast<uint32_t>(size)),
       mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
       readonly_(readonly),
       corrupt_(0),
       allocs_histogram_(nullptr),
-      used_histogram_(nullptr),
-      errors_histogram_(nullptr) {
-  // These asserts ensure that the structures are 32/64-bit agnostic and meet
-  // all the requirements of use within the allocator. They access private
-  // definitions and so cannot be moved to the global scope.
-  static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
-                "struct is not portable across different natural word widths");
-  static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
-                "struct is not portable across different natural word widths");
-
+      used_histogram_(nullptr) {
   static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
                 "BlockHeader is not a multiple of kAllocAlignment");
   static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
@@ -323,7 +269,7 @@
                 "\"queue\" is not aligned properly; must be at end of struct");
 
   // Ensure that memory segment is of acceptable size.
-  CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));
+  CHECK(IsMemoryAcceptable(base, size, page_size, readonly));
 
   // These atomics operate inter-process and so must be lock-free. The local
   // casts are to make sure it can be evaluated at compile time to a constant.
@@ -380,7 +326,7 @@
     if (!name.empty()) {
       const size_t name_length = name.length() + 1;
       shared_meta()->name = Allocate(name_length, 0);
-      char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
+      char* name_cstr = GetAsObject<char>(shared_meta()->name, 0);
       if (name_cstr)
         memcpy(name_cstr, name.data(), name.length());
     }
@@ -409,7 +355,7 @@
         *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
 
       // Ensure that settings are still valid after the above adjustments.
-      if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly))
+      if (!IsMemoryAcceptable(base, mem_size_, mem_page_, readonly))
         SetCorrupt();
     }
   }
@@ -428,8 +374,7 @@
 
 const char* PersistentMemoryAllocator::Name() const {
   Reference name_ref = shared_meta()->name;
-  const char* name_cstr =
-      GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
+  const char* name_cstr = GetAsObject<char>(name_ref, 0);
   if (!name_cstr)
     return "";
 
@@ -447,26 +392,16 @@
     base::StringPiece name) {
   if (name.empty() || readonly_)
     return;
+
   std::string name_string = name.as_string();
-
-#if 0
-  // This histogram wasn't being used so has been disabled. It is left here
-  // in case development of a new use of the allocator could benefit from
-  // recording (temporarily and locally) the allocation sizes.
-  DCHECK(!allocs_histogram_);
-  allocs_histogram_ = Histogram::FactoryGet(
-      "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
-      HistogramBase::kUmaTargetedHistogramFlag);
-#endif
-
   DCHECK(!used_histogram_);
   used_histogram_ = LinearHistogram::FactoryGet(
       "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
       HistogramBase::kUmaTargetedHistogramFlag);
 
-  DCHECK(!errors_histogram_);
-  errors_histogram_ = SparseHistogram::FactoryGet(
-      "UMA.PersistentAllocator." + name_string + ".Errors",
+  DCHECK(!allocs_histogram_);
+  allocs_histogram_ = Histogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
       HistogramBase::kUmaTargetedHistogramFlag);
 }
 
@@ -475,24 +410,6 @@
                   mem_size_);
 }
 
-PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
-    const void* memory,
-    uint32_t type_id) const {
-  uintptr_t address = reinterpret_cast<uintptr_t>(memory);
-  if (address < reinterpret_cast<uintptr_t>(mem_base_))
-    return kReferenceNull;
-
-  uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
-  if (offset >= mem_size_ || offset < sizeof(BlockHeader))
-    return kReferenceNull;
-
-  Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
-  if (!GetBlockData(ref, type_id, kSizeAny))
-    return kReferenceNull;
-
-  return ref;
-}
-
 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
   const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
   if (!block)
@@ -516,62 +433,15 @@
 
 bool PersistentMemoryAllocator::ChangeType(Reference ref,
                                            uint32_t to_type_id,
-                                           uint32_t from_type_id,
-                                           bool clear) {
+                                           uint32_t from_type_id) {
   DCHECK(!readonly_);
   volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
   if (!block)
     return false;
 
-  // "Strong" exchanges are used below because there is no loop that can retry
-  // in the wake of spurious failures possible with "weak" exchanges. It is,
-  // in aggregate, an "acquire-release" operation so no memory accesses can be
-  // reordered either before or after this method (since changes based on type
-  // could happen on either side).
-
-  if (clear) {
-    // If clearing the memory, first change it to the "transitioning" type so
-    // there can be no confusion by other threads. After the memory is cleared,
-    // it can be changed to its final type.
-    if (!block->type_id.compare_exchange_strong(
-            from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
-            std::memory_order_acquire)) {
-      // Existing type wasn't what was expected: fail (with no changes)
-      return false;
-    }
-
-    // Clear the memory in an atomic manner. Using "release" stores force
-    // every write to be done after the ones before it. This is better than
-    // using memset because (a) it supports "volatile" and (b) it creates a
-    // reliable pattern upon which other threads may rely.
-    volatile std::atomic<int>* data =
-        reinterpret_cast<volatile std::atomic<int>*>(
-            reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
-    const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
-    DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
-    for (uint32_t i = 0; i < words; ++i) {
-      data->store(0, std::memory_order_release);
-      ++data;
-    }
-
-    // If the destination type is "transitioning" then skip the final exchange.
-    if (to_type_id == kTypeIdTransitioning)
-      return true;
-
-    // Finish the change to the desired type.
-    from_type_id = kTypeIdTransitioning;  // Exchange needs modifiable original.
-    bool success = block->type_id.compare_exchange_strong(
-        from_type_id, to_type_id, std::memory_order_release,
-        std::memory_order_relaxed);
-    DCHECK(success);  // Should never fail.
-    return success;
-  }
-
-  // One step change to the new type. Will return false if the existing value
-  // doesn't match what is expected.
-  return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
-                                                std::memory_order_acq_rel,
-                                                std::memory_order_acquire);
+  // This is a "strong" exchange because there is no loop that can retry in
+  // the wake of spurious failures possible with "weak" exchanges.
+  return block->type_id.compare_exchange_strong(from_type_id, to_type_id);
 }
 
 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
@@ -650,9 +520,8 @@
         return kReferenceNull;
       }
       const uint32_t new_freeptr = freeptr + page_free;
-      if (shared_meta()->freeptr.compare_exchange_strong(
-              freeptr, new_freeptr, std::memory_order_acq_rel,
-              std::memory_order_acquire)) {
+      if (shared_meta()->freeptr.compare_exchange_strong(freeptr,
+                                                         new_freeptr)) {
         block->size = page_free;
         block->cookie = kBlockCookieWasted;
       }
@@ -675,11 +544,8 @@
     // while we were processing. A "weak" exchange would be permissable here
     // because the code will just loop and try again but the above processing
     // is significant so make the extra effort of a "strong" exchange.
-    if (!shared_meta()->freeptr.compare_exchange_strong(
-            freeptr, new_freeptr, std::memory_order_acq_rel,
-            std::memory_order_acquire)) {
+    if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr))
       continue;
-    }
 
     // Given that all memory was zeroed before ever being given to an instance
     // of this class and given that we only allocate in a monotomic fashion
@@ -695,10 +561,6 @@
       return kReferenceNull;
     }
 
-    // Load information into the block header. There is no "release" of the
-    // data here because this memory can, currently, be seen only by the thread
-    // performing the allocation. When it comes time to share this, the thread
-    // will call MakeIterable() which does the release operation.
     block->size = size;
     block->cookie = kBlockCookieAllocated;
     block->type_id.store(type_id, std::memory_order_relaxed);
@@ -711,7 +573,7 @@
       mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
       (uint32_t)sizeof(BlockHeader));
   meminfo->total = mem_size_;
-  meminfo->free = remaining - sizeof(BlockHeader);
+  meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
 }
 
 void PersistentMemoryAllocator::MakeIterable(Reference ref) {
@@ -779,15 +641,9 @@
 // case, it's safe to discard the constness and modify the local flag and
 // maybe even the shared flag if the underlying data isn't actually read-only.
 void PersistentMemoryAllocator::SetCorrupt() const {
-  if (!corrupt_.load(std::memory_order_relaxed) &&
-      !CheckFlag(
-          const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
-          kFlagCorrupt)) {
-    LOG(ERROR) << "Corruption detected in shared-memory segment.";
-    RecordError(kMemoryIsCorrupt);
-  }
-
-  corrupt_.store(true, std::memory_order_relaxed);
+  LOG(ERROR) << "Corruption detected in shared-memory segment.";
+  const_cast<std::atomic<bool>*>(&corrupt_)->store(true,
+                                                   std::memory_order_relaxed);
   if (!readonly_) {
     SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
             kFlagCorrupt);
@@ -817,10 +673,10 @@
                                     uint32_t size, bool queue_ok,
                                     bool free_ok) const {
   // Validation of parameters.
-  if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
-    return nullptr;
   if (ref % kAllocAlignment != 0)
     return nullptr;
+  if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
+    return nullptr;
   size += sizeof(BlockHeader);
   if (ref + size > mem_size_)
     return nullptr;
@@ -849,11 +705,6 @@
   return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
 }
 
-void PersistentMemoryAllocator::RecordError(int error) const {
-  if (errors_histogram_)
-    errors_histogram_->Add(error);
-}
-
 const volatile void* PersistentMemoryAllocator::GetBlockData(
     Reference ref,
     uint32_t type_id,
@@ -888,60 +739,37 @@
                                 size, 0, id, name, false) {}
 
 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
-  DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
+  DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_);
 }
 
 // static
-PersistentMemoryAllocator::Memory
-LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
-  void* address;
-
+void* LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
 #if defined(OS_WIN)
-  address =
+  void* address =
       ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-  if (address)
-    return Memory(address, MEM_VIRTUAL);
-  UMA_HISTOGRAM_SPARSE_SLOWLY("UMA.LocalPersistentMemoryAllocator.Failures.Win",
-                              ::GetLastError());
+  DPCHECK(address);
+  return address;
 #elif defined(OS_POSIX)
   // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
   // MAP_SHARED is not available on Linux <2.4 but required on Mac.
-  address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
-                   MAP_ANON | MAP_SHARED, -1, 0);
-  if (address != MAP_FAILED)
-    return Memory(address, MEM_VIRTUAL);
-  UMA_HISTOGRAM_SPARSE_SLOWLY(
-      "UMA.LocalPersistentMemoryAllocator.Failures.Posix", errno);
+  void* address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
+                         MAP_ANON | MAP_SHARED, -1, 0);
+  DPCHECK(MAP_FAILED != address);
+  return address;
 #else
 #error This architecture is not (yet) supported.
 #endif
-
-  // As a last resort, just allocate the memory from the heap. This will
-  // achieve the same basic result but the acquired memory has to be
-  // explicitly zeroed and thus realized immediately (i.e. all pages are
-  // added to the process now istead of only when first accessed).
-  address = malloc(size);
-  DPCHECK(address);
-  memset(address, 0, size);
-  return Memory(address, MEM_MALLOC);
 }
 
 // static
 void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
-                                                           size_t size,
-                                                           MemoryType type) {
-  if (type == MEM_MALLOC) {
-    free(memory);
-    return;
-  }
-
-  DCHECK_EQ(MEM_VIRTUAL, type);
+                                                           size_t size) {
 #if defined(OS_WIN)
   BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
-  DCHECK(success);
+  DPCHECK(success);
 #elif defined(OS_POSIX)
   int result = ::munmap(memory, size);
-  DCHECK_EQ(0, result);
+  DPCHECK(0 == result);
 #else
 #error This architecture is not (yet) supported.
 #endif
@@ -955,13 +783,12 @@
     uint64_t id,
     base::StringPiece name,
     bool read_only)
-    : PersistentMemoryAllocator(
-          Memory(static_cast<uint8_t*>(memory->memory()), MEM_SHARED),
-          memory->mapped_size(),
-          0,
-          id,
-          name,
-          read_only),
+    : PersistentMemoryAllocator(static_cast<uint8_t*>(memory->memory()),
+                                memory->mapped_size(),
+                                0,
+                                id,
+                                name,
+                                read_only),
       shared_memory_(std::move(memory)) {}
 
 SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
@@ -982,13 +809,12 @@
     uint64_t id,
     base::StringPiece name,
     bool read_only)
-    : PersistentMemoryAllocator(
-          Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
-          max_size != 0 ? max_size : file->length(),
-          0,
-          id,
-          name,
-          read_only),
+    : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
+                                max_size != 0 ? max_size : file->length(),
+                                0,
+                                id,
+                                name,
+                                read_only),
       mapped_file_(std::move(file)) {}
 
 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index b38f284..2fc0d2d 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -9,7 +9,6 @@
 
 #include <atomic>
 #include <memory>
-#include <type_traits>
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
@@ -48,50 +47,6 @@
 // Note that memory not in active use is not accessed so it is possible to
 // use virtual memory, including memory-mapped files, as backing storage with
 // the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
-//
-// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
-// character arrays and manipulating that memory manually, the better way is
-// generally to use the "object" methods to create and manage allocations. In
-// this way the sizing, type-checking, and construction are all automatic. For
-// this to work, however, every type of stored object must define two public
-// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
-//
-// struct MyPersistentObjectType {
-//     // SHA1(MyPersistentObjectType): Increment this if structure changes!
-//     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
-//
-//     // Expected size for 32/64-bit check. Update this if structure changes!
-//     static constexpr size_t kExpectedInstanceSize = 20;
-//
-//     ...
-// };
-//
-// kPersistentTypeId: This value is an arbitrary identifier that allows the
-//   identification of these objects in the allocator, including the ability
-//   to find them via iteration. The number is arbitrary but using the first
-//   four bytes of the SHA1 hash of the type name means that there shouldn't
-//   be any conflicts with other types that may also be stored in the memory.
-//   The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
-//   be used to generate the hash if the type name seems common. Use a command
-//   like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
-//   If the structure layout changes, ALWAYS increment this number so that
-//   newer versions of the code don't try to interpret persistent data written
-//   by older versions with a different layout.
-//
-// kExpectedInstanceSize: This value is the hard-coded number that matches
-//   what sizeof(T) would return. By providing it explicitly, the allocator can
-//   verify that the structure is compatible between both 32-bit and 64-bit
-//   versions of the code.
-//
-// Using New manages the memory and then calls the default constructor for the
-// object. Given that objects are persistent, no destructor is ever called
-// automatically though a caller can explicitly call Delete to destruct it and
-// change the type to something indicating it is no longer in use.
-//
-// Though persistent memory segments are transferrable between programs built
-// for different natural word widths, they CANNOT be exchanged between CPUs
-// of different endianess. Attempts to do so will simply see the existing data
-// as corrupt and refuse to access any of it.
 class BASE_EXPORT PersistentMemoryAllocator {
  public:
   typedef uint32_t Reference;
@@ -101,11 +56,6 @@
   // That means that multiple threads can share an iterator and the same
   // reference will not be returned twice.
   //
-  // The order of the items returned by an iterator matches the order in which
-  // MakeIterable() was called on them. Once an allocation is made iterable,
-  // it is always such so the only possible difference between successive
-  // iterations is for more to be added to the end.
-  //
   // Iteration, in general, is tolerant of corrupted memory. It will return
   // what it can and stop only when corruption forces it to. Bad corruption
   // could cause the same object to be returned many times but it will
@@ -126,17 +76,6 @@
     Iterator(const PersistentMemoryAllocator* allocator,
              Reference starting_after);
 
-    // Resets the iterator back to the beginning.
-    void Reset();
-
-    // Resets the iterator, resuming from the |starting_after| reference.
-    void Reset(Reference starting_after);
-
-    // Returns the previously retrieved reference, or kReferenceNull if none.
-    // If constructor or reset with a starting_after location, this will return
-    // that value.
-    Reference GetLast();
-
     // Gets the next iterable, storing that type in |type_return|. The actual
     // return value is a reference to the allocation inside the allocator or
     // zero if there are no more. GetNext() may still be called again at a
@@ -149,18 +88,6 @@
     // calls to GetNext() meaning it's possible to completely miss entries.
     Reference GetNextOfType(uint32_t type_match);
 
-    // As above but works using object type.
-    template <typename T>
-    Reference GetNextOfType() {
-      return GetNextOfType(T::kPersistentTypeId);
-    }
-
-    // As above but works using objects and returns null if not found.
-    template <typename T>
-    const T* GetNextOfObject() {
-      return GetAsObject<T>(GetNextOfType<T>());
-    }
-
     // Converts references to objects. This is a convenience method so that
     // users of the iterator don't need to also have their own pointer to the
     // allocator over which the iterator runs in order to retrieve objects.
@@ -169,27 +96,8 @@
     // non-const (external) pointer to the same allocator (or use const_cast
     // to remove the qualifier).
     template <typename T>
-    const T* GetAsObject(Reference ref) const {
-      return allocator_->GetAsObject<T>(ref);
-    }
-
-    // Similar to GetAsObject() but converts references to arrays of things.
-    template <typename T>
-    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
-      return allocator_->GetAsArray<T>(ref, type_id, count);
-    }
-
-    // Convert a generic pointer back into a reference. A null reference will
-    // be returned if |memory| is not inside the persistent segment or does not
-    // point to an object of the specified |type_id|.
-    Reference GetAsReference(const void* memory, uint32_t type_id) const {
-      return allocator_->GetAsReference(memory, type_id);
-    }
-
-    // As above but convert an object back into a reference.
-    template <typename T>
-    Reference GetAsReference(const T* obj) const {
-      return allocator_->GetAsReference(obj);
+    const T* GetAsObject(Reference ref, uint32_t type_id) const {
+      return allocator_->GetAsObject<T>(ref, type_id);
     }
 
    private:
@@ -212,21 +120,11 @@
   };
 
   enum : Reference {
-    // A common "null" reference value.
-    kReferenceNull = 0,
+    kReferenceNull = 0  // A common "null" reference value.
   };
 
   enum : uint32_t {
-    // A value that will match any type when doing lookups.
-    kTypeIdAny = 0x00000000,
-
-    // A value indicating that the type is in transition. Work is being done
-    // on the contents to prepare it for a new type to come.
-    kTypeIdTransitioning = 0xFFFFFFFF,
-  };
-
-  enum : size_t {
-    kSizeAny = 1  // Constant indicating that any array size is acceptable.
+    kTypeIdAny = 0  // Match any type-id inside GetAsObject().
   };
 
   // This is the standard file extension (suitable for being passed to the
@@ -289,7 +187,7 @@
   //
   // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
   // with the following histograms:
-  //    UMA.PersistentAllocator.name.Errors
+  //    UMA.PersistentAllocator.name.Allocs
   //    UMA.PersistentAllocator.name.UsedPct
   void CreateTrackingHistograms(base::StringPiece name);
 
@@ -310,27 +208,6 @@
   // TIME before accessing it or risk crashing! Once dereferenced, the pointer
   // is safe to reuse forever.
   //
-  // It is essential that the object be of a fixed size. All fields must be of
-  // a defined type that does not change based on the compiler or the CPU
-  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
-  // Unacceptable are int, bool, and wchar_t which are implementation defined
-  // with regards to their size.
-  //
-  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
-  // differently between 32 and 64 bit architectures. Either put the bigger
-  // elements first, group smaller elements into blocks the size of larger
-  // elements, or manually insert padding fields as appropriate for the
-  // largest architecture, including at the end.
-  //
-  // To protected against mistakes, all objects must have the attribute
-  // |kExpectedInstanceSize| (static constexpr size_t)  that is a hard-coded
-  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
-  // instance size is not fixed, at least one build will fail.
-  //
-  // If the size of a structure changes, the type-ID used to recognize it
-  // should also change so later versions of the code don't try to read
-  // incompatible structures from earlier versions.
-  //
   // NOTE: Though this method will guarantee that an object of the specified
   // type can be accessed without going outside the bounds of the memory
   // segment, it makes no guarantees of the validity of the data within the
@@ -343,51 +220,19 @@
   // nature of that keyword to the caller. It can add it back, if necessary,
   // based on knowledge of how the allocator is being used.
   template <typename T>
-  T* GetAsObject(Reference ref) {
-    static_assert(std::is_standard_layout<T>::value, "only standard objects");
-    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
-    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
-    return const_cast<T*>(reinterpret_cast<volatile T*>(
-        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
+  T* GetAsObject(Reference ref, uint32_t type_id) {
+    static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
+    return const_cast<T*>(
+        reinterpret_cast<volatile T*>(GetBlockData(ref, type_id, sizeof(T))));
   }
   template <typename T>
-  const T* GetAsObject(Reference ref) const {
-    static_assert(std::is_standard_layout<T>::value, "only standard objects");
-    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
-    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
-    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
-        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
+  const T* GetAsObject(Reference ref, uint32_t type_id) const {
+    static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
+    return const_cast<const T*>(
+        reinterpret_cast<const volatile T*>(GetBlockData(
+            ref, type_id, sizeof(T))));
   }
 
-  // Like GetAsObject but get an array of simple, fixed-size types.
-  //
-  // Use a |count| of the required number of array elements, or kSizeAny.
-  // GetAllocSize() can be used to calculate the upper bound but isn't reliable
-  // because padding can make space for extra elements that were not written.
-  //
-  // Remember that an array of char is a string but may not be NUL terminated.
-  //
-  // There are no compile-time or run-time checks to ensure 32/64-bit size
-  // compatibilty when using these accessors. Only use fixed-size types such
-  // as char, float, double, or (u)intXX_t.
-  template <typename T>
-  T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
-    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
-    return const_cast<T*>(reinterpret_cast<volatile T*>(
-        GetBlockData(ref, type_id, count * sizeof(T))));
-  }
-  template <typename T>
-  const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
-    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
-    return const_cast<const char*>(reinterpret_cast<const volatile T*>(
-        GetBlockData(ref, type_id, count * sizeof(T))));
-  }
-
-  // Get the corresponding reference for an object held in persistent memory.
-  // If the |memory| is not valid or the type does not match, a kReferenceNull
-  // result will be returned.
-  Reference GetAsReference(const void* memory, uint32_t type_id) const;
-
   // Get the number of bytes allocated to a block. This is useful when storing
   // arrays in order to validate the ending boundary. The returned value will
   // include any padding added to achieve the required alignment and so could
@@ -399,22 +244,14 @@
   // even though the memory stays valid and allocated. Changing the type is
   // an atomic compare/exchange and so requires knowing the existing value.
   // It will return false if the existing type is not what is expected.
-  //
-  // Changing the type doesn't mean the data is compatible with the new type.
-  // Passing true for |clear| will zero the memory after the type has been
-  // changed away from |from_type_id| but before it becomes |to_type_id| meaning
-  // that it is done in a manner that is thread-safe. Memory is guaranteed to
-  // be zeroed atomically by machine-word in a monotonically increasing order.
-  //
-  // It will likely be necessary to reconstruct the type before it can be used.
-  // Changing the type WILL NOT invalidate existing pointers to the data, either
-  // in this process or others, so changing the data structure could have
-  // unpredicatable results. USE WITH CARE!
   uint32_t GetType(Reference ref) const;
-  bool ChangeType(Reference ref,
-                  uint32_t to_type_id,
-                  uint32_t from_type_id,
-                  bool clear);
+  bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);
+
+  // Reserve space in the memory segment of the desired |size| and |type_id|.
+  // A return value of zero indicates the allocation failed, otherwise the
+  // returned reference can be used by any process to get a real pointer via
+  // the GetAsObject() call.
+  Reference Allocate(size_t size, uint32_t type_id);
 
   // Allocated objects can be added to an internal list that can then be
   // iterated over by other processes. If an allocated object can be found
@@ -423,7 +260,6 @@
   // succeeds unless corruption is detected; check IsCorrupted() to find out.
   // Once an object is made iterable, its position in iteration can never
   // change; new iterable objects will always be added after it in the series.
-  // Changing the type does not alter its "iterable" status.
   void MakeIterable(Reference ref);
 
   // Get the information about the amount of free space in the allocator. The
@@ -450,138 +286,8 @@
   // called before such information is to be displayed or uploaded.
   void UpdateTrackingHistograms();
 
-  // While the above works much like malloc & free, these next methods provide
-  // an "object" interface similar to new and delete.
-
-  // Reserve space in the memory segment of the desired |size| and |type_id|.
-  // A return value of zero indicates the allocation failed, otherwise the
-  // returned reference can be used by any process to get a real pointer via
-  // the GetAsObject() or GetAsArray calls.
-  Reference Allocate(size_t size, uint32_t type_id);
-
-  // Allocate and construct an object in persistent memory. The type must have
-  // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
-  // static constexpr fields that are used to ensure compatibility between
-  // software versions. An optional size parameter can be specified to force
-  // the allocation to be bigger than the size of the object; this is useful
-  // when the last field is actually variable length.
-  template <typename T>
-  T* New(size_t size) {
-    if (size < sizeof(T))
-      size = sizeof(T);
-    Reference ref = Allocate(size, T::kPersistentTypeId);
-    void* mem =
-        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
-    if (!mem)
-      return nullptr;
-    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
-    return new (mem) T();
-  }
-  template <typename T>
-  T* New() {
-    return New<T>(sizeof(T));
-  }
-
-  // Similar to New, above, but construct the object out of an existing memory
-  // block and of an expected type. If |clear| is true, memory will be zeroed
-  // before construction. Though this is not standard object behavior, it
-  // is present to match with new allocations that always come from zeroed
-  // memory. Anything previously present simply ceases to exist; no destructor
-  // is called for it so explicitly Delete() the old object first if need be.
-  // Calling this will not invalidate existing pointers to the object, either
-  // in this process or others, so changing the object could have unpredictable
-  // results. USE WITH CARE!
-  template <typename T>
-  T* New(Reference ref, uint32_t from_type_id, bool clear) {
-    DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
-    // Make sure the memory is appropriate. This won't be used until after
-    // the type is changed but checking first avoids the possibility of having
-    // to change the type back.
-    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
-    if (!mem)
-      return nullptr;
-    // Ensure the allocator's internal alignment is sufficient for this object.
-    // This protects against coding errors in the allocator.
-    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
-    // Change the type, clearing the memory if so desired. The new type is
-    // "transitioning" so that there is no race condition with the construction
-    // of the object should another thread be simultaneously iterating over
-    // data. This will "acquire" the memory so no changes get reordered before
-    // it.
-    if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
-      return nullptr;
-    // Construct an object of the desired type on this memory, just as if
-    // New() had been called to create it.
-    T* obj = new (mem) T();
-    // Finally change the type to the desired one. This will "release" all of
-    // the changes above and so provide a consistent view to other threads.
-    bool success =
-        ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
-    DCHECK(success);
-    return obj;
-  }
-
-  // Deletes an object by destructing it and then changing the type to a
-  // different value (default 0).
-  template <typename T>
-  void Delete(T* obj, uint32_t new_type) {
-    // Get the reference for the object.
-    Reference ref = GetAsReference<T>(obj);
-    // First change the type to "transitioning" so there is no race condition
-    // where another thread could find the object through iteration while it
-    // is been destructed. This will "acquire" the memory so no changes get
-    // reordered before it. It will fail if |ref| is invalid.
-    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
-      return;
-    // Destruct the object.
-    obj->~T();
-    // Finally change the type to the desired value. This will "release" all
-    // the changes above.
-    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
-    DCHECK(success);
-  }
-  template <typename T>
-  void Delete(T* obj) {
-    Delete<T>(obj, 0);
-  }
-
-  // As above but works with objects allocated from persistent memory.
-  template <typename T>
-  Reference GetAsReference(const T* obj) const {
-    return GetAsReference(obj, T::kPersistentTypeId);
-  }
-
-  // As above but works with an object allocated from persistent memory.
-  template <typename T>
-  void MakeIterable(const T* obj) {
-    MakeIterable(GetAsReference<T>(obj));
-  }
-
  protected:
-  enum MemoryType {
-    MEM_EXTERNAL,
-    MEM_MALLOC,
-    MEM_VIRTUAL,
-    MEM_SHARED,
-    MEM_FILE,
-  };
-
-  struct Memory {
-    Memory(void* b, MemoryType t) : base(b), type(t) {}
-
-    void* base;
-    MemoryType type;
-  };
-
-  // Constructs the allocator. Everything is the same as the public allocator
-  // except |memory| which is a structure with additional information besides
-  // the base address.
-  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
-                            uint64_t id, base::StringPiece name,
-                            bool readonly);
-
   volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
-  const MemoryType mem_type_;      // Type of memory allocation.
   const uint32_t mem_size_;        // Size of entire memory segment.
   const uint32_t mem_page_;        // Page size allocations shouldn't cross.
 
@@ -626,15 +332,11 @@
               ref, type_id, size));
   }
 
-  // Record an error in the internal histogram.
-  void RecordError(int error) const;
-
-  const bool readonly_;                // Indicates access to read-only memory.
-  mutable std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.
+  const bool readonly_;              // Indicates access to read-only memory.
+  std::atomic<bool> corrupt_;        // Local version of "corrupted" flag.
 
   HistogramBase* allocs_histogram_;  // Histogram recording allocs.
   HistogramBase* used_histogram_;    // Histogram recording used space.
-  HistogramBase* errors_histogram_;  // Histogram recording errors.
 
   friend class PersistentMemoryAllocatorTest;
   FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
@@ -657,10 +359,10 @@
   // Allocates a block of local memory of the specified |size|, ensuring that
   // the memory will not be physically allocated until accessed and will read
   // as zero when that happens.
-  static Memory AllocateLocalMemory(size_t size);
+  static void* AllocateLocalMemory(size_t size);
 
   // Deallocates a block of local |memory| of the specified |size|.
-  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);
+  static void DeallocateLocalMemory(void* memory, size_t size);
 
   DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
 };
@@ -680,7 +382,7 @@
 
   SharedMemory* shared_memory() { return shared_memory_.get(); }
 
-  // Ensure that the memory isn't so invalid that it would crash when passing it
+  // Ensure that the memory isn't so invalid that it won't crash when passing it
   // to the allocator. This doesn't guarantee the data is valid, just that it
   // won't cause the program to abort. The existing IsCorrupt() call will handle
   // the rest.
@@ -709,7 +411,7 @@
                                 bool read_only);
   ~FilePersistentMemoryAllocator() override;
 
-  // Ensure that the file isn't so invalid that it would crash when passing it
+  // Ensure that the file isn't so invalid that it won't crash when passing it
   // to the allocator. This doesn't guarantee the file is valid, just that it
   // won't cause the program to abort. The existing IsCorrupt() call will handle
   // the rest.
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index d12e00f..a3d90c2 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -40,20 +40,16 @@
   uint32_t kAllocAlignment;
 
   struct TestObject1 {
-    static constexpr uint32_t kPersistentTypeId = 1;
-    static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
-    int32_t onething;
+    int onething;
     char oranother;
   };
 
   struct TestObject2 {
-    static constexpr uint32_t kPersistentTypeId = 2;
-    static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
-    int64_t thiis;
-    int32_t that;
+    int thiis;
+    long that;
     float andthe;
-    double other;
-    char thing[8];
+    char other;
+    double thing;
   };
 
   PersistentMemoryAllocatorTest() {
@@ -67,6 +63,7 @@
     allocator_.reset(new PersistentMemoryAllocator(
         mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
         TEST_ID, TEST_NAME, false));
+    allocator_->CreateTrackingHistograms(allocator_->Name());
   }
 
   void TearDown() override {
@@ -93,13 +90,14 @@
 };
 
 TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
-  allocator_->CreateTrackingHistograms(allocator_->Name());
-
   std::string base_name(TEST_NAME);
   EXPECT_EQ(TEST_ID, allocator_->Id());
   EXPECT_TRUE(allocator_->used_histogram_);
   EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
             allocator_->used_histogram_->histogram_name());
+  EXPECT_TRUE(allocator_->allocs_histogram_);
+  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".Allocs",
+            allocator_->allocs_histogram_->histogram_name());
 
   // Get base memory info for later comparison.
   PersistentMemoryAllocator::MemoryInfo meminfo0;
@@ -109,12 +107,10 @@
 
   // Validate allocation of test object and make sure it can be referenced
   // and all metadata looks correct.
-  TestObject1* obj1 = allocator_->New<TestObject1>();
-  ASSERT_TRUE(obj1);
-  Reference block1 = allocator_->GetAsReference(obj1);
-  ASSERT_NE(0U, block1);
-  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
-  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
+  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
+  EXPECT_NE(0U, block1);
+  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
   EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
   EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
             allocator_->GetAllocSize(block1));
@@ -123,38 +119,21 @@
   EXPECT_EQ(meminfo0.total, meminfo1.total);
   EXPECT_GT(meminfo0.free, meminfo1.free);
 
-  // Verify that pointers can be turned back into references and that invalid
-  // addresses return null.
-  char* memory1 = allocator_->GetAsArray<char>(block1, 1, 1);
-  ASSERT_TRUE(memory1);
-  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 0));
-  EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 1));
-  EXPECT_EQ(0U, allocator_->GetAsReference(memory1, 2));
-  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 1, 0));
-  EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 16, 0));
-  EXPECT_EQ(0U, allocator_->GetAsReference(nullptr, 0));
-  EXPECT_EQ(0U, allocator_->GetAsReference(&base_name, 0));
-
   // Ensure that the test-object can be made iterable.
   PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
-  EXPECT_EQ(0U, iter1a.GetLast());
   uint32_t type;
   EXPECT_EQ(0U, iter1a.GetNext(&type));
   allocator_->MakeIterable(block1);
   EXPECT_EQ(block1, iter1a.GetNext(&type));
   EXPECT_EQ(1U, type);
-  EXPECT_EQ(block1, iter1a.GetLast());
   EXPECT_EQ(0U, iter1a.GetNext(&type));
-  EXPECT_EQ(block1, iter1a.GetLast());
 
   // Create second test-object and ensure everything is good and it cannot
   // be confused with test-object of another type.
-  TestObject2* obj2 = allocator_->New<TestObject2>();
-  ASSERT_TRUE(obj2);
-  Reference block2 = allocator_->GetAsReference(obj2);
-  ASSERT_NE(0U, block2);
-  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
-  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
+  Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
+  EXPECT_NE(0U, block2);
+  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
+  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1));
   EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
   EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
             allocator_->GetAllocSize(block2));
@@ -164,27 +143,9 @@
   EXPECT_GT(meminfo1.free, meminfo2.free);
 
   // Ensure that second test-object can also be made iterable.
-  allocator_->MakeIterable(obj2);
+  allocator_->MakeIterable(block2);
   EXPECT_EQ(block2, iter1a.GetNext(&type));
   EXPECT_EQ(2U, type);
-  EXPECT_EQ(block2, iter1a.GetLast());
-  EXPECT_EQ(0U, iter1a.GetNext(&type));
-  EXPECT_EQ(block2, iter1a.GetLast());
-
-  // Check that the iterator can be reset to the beginning.
-  iter1a.Reset();
-  EXPECT_EQ(0U, iter1a.GetLast());
-  EXPECT_EQ(block1, iter1a.GetNext(&type));
-  EXPECT_EQ(block1, iter1a.GetLast());
-  EXPECT_EQ(block2, iter1a.GetNext(&type));
-  EXPECT_EQ(block2, iter1a.GetLast());
-  EXPECT_EQ(0U, iter1a.GetNext(&type));
-
-  // Check that the iterator can be reset to an arbitrary location.
-  iter1a.Reset(block1);
-  EXPECT_EQ(block1, iter1a.GetLast());
-  EXPECT_EQ(block2, iter1a.GetNext(&type));
-  EXPECT_EQ(block2, iter1a.GetLast());
   EXPECT_EQ(0U, iter1a.GetNext(&type));
 
   // Check that iteration can begin after an arbitrary location.
@@ -203,11 +164,26 @@
   EXPECT_TRUE(used_samples);
   EXPECT_EQ(1, used_samples->TotalCount());
 
-  // Check that an object's type can be changed.
+  // Check the internal histogram record of allocation requests.
+  std::unique_ptr<HistogramSamples> allocs_samples(
+      allocator_->allocs_histogram_->SnapshotSamples());
+  EXPECT_TRUE(allocs_samples);
+  EXPECT_EQ(2, allocs_samples->TotalCount());
+  EXPECT_EQ(0, allocs_samples->GetCount(0));
+  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject1)));
+  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject2)));
+#if !DCHECK_IS_ON()  // DCHECK builds will die at a NOTREACHED().
+  EXPECT_EQ(0U, allocator_->Allocate(TEST_MEMORY_SIZE + 1, 0));
+  allocs_samples = allocator_->allocs_histogram_->SnapshotSamples();
+  EXPECT_EQ(3, allocs_samples->TotalCount());
+  EXPECT_EQ(1, allocs_samples->GetCount(0));
+#endif
+
+  // Check that an objcet's type can be changed.
   EXPECT_EQ(2U, allocator_->GetType(block2));
-  allocator_->ChangeType(block2, 3, 2, false);
+  allocator_->ChangeType(block2, 3, 2);
   EXPECT_EQ(3U, allocator_->GetType(block2));
-  allocator_->New<TestObject2>(block2, 3, false);
+  allocator_->ChangeType(block2, 2, 3);
   EXPECT_EQ(2U, allocator_->GetType(block2));
 
   // Create second allocator (read/write) using the same memory segment.
@@ -216,14 +192,16 @@
                                     TEST_MEMORY_PAGE, 0, "", false));
   EXPECT_EQ(TEST_ID, allocator2->Id());
   EXPECT_FALSE(allocator2->used_histogram_);
+  EXPECT_FALSE(allocator2->allocs_histogram_);
+  EXPECT_NE(allocator2->allocs_histogram_, allocator_->allocs_histogram_);
 
   // Ensure that iteration and access through second allocator works.
   PersistentMemoryAllocator::Iterator iter2(allocator2.get());
   EXPECT_EQ(block1, iter2.GetNext(&type));
   EXPECT_EQ(block2, iter2.GetNext(&type));
   EXPECT_EQ(0U, iter2.GetNext(&type));
-  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
-  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));
+  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));
 
   // Create a third allocator (read-only) using the same memory segment.
   std::unique_ptr<const PersistentMemoryAllocator> allocator3(
@@ -231,29 +209,20 @@
                                     TEST_MEMORY_PAGE, 0, "", true));
   EXPECT_EQ(TEST_ID, allocator3->Id());
   EXPECT_FALSE(allocator3->used_histogram_);
+  EXPECT_FALSE(allocator3->allocs_histogram_);
 
   // Ensure that iteration and access through third allocator works.
   PersistentMemoryAllocator::Iterator iter3(allocator3.get());
   EXPECT_EQ(block1, iter3.GetNext(&type));
   EXPECT_EQ(block2, iter3.GetNext(&type));
   EXPECT_EQ(0U, iter3.GetNext(&type));
-  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
-  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));
+  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));
 
   // Ensure that GetNextOfType works.
   PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
-  EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
+  EXPECT_EQ(block2, iter1c.GetNextOfType(2));
   EXPECT_EQ(0U, iter1c.GetNextOfType(2));
-
-  // Ensure that GetNextOfObject works.
-  PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
-  EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
-  EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());
-
-  // Ensure that deleting an object works.
-  allocator_->Delete(obj2);
-  PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
-  EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
 }
 
 TEST_F(PersistentMemoryAllocatorTest, PageTest) {
@@ -566,7 +535,7 @@
     r456 = local.Allocate(456, 456);
     r789 = local.Allocate(789, 789);
     local.MakeIterable(r123);
-    local.ChangeType(r456, 654, 456, false);
+    local.ChangeType(r456, 654, 456);
     local.MakeIterable(r789);
     local.GetMemoryInfo(&meminfo1);
     EXPECT_FALSE(local.IsFull());
@@ -635,25 +604,6 @@
   shalloc3.MakeIterable(obj);
   EXPECT_EQ(obj, iter2.GetNext(&type));
   EXPECT_EQ(42U, type);
-
-  // Clear-on-change test.
-  Reference data_ref = shalloc3.Allocate(sizeof(int) * 4, 911);
-  int* data = shalloc3.GetAsArray<int>(data_ref, 911, 4);
-  ASSERT_TRUE(data);
-  data[0] = 0;
-  data[1] = 1;
-  data[2] = 2;
-  data[3] = 3;
-  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 119, 911, false));
-  EXPECT_EQ(0, data[0]);
-  EXPECT_EQ(1, data[1]);
-  EXPECT_EQ(2, data[2]);
-  EXPECT_EQ(3, data[3]);
-  ASSERT_TRUE(shalloc3.ChangeType(data_ref, 191, 119, true));
-  EXPECT_EQ(0, data[0]);
-  EXPECT_EQ(0, data[1]);
-  EXPECT_EQ(0, data[2]);
-  EXPECT_EQ(0, data[3]);
 }
 
 
@@ -663,7 +613,7 @@
 TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
   ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("persistent_memory");
+  FilePath file_path = temp_dir.path().AppendASCII("persistent_memory");
 
   PersistentMemoryAllocator::MemoryInfo meminfo1;
   Reference r123, r456, r789;
@@ -674,7 +624,7 @@
     r456 = local.Allocate(456, 456);
     r789 = local.Allocate(789, 789);
     local.MakeIterable(r123);
-    local.ChangeType(r456, 654, 456, false);
+    local.ChangeType(r456, 654, 456);
     local.MakeIterable(r789);
     local.GetMemoryInfo(&meminfo1);
     EXPECT_FALSE(local.IsFull());
@@ -718,7 +668,7 @@
 TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
   ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  FilePath file_path = temp_dir.GetPath().AppendASCII("extend_test");
+  FilePath file_path = temp_dir.path().AppendASCII("extend_test");
   MemoryMappedFile::Region region = {0, 16 << 10};  // 16KiB maximum size.
 
   // Start with a small but valid file of persistent data.
@@ -784,7 +734,7 @@
   char filename[100];
   for (size_t filesize = minsize; filesize > 0; --filesize) {
     strings::SafeSPrintf(filename, "memory_%d_A", filesize);
-    FilePath file_path = temp_dir.GetPath().AppendASCII(filename);
+    FilePath file_path = temp_dir.path().AppendASCII(filename);
     ASSERT_FALSE(PathExists(file_path));
     {
       File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
@@ -815,8 +765,7 @@
       uint32_t type_id;
       Reference ref;
       while ((ref = iter.GetNext(&type_id)) != 0) {
-        const char* data = allocator.GetAsArray<char>(
-            ref, 0, PersistentMemoryAllocator::kSizeAny);
+        const char* data = allocator.GetAsObject<char>(ref, 0);
         uint32_t type = allocator.GetType(ref);
         size_t size = allocator.GetAllocSize(ref);
         // Ensure compiler can't optimize-out above variables.
@@ -835,7 +784,7 @@
     }
 
     strings::SafeSPrintf(filename, "memory_%d_B", filesize);
-    file_path = temp_dir.GetPath().AppendASCII(filename);
+    file_path = temp_dir.path().AppendASCII(filename);
     ASSERT_FALSE(PathExists(file_path));
     {
       File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
index 51cc0c7..15f83cd 100644
--- a/base/metrics/persistent_sample_map.cc
+++ b/base/metrics/persistent_sample_map.cc
@@ -6,7 +6,6 @@
 
 #include "base/logging.h"
 #include "base/memory/ptr_util.h"
-#include "base/metrics/histogram_macros.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/stl_util.h"
 
@@ -17,12 +16,6 @@
 
 namespace {
 
-enum NegativeSampleReason {
-  PERSISTENT_SPARSE_HAVE_LOGGED_BUT_NOT_SAMPLE,
-  PERSISTENT_SPARSE_SAMPLE_LESS_THAN_LOGGED,
-  MAX_NEGATIVE_SAMPLE_REASONS
-};
-
 // An iterator for going through a PersistentSampleMap. The logic here is
 // identical to that of SampleMapIterator but with different data structures.
 // Changes here likely need to be duplicated there.
@@ -89,17 +82,14 @@
 // memory allocator. The "id" must be unique across all maps held by an
 // allocator or they will get attached to the wrong sample map.
 struct SampleRecord {
-  // SHA1(SampleRecord): Increment this if structure changes!
-  static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;
-
-  // Expected size for 32/64-bit check.
-  static constexpr size_t kExpectedInstanceSize = 16;
-
   uint64_t id;   // Unique identifier of owner.
   Sample value;  // The value for which this record holds a count.
   Count count;   // The count associated with the above value.
 };
 
+// The type-id used to identify sample records inside an allocator.
+const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1;  // SHA1(SampleRecord) v1
+
 }  // namespace
 
 PersistentSampleMap::PersistentSampleMap(
@@ -151,12 +141,15 @@
 PersistentSampleMap::GetNextPersistentRecord(
     PersistentMemoryAllocator::Iterator& iterator,
     uint64_t* sample_map_id) {
-  const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
+  PersistentMemoryAllocator::Reference ref =
+      iterator.GetNextOfType(kTypeIdSampleRecord);
+  const SampleRecord* record =
+      iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
   if (!record)
     return 0;
 
   *sample_map_id = record->id;
-  return iterator.GetAsReference(record);
+  return ref;
 }
 
 // static
@@ -165,7 +158,11 @@
     PersistentMemoryAllocator* allocator,
     uint64_t sample_map_id,
     Sample value) {
-  SampleRecord* record = allocator->New<SampleRecord>();
+  PersistentMemoryAllocator::Reference ref =
+      allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
+  SampleRecord* record =
+      allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+
   if (!record) {
     NOTREACHED() << "full=" << allocator->IsFull()
                  << ", corrupt=" << allocator->IsCorrupt();
@@ -175,8 +172,6 @@
   record->id = sample_map_id;
   record->value = value;
   record->count = 0;
-
-  PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
   allocator->MakeIterable(ref);
   return ref;
 }
@@ -188,39 +183,11 @@
   Count count;
   for (; !iter->Done(); iter->Next()) {
     iter->Get(&min, &max, &count);
-    if (count == 0)
-      continue;
     if (min + 1 != max)
       return false;  // SparseHistogram only supports bucket with size 1.
 
-#if 0  // TODO(bcwhite) Re-enable efficient version after crbug.com/682680.
     *GetOrCreateSampleCountStorage(min) +=
         (op == HistogramSamples::ADD) ? count : -count;
-#else
-    if (op == HistogramSamples::ADD) {
-      *GetOrCreateSampleCountStorage(min) += count;
-    } else {
-      // Subtract is used only for determining deltas when reporting which
-      // means that it's in the "logged" iterator. It should have an active
-      // sample record and thus there is no need to try to create one.
-      NegativeSampleReason reason = MAX_NEGATIVE_SAMPLE_REASONS;
-      Count* bucket = GetSampleCountStorage(min);
-      if (bucket == nullptr) {
-        reason = PERSISTENT_SPARSE_HAVE_LOGGED_BUT_NOT_SAMPLE;
-      } else {
-        if (*bucket < count) {
-          reason = PERSISTENT_SPARSE_SAMPLE_LESS_THAN_LOGGED;
-          *bucket = 0;
-        } else {
-          *bucket -= count;
-        }
-      }
-      if (reason != MAX_NEGATIVE_SAMPLE_REASONS) {
-        UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
-                                  MAX_NEGATIVE_SAMPLE_REASONS);
-      }
-    }
-#endif
   }
   return true;
 }
@@ -286,7 +253,8 @@
   PersistentMemoryAllocator::Reference ref;
   PersistentSampleMapRecords* records = GetRecords();
   while ((ref = records->GetNext()) != 0) {
-    SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
+    SampleRecord* record =
+        records->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
     if (!record)
       continue;
 
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
index 853f862..3c175db 100644
--- a/base/metrics/persistent_sample_map.h
+++ b/base/metrics/persistent_sample_map.h
@@ -24,6 +24,7 @@
 
 class PersistentHistogramAllocator;
 class PersistentSampleMapRecords;
+class PersistentSparseHistogramDataManager;
 
 // The logic here is similar to that of SampleMap but with different data
 // structures. Changes here likely need to be duplicated there.
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
index d50ab99..beb72e5 100644
--- a/base/metrics/persistent_sample_map_unittest.cc
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -8,7 +8,6 @@
 
 #include "base/memory/ptr_util.h"
 #include "base/metrics/persistent_histogram_allocator.h"
-#include "base/test/gtest_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -16,22 +15,22 @@
 
 std::unique_ptr<PersistentHistogramAllocator> CreateHistogramAllocator(
     size_t bytes) {
-  return MakeUnique<PersistentHistogramAllocator>(
-      MakeUnique<LocalPersistentMemoryAllocator>(bytes, 0, ""));
+  return WrapUnique(new PersistentHistogramAllocator(
+      WrapUnique(new LocalPersistentMemoryAllocator(bytes, 0, ""))));
 }
 
 std::unique_ptr<PersistentHistogramAllocator> DuplicateHistogramAllocator(
     PersistentHistogramAllocator* original) {
-  return MakeUnique<PersistentHistogramAllocator>(
-      MakeUnique<PersistentMemoryAllocator>(
+  return WrapUnique(
+      new PersistentHistogramAllocator(WrapUnique(new PersistentMemoryAllocator(
           const_cast<void*>(original->data()), original->length(), 0,
-          original->Id(), original->Name(), false));
+          original->Id(), original->Name(), false))));
 }
 
 TEST(PersistentSampleMapTest, AccumulateTest) {
   std::unique_ptr<PersistentHistogramAllocator> allocator =
       CreateHistogramAllocator(64 << 10);  // 64 KiB
-  HistogramSamples::LocalMetadata meta;
+  HistogramSamples::Metadata meta;
   PersistentSampleMap samples(1, allocator.get(), &meta);
 
   samples.Accumulate(1, 100);
@@ -48,7 +47,7 @@
 TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
   std::unique_ptr<PersistentHistogramAllocator> allocator =
       CreateHistogramAllocator(64 << 10);  // 64 KiB
-  HistogramSamples::LocalMetadata meta;
+  HistogramSamples::Metadata meta;
   PersistentSampleMap samples(1, allocator.get(), &meta);
 
   samples.Accumulate(250000000, 100);
@@ -65,7 +64,7 @@
 TEST(PersistentSampleMapTest, AddSubtractTest) {
   std::unique_ptr<PersistentHistogramAllocator> allocator1 =
       CreateHistogramAllocator(64 << 10);  // 64 KiB
-  HistogramSamples::LocalMetadata meta1;
+  HistogramSamples::Metadata meta1;
   PersistentSampleMap samples1(1, allocator1.get(), &meta1);
   samples1.Accumulate(1, 100);
   samples1.Accumulate(2, 100);
@@ -73,7 +72,7 @@
 
   std::unique_ptr<PersistentHistogramAllocator> allocator2 =
       DuplicateHistogramAllocator(allocator1.get());
-  HistogramSamples::LocalMetadata meta2;
+  HistogramSamples::Metadata meta2;
   PersistentSampleMap samples2(2, allocator2.get(), &meta2);
   samples2.Accumulate(1, 200);
   samples2.Accumulate(2, 200);
@@ -101,7 +100,7 @@
 TEST(PersistentSampleMapTest, PersistenceTest) {
   std::unique_ptr<PersistentHistogramAllocator> allocator1 =
       CreateHistogramAllocator(64 << 10);  // 64 KiB
-  HistogramSamples::LocalMetadata meta12;
+  HistogramSamples::Metadata meta12;
   PersistentSampleMap samples1(12, allocator1.get(), &meta12);
   samples1.Accumulate(1, 100);
   samples1.Accumulate(2, 200);
@@ -154,7 +153,7 @@
 TEST(PersistentSampleMapIteratorTest, IterateTest) {
   std::unique_ptr<PersistentHistogramAllocator> allocator =
       CreateHistogramAllocator(64 << 10);  // 64 KiB
-  HistogramSamples::LocalMetadata meta;
+  HistogramSamples::Metadata meta;
   PersistentSampleMap samples(1, allocator.get(), &meta);
   samples.Accumulate(1, 100);
   samples.Accumulate(2, 200);
@@ -192,7 +191,7 @@
 TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
   std::unique_ptr<PersistentHistogramAllocator> allocator1 =
       CreateHistogramAllocator(64 << 10);  // 64 KiB
-  HistogramSamples::LocalMetadata meta1;
+  HistogramSamples::Metadata meta1;
   PersistentSampleMap samples1(1, allocator1.get(), &meta1);
   samples1.Accumulate(5, 1);
   samples1.Accumulate(10, 2);
@@ -202,7 +201,7 @@
 
   std::unique_ptr<PersistentHistogramAllocator> allocator2 =
       DuplicateHistogramAllocator(allocator1.get());
-  HistogramSamples::LocalMetadata meta2;
+  HistogramSamples::Metadata meta2;
   PersistentSampleMap samples2(2, allocator2.get(), &meta2);
   samples2.Accumulate(5, 1);
   samples2.Accumulate(20, 4);
@@ -234,10 +233,12 @@
   EXPECT_TRUE(it->Done());
 }
 
+// Only run this test on builds that support catching a DCHECK crash.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
 TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
   std::unique_ptr<PersistentHistogramAllocator> allocator =
       CreateHistogramAllocator(64 << 10);  // 64 KiB
-  HistogramSamples::LocalMetadata meta;
+  HistogramSamples::Metadata meta;
   PersistentSampleMap samples(1, allocator.get(), &meta);
 
   std::unique_ptr<SampleCountIterator> it = samples.Iterator();
@@ -247,14 +248,16 @@
   HistogramBase::Sample min;
   HistogramBase::Sample max;
   HistogramBase::Count count;
-  EXPECT_DCHECK_DEATH(it->Get(&min, &max, &count));
+  EXPECT_DEATH(it->Get(&min, &max, &count), "");
 
-  EXPECT_DCHECK_DEATH(it->Next());
+  EXPECT_DEATH(it->Next(), "");
 
   samples.Accumulate(1, 100);
   it = samples.Iterator();
   EXPECT_FALSE(it->Done());
 }
+#endif
+// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
 
 }  // namespace
 }  // namespace base
diff --git a/base/metrics/sample_map_unittest.cc b/base/metrics/sample_map_unittest.cc
index 9d7e818..8f57710 100644
--- a/base/metrics/sample_map_unittest.cc
+++ b/base/metrics/sample_map_unittest.cc
@@ -6,7 +6,6 @@
 
 #include <memory>
 
-#include "base/test/gtest_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
diff --git a/base/metrics/sample_vector_unittest.cc b/base/metrics/sample_vector_unittest.cc
index 897ceed..02e48aa 100644
--- a/base/metrics/sample_vector_unittest.cc
+++ b/base/metrics/sample_vector_unittest.cc
@@ -12,7 +12,6 @@
 
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram.h"
-#include "base/test/gtest_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index bee48d4..3c1222d 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -68,7 +68,7 @@
     ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
   }
 
-  CHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
+  DCHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
   return histogram;
 }
 
@@ -282,8 +282,8 @@
                 "Histogram: %s recorded %d samples",
                 histogram_name().c_str(),
                 total_count);
-  if (flags())
-    StringAppendF(output, " (flags = 0x%x)", flags());
+  if (flags() & ~kHexRangePrintingFlag)
+    StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag);
 }
 
 }  // namespace base
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
index 97709ba..3b302d6 100644
--- a/base/metrics/sparse_histogram.h
+++ b/base/metrics/sparse_histogram.h
@@ -13,17 +13,45 @@
 #include <string>
 
 #include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_samples.h"
+#include "base/metrics/sample_map.h"
 #include "base/synchronization/lock.h"
 
 namespace base {
 
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a large range.
+//
+// The implementation uses a lock and a map, whereas other histogram types use a
+// vector and no lock. It is thus more costly to add values to, and each value
+// stored has more overhead, compared to the other histogram types. However it
+// may be more efficient in memory if the total number of sample values is small
+// compared to the range of their values.
+//
+// UMA_HISTOGRAM_ENUMERATION would be better suited for a smaller range of
+// enumerations that are (nearly) contiguous. Also for code that is expected to
+// run often or in a tight loop.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and or
+// infrequently recorded values.
+//
+// For instance, Sqlite.Version.* are SPARSE because for any given database,
+// there's going to be exactly one version logged, meaning no gain to having a
+// pre-allocated vector of slots once the fleet gets to version 4 or 5 or 10.
+// Likewise Sqlite.Error.* are SPARSE, because most databases generate few or no
+// errors and there are large gaps in the set of possible errors.
+#define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
+    do { \
+      base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
+          name, base::HistogramBase::kUmaTargetedHistogramFlag); \
+      histogram->Add(sample); \
+    } while (0)
+
 class HistogramSamples;
 class PersistentHistogramAllocator;
-class Pickle;
-class PickleIterator;
 
 class BASE_EXPORT SparseHistogram : public HistogramBase {
  public:
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index f4a7c94..eab7790 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -8,7 +8,6 @@
 #include <string>
 
 #include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_macros.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/persistent_memory_allocator.h"
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index 74c964a..42ed5a9 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -16,6 +16,7 @@
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
+#include "base/synchronization/lock.h"
 #include "base/values.h"
 
 namespace {
@@ -58,10 +59,10 @@
 StatisticsRecorder::HistogramIterator&
 StatisticsRecorder::HistogramIterator::operator++() {
   const HistogramMap::iterator histograms_end = histograms_->end();
-  if (iter_ == histograms_end)
+  if (iter_ == histograms_end || lock_ == NULL)
     return *this;
 
-  base::AutoLock auto_lock(lock_.Get());
+  base::AutoLock auto_lock(*lock_);
 
   for (;;) {
     ++iter_;
@@ -78,63 +79,51 @@
 }
 
 StatisticsRecorder::~StatisticsRecorder() {
+  DCHECK(lock_);
   DCHECK(histograms_);
   DCHECK(ranges_);
 
   // Clean out what this object created and then restore what existed before.
   Reset();
-  base::AutoLock auto_lock(lock_.Get());
+  base::AutoLock auto_lock(*lock_);
   histograms_ = existing_histograms_.release();
   callbacks_ = existing_callbacks_.release();
   ranges_ = existing_ranges_.release();
-  providers_ = existing_providers_.release();
 }
 
 // static
 void StatisticsRecorder::Initialize() {
-  // Tests sometimes create local StatisticsRecorders in order to provide a
-  // contained environment of histograms that can be later discarded. If a
-  // true global instance gets created in this environment then it will
-  // eventually get disconnected when the local instance destructs and
-  // restores the previous state, resulting in no StatisticsRecorder at all.
-  // The global lazy instance, however, will remain valid thus ensuring that
-  // another never gets installed via this method. If a |histograms_| map
-  // exists then assume the StatisticsRecorder is already "initialized".
-  if (histograms_)
-    return;
-
   // Ensure that an instance of the StatisticsRecorder object is created.
   g_statistics_recorder_.Get();
 }
 
 // static
 bool StatisticsRecorder::IsActive() {
-  base::AutoLock auto_lock(lock_.Get());
-  return histograms_ != nullptr;
-}
-
-// static
-void StatisticsRecorder::RegisterHistogramProvider(
-    const WeakPtr<HistogramProvider>& provider) {
-  providers_->push_back(provider);
+  if (lock_ == NULL)
+    return false;
+  base::AutoLock auto_lock(*lock_);
+  return NULL != histograms_;
 }
 
 // static
 HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
     HistogramBase* histogram) {
-  HistogramBase* histogram_to_delete = nullptr;
-  HistogramBase* histogram_to_return = nullptr;
-  {
-    base::AutoLock auto_lock(lock_.Get());
-    if (!histograms_) {
-      histogram_to_return = histogram;
+  // As per crbug.com/79322 the histograms are intentionally leaked, so we need
+  // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
+  // for an object, the duplicates should not be annotated.
+  // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
+  // twice if (lock_ == NULL) || (!histograms_).
+  if (lock_ == NULL) {
+    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
+    return histogram;
+  }
 
-      // As per crbug.com/79322 the histograms are intentionally leaked, so we
-      // need to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used
-      // only once for an object, the duplicates should not be annotated.
-      // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
-      // twice |if (!histograms_)|.
-      ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
+  HistogramBase* histogram_to_delete = NULL;
+  HistogramBase* histogram_to_return = NULL;
+  {
+    base::AutoLock auto_lock(*lock_);
+    if (histograms_ == NULL) {
+      histogram_to_return = histogram;
     } else {
       const std::string& name = histogram->histogram_name();
       HistogramMap::iterator it = histograms_->find(name);
@@ -175,8 +164,13 @@
   DCHECK(ranges->HasValidChecksum());
   std::unique_ptr<const BucketRanges> ranges_deleter;
 
-  base::AutoLock auto_lock(lock_.Get());
-  if (!ranges_) {
+  if (lock_ == NULL) {
+    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
+    return ranges;
+  }
+
+  base::AutoLock auto_lock(*lock_);
+  if (ranges_ == NULL) {
     ANNOTATE_LEAKING_OBJECT_PTR(ranges);
     return ranges;
   }
@@ -273,8 +267,10 @@
 
 // static
 void StatisticsRecorder::GetHistograms(Histograms* output) {
-  base::AutoLock auto_lock(lock_.Get());
-  if (!histograms_)
+  if (lock_ == NULL)
+    return;
+  base::AutoLock auto_lock(*lock_);
+  if (histograms_ == NULL)
     return;
 
   for (const auto& entry : *histograms_) {
@@ -285,8 +281,10 @@
 // static
 void StatisticsRecorder::GetBucketRanges(
     std::vector<const BucketRanges*>* output) {
-  base::AutoLock auto_lock(lock_.Get());
-  if (!ranges_)
+  if (lock_ == NULL)
+    return;
+  base::AutoLock auto_lock(*lock_);
+  if (ranges_ == NULL)
     return;
 
   for (const auto& entry : *ranges_) {
@@ -303,31 +301,19 @@
   // will acquire the lock at that time.
   ImportGlobalPersistentHistograms();
 
-  base::AutoLock auto_lock(lock_.Get());
-  if (!histograms_)
-    return nullptr;
+  if (lock_ == NULL)
+    return NULL;
+  base::AutoLock auto_lock(*lock_);
+  if (histograms_ == NULL)
+    return NULL;
 
   HistogramMap::iterator it = histograms_->find(name);
   if (histograms_->end() == it)
-    return nullptr;
+    return NULL;
   return it->second;
 }
 
 // static
-void StatisticsRecorder::ImportProvidedHistograms() {
-  if (!providers_)
-    return;
-
-  // Merge histogram data from each provider in turn.
-  for (const WeakPtr<HistogramProvider>& provider : *providers_) {
-    // Weak-pointer may be invalid if the provider was destructed, though they
-    // generally never are.
-    if (provider)
-      provider->MergeHistogramDeltas();
-  }
-}
-
-// static
 StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
     bool include_persistent) {
   DCHECK(histograms_);
@@ -335,7 +321,7 @@
 
   HistogramMap::iterator iter_begin;
   {
-    base::AutoLock auto_lock(lock_.Get());
+    base::AutoLock auto_lock(*lock_);
     iter_begin = histograms_->begin();
   }
   return HistogramIterator(iter_begin, include_persistent);
@@ -345,7 +331,7 @@
 StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
   HistogramMap::iterator iter_end;
   {
-    base::AutoLock auto_lock(lock_.Get());
+    base::AutoLock auto_lock(*lock_);
     iter_end = histograms_->end();
   }
   return HistogramIterator(iter_end, true);
@@ -353,21 +339,20 @@
 
 // static
 void StatisticsRecorder::InitLogOnShutdown() {
-  if (!histograms_)
+  if (lock_ == nullptr)
     return;
-
-  base::AutoLock auto_lock(lock_.Get());
+  base::AutoLock auto_lock(*lock_);
   g_statistics_recorder_.Get().InitLogOnShutdownWithoutLock();
 }
 
 // static
 void StatisticsRecorder::GetSnapshot(const std::string& query,
                                      Histograms* snapshot) {
-  base::AutoLock auto_lock(lock_.Get());
-  if (!histograms_)
+  if (lock_ == NULL)
     return;
-
-  ImportGlobalPersistentHistograms();
+  base::AutoLock auto_lock(*lock_);
+  if (histograms_ == NULL)
+    return;
 
   for (const auto& entry : *histograms_) {
     if (entry.second->histogram_name().find(query) != std::string::npos)
@@ -380,8 +365,10 @@
     const std::string& name,
     const StatisticsRecorder::OnSampleCallback& cb) {
   DCHECK(!cb.is_null());
-  base::AutoLock auto_lock(lock_.Get());
-  if (!histograms_)
+  if (lock_ == NULL)
+    return false;
+  base::AutoLock auto_lock(*lock_);
+  if (histograms_ == NULL)
     return false;
 
   if (ContainsKey(*callbacks_, name))
@@ -397,8 +384,10 @@
 
 // static
 void StatisticsRecorder::ClearCallback(const std::string& name) {
-  base::AutoLock auto_lock(lock_.Get());
-  if (!histograms_)
+  if (lock_ == NULL)
+    return;
+  base::AutoLock auto_lock(*lock_);
+  if (histograms_ == NULL)
     return;
 
   callbacks_->erase(name);
@@ -412,8 +401,10 @@
 // static
 StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
     const std::string& name) {
-  base::AutoLock auto_lock(lock_.Get());
-  if (!histograms_)
+  if (lock_ == NULL)
+    return OnSampleCallback();
+  base::AutoLock auto_lock(*lock_);
+  if (histograms_ == NULL)
     return OnSampleCallback();
 
   auto callback_iterator = callbacks_->find(name);
@@ -423,7 +414,10 @@
 
 // static
 size_t StatisticsRecorder::GetHistogramCount() {
-  base::AutoLock auto_lock(lock_.Get());
+  if (!lock_)
+    return 0;
+
+  base::AutoLock auto_lock(*lock_);
   if (!histograms_)
     return 0;
   return histograms_->size();
@@ -444,7 +438,7 @@
 // static
 void StatisticsRecorder::UninitializeForTesting() {
   // Stop now if it's never been initialized.
-  if (!histograms_)
+  if (lock_ == NULL || histograms_ == NULL)
     return;
 
   // Get the global instance and destruct it. It's held in static memory so
@@ -460,7 +454,7 @@
 
 // static
 void StatisticsRecorder::ImportGlobalPersistentHistograms() {
-  if (!histograms_)
+  if (lock_ == NULL)
     return;
 
   // Import histograms from known persistent storage. Histograms could have
@@ -476,17 +470,25 @@
 // of main(), and hence it is not thread safe.  It initializes globals to
 // provide support for all future calls.
 StatisticsRecorder::StatisticsRecorder() {
-  base::AutoLock auto_lock(lock_.Get());
+  if (lock_ == NULL) {
+    // This will leak on purpose. It's the only way to make sure we won't race
+    // against the static uninitialization of the module while one of our
+    // static methods relying on the lock get called at an inappropriate time
+    // during the termination phase. Since it's a static data member, we will
+    // leak one per process, which would be similar to the instance allocated
+    // during static initialization and released only on  process termination.
+    lock_ = new base::Lock;
+  }
+
+  base::AutoLock auto_lock(*lock_);
 
   existing_histograms_.reset(histograms_);
   existing_callbacks_.reset(callbacks_);
   existing_ranges_.reset(ranges_);
-  existing_providers_.reset(providers_);
 
   histograms_ = new HistogramMap;
   callbacks_ = new CallbackMap;
   ranges_ = new RangesMap;
-  providers_ = new HistogramProviders;
 
   InitLogOnShutdownWithoutLock();
 }
@@ -500,21 +502,23 @@
 
 // static
 void StatisticsRecorder::Reset() {
+  // If there's no lock then there is nothing to reset.
+  if (!lock_)
+    return;
 
   std::unique_ptr<HistogramMap> histograms_deleter;
   std::unique_ptr<CallbackMap> callbacks_deleter;
   std::unique_ptr<RangesMap> ranges_deleter;
-  std::unique_ptr<HistogramProviders> providers_deleter;
+  // We don't delete lock_ on purpose to avoid having to properly protect
+  // against it going away after we checked for NULL in the static methods.
   {
-    base::AutoLock auto_lock(lock_.Get());
+    base::AutoLock auto_lock(*lock_);
     histograms_deleter.reset(histograms_);
     callbacks_deleter.reset(callbacks_);
     ranges_deleter.reset(ranges_);
-    providers_deleter.reset(providers_);
-    histograms_ = nullptr;
-    callbacks_ = nullptr;
-    ranges_ = nullptr;
-    providers_ = nullptr;
+    histograms_ = NULL;
+    callbacks_ = NULL;
+    ranges_ = NULL;
   }
   // We are going to leak the histograms and the ranges.
 }
@@ -528,15 +532,12 @@
 
 
 // static
-StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = nullptr;
+StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
 // static
-StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = nullptr;
+StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = NULL;
 // static
-StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = nullptr;
+StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
 // static
-StatisticsRecorder::HistogramProviders* StatisticsRecorder::providers_;
-// static
-base::LazyInstance<base::Lock>::Leaky StatisticsRecorder::lock_ =
-    LAZY_INSTANCE_INITIALIZER;
+base::Lock* StatisticsRecorder::lock_ = NULL;
 
 }  // namespace base
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
index 55be86a..c3c6ace 100644
--- a/base/metrics/statistics_recorder.h
+++ b/base/metrics/statistics_recorder.h
@@ -23,14 +23,15 @@
 #include "base/gtest_prod_util.h"
 #include "base/lazy_instance.h"
 #include "base/macros.h"
-#include "base/memory/weak_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/strings/string_piece.h"
-#include "base/synchronization/lock.h"
+
+class SubprocessMetricsProviderTest;
 
 namespace base {
 
 class BucketRanges;
+class Lock;
 
 class BASE_EXPORT StatisticsRecorder {
  public:
@@ -63,18 +64,8 @@
     }
   };
 
-  // An interface class that allows the StatisticsRecorder to forcibly merge
-  // histograms from providers when necessary.
-  class HistogramProvider {
-   public:
-    virtual ~HistogramProvider() {}
-    // Merges all histogram information into the global versions.
-    virtual void MergeHistogramDeltas() = 0;
-  };
-
   typedef std::map<StringKey, HistogramBase*> HistogramMap;
   typedef std::vector<HistogramBase*> Histograms;
-  typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
 
   // A class for iterating over the histograms held within this global resource.
   class BASE_EXPORT HistogramIterator {
@@ -112,12 +103,6 @@
   // Find out if histograms can now be registered into our list.
   static bool IsActive();
 
-  // Register a provider of histograms that can be called to merge those into
-  // the global StatisticsRecorder. Calls to ImportProvidedHistograms() will
-  // fetch from registered providers.
-  static void RegisterHistogramProvider(
-      const WeakPtr<HistogramProvider>& provider);
-
   // Register, or add a new histogram to the collection of statistics. If an
   // identically named histogram is already registered, then the argument
   // |histogram| will deleted.  The returned value is always the registered
@@ -151,9 +136,6 @@
   // safe.  It returns NULL if a matching histogram is not found.
   static HistogramBase* FindHistogram(base::StringPiece name);
 
-  // Imports histograms from providers. This must be called on the UI thread.
-  static void ImportProvidedHistograms();
-
   // Support for iterating over known histograms.
   static HistogramIterator begin(bool include_persistent);
   static HistogramIterator end();
@@ -218,7 +200,7 @@
   // |bucket_ranges_|.
   typedef std::map<uint32_t, std::list<const BucketRanges*>*> RangesMap;
 
-  friend struct LazyInstanceTraitsBase<StatisticsRecorder>;
+  friend struct DefaultLazyInstanceTraits<StatisticsRecorder>;
   friend class StatisticsRecorderTest;
 
   // Imports histograms from global persistent memory. The global lock must
@@ -240,7 +222,6 @@
   std::unique_ptr<HistogramMap> existing_histograms_;
   std::unique_ptr<CallbackMap> existing_callbacks_;
   std::unique_ptr<RangesMap> existing_ranges_;
-  std::unique_ptr<HistogramProviders> existing_providers_;
 
   bool vlog_initialized_ = false;
 
@@ -250,13 +231,9 @@
   static HistogramMap* histograms_;
   static CallbackMap* callbacks_;
   static RangesMap* ranges_;
-  static HistogramProviders* providers_;
 
-  // Lock protects access to above maps. This is a LazyInstance to avoid races
-  // when the above methods are used before Initialize(). Previously each method
-  // would do |if (!lock_) return;| which would race with
-  // |lock_ = new Lock;| in StatisticsRecorder(). http://crbug.com/672852.
-  static base::LazyInstance<base::Lock>::Leaky lock_;
+  // Lock protects access to above maps.
+  static base::Lock* lock_;
 
   DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
 };
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index 48b6df3..65e2c98 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -12,7 +12,6 @@
 #include "base/bind.h"
 #include "base/json/json_reader.h"
 #include "base/logging.h"
-#include "base/memory/weak_ptr.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/sparse_histogram.h"
@@ -657,74 +656,4 @@
   EXPECT_TRUE(VLogInitialized());
 }
 
-class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
- public:
-  TestHistogramProvider(std::unique_ptr<PersistentHistogramAllocator> allocator)
-      : allocator_(std::move(allocator)), weak_factory_(this) {
-    StatisticsRecorder::RegisterHistogramProvider(weak_factory_.GetWeakPtr());
-  }
-
-  void MergeHistogramDeltas() override {
-    PersistentHistogramAllocator::Iterator hist_iter(allocator_.get());
-    while (true) {
-      std::unique_ptr<base::HistogramBase> histogram = hist_iter.GetNext();
-      if (!histogram)
-        break;
-      allocator_->MergeHistogramDeltaToStatisticsRecorder(histogram.get());
-    }
-  }
-
- private:
-  std::unique_ptr<PersistentHistogramAllocator> allocator_;
-  WeakPtrFactory<TestHistogramProvider> weak_factory_;
-
-  DISALLOW_COPY_AND_ASSIGN(TestHistogramProvider);
-};
-
-TEST_P(StatisticsRecorderTest, ImportHistogramsTest) {
-  // Create a second SR to create some histograms for later import.
-  std::unique_ptr<StatisticsRecorder> temp_sr =
-      StatisticsRecorder::CreateTemporaryForTesting();
-
-  // Extract any existing global allocator so a new one can be created.
-  std::unique_ptr<GlobalHistogramAllocator> old_allocator =
-      GlobalHistogramAllocator::ReleaseForTesting();
-
-  // Create a histogram inside a new allocator for testing.
-  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
-  HistogramBase* histogram = LinearHistogram::FactoryGet("Foo", 1, 10, 11, 0);
-  histogram->Add(3);
-
-  // Undo back to the starting point.
-  std::unique_ptr<GlobalHistogramAllocator> new_allocator =
-      GlobalHistogramAllocator::ReleaseForTesting();
-  GlobalHistogramAllocator::Set(std::move(old_allocator));
-  temp_sr.reset();
-
-  // Create a provider that can supply histograms to the current SR.
-  TestHistogramProvider provider(std::move(new_allocator));
-
-  // Verify that the created histogram is no longer known.
-  ASSERT_FALSE(StatisticsRecorder::FindHistogram(histogram->histogram_name()));
-
-  // Now test that it merges.
-  StatisticsRecorder::ImportProvidedHistograms();
-  HistogramBase* found =
-      StatisticsRecorder::FindHistogram(histogram->histogram_name());
-  ASSERT_TRUE(found);
-  EXPECT_NE(histogram, found);
-  std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
-  EXPECT_EQ(1, snapshot->TotalCount());
-  EXPECT_EQ(1, snapshot->GetCount(3));
-
-  // Finally, verify that updates can also be merged.
-  histogram->Add(3);
-  histogram->Add(5);
-  StatisticsRecorder::ImportProvidedHistograms();
-  snapshot = found->SnapshotSamples();
-  EXPECT_EQ(3, snapshot->TotalCount());
-  EXPECT_EQ(2, snapshot->GetCount(3));
-  EXPECT_EQ(1, snapshot->GetCount(5));
-}
-
 }  // namespace base
diff --git a/base/metrics/user_metrics.cc b/base/metrics/user_metrics.cc
index 65ac918..169a063 100644
--- a/base/metrics/user_metrics.cc
+++ b/base/metrics/user_metrics.cc
@@ -17,10 +17,10 @@
 namespace base {
 namespace {
 
-LazyInstance<std::vector<ActionCallback>>::DestructorAtExit g_callbacks =
+LazyInstance<std::vector<ActionCallback>> g_callbacks =
     LAZY_INSTANCE_INITIALIZER;
-LazyInstance<scoped_refptr<SingleThreadTaskRunner>>::DestructorAtExit
-    g_task_runner = LAZY_INSTANCE_INITIALIZER;
+LazyInstance<scoped_refptr<SingleThreadTaskRunner>> g_task_runner =
+    LAZY_INSTANCE_INITIALIZER;
 
 }  // namespace
 
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
index 87fbd9c..93701e8 100644
--- a/base/metrics/user_metrics.h
+++ b/base/metrics/user_metrics.h
@@ -17,9 +17,6 @@
 // This module provides some helper functions for logging actions tracked by
 // the user metrics system.
 
-// For best practices on deciding when to emit a user action, see
-// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/actions/README.md
-
 // Record that the user performed an action.
 // This function must be called after the task runner has been set with
 // SetRecordActionTaskRunner().
diff --git a/base/native_library.h b/base/native_library.h
index 02eae1d..b4f3a3c 100644
--- a/base/native_library.h
+++ b/base/native_library.h
@@ -65,32 +65,12 @@
 #endif  // OS_WIN
 };
 
-struct BASE_EXPORT NativeLibraryOptions {
-  NativeLibraryOptions() = default;
-  NativeLibraryOptions(const NativeLibraryOptions& options) = default;
-
-  // If |true|, a loaded library is required to prefer local symbol resolution
-  // before considering global symbols. Note that this is already the default
-  // behavior on most systems. Setting this to |false| does not guarantee the
-  // inverse, i.e., it does not force a preference for global symbols over local
-  // ones.
-  bool prefer_own_symbols = false;
-};
-
 // Loads a native library from disk.  Release it with UnloadNativeLibrary when
 // you're done.  Returns NULL on failure.
 // If |error| is not NULL, it may be filled in on load error.
 BASE_EXPORT NativeLibrary LoadNativeLibrary(const FilePath& library_path,
                                             NativeLibraryLoadError* error);
 
-// Loads a native library from disk.  Release it with UnloadNativeLibrary when
-// you're done.  Returns NULL on failure.
-// If |error| is not NULL, it may be filled in on load error.
-BASE_EXPORT NativeLibrary LoadNativeLibraryWithOptions(
-    const FilePath& library_path,
-    const NativeLibraryOptions& options,
-    NativeLibraryLoadError* error);
-
 #if defined(OS_WIN)
 // Loads a native library from disk.  Release it with UnloadNativeLibrary when
 // you're done.
diff --git a/base/native_library_posix.cc b/base/native_library_posix.cc
index 3459716..2dc434b 100644
--- a/base/native_library_posix.cc
+++ b/base/native_library_posix.cc
@@ -19,27 +19,16 @@
 }
 
 // static
-NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
-                                           const NativeLibraryOptions& options,
-                                           NativeLibraryLoadError* error) {
+NativeLibrary LoadNativeLibrary(const FilePath& library_path,
+                                NativeLibraryLoadError* error) {
   // dlopen() opens the file off disk.
   ThreadRestrictions::AssertIOAllowed();
 
-  // We deliberately do not use RTLD_DEEPBIND by default.  For the history why,
-  // please refer to the bug tracker.  Some useful bug reports to read include:
+  // We deliberately do not use RTLD_DEEPBIND.  For the history why, please
+  // refer to the bug tracker.  Some useful bug reports to read include:
   // http://crbug.com/17943, http://crbug.com/17557, http://crbug.com/36892,
   // and http://crbug.com/40794.
-  int flags = RTLD_LAZY;
-#if defined(OS_ANDROID) || !defined(RTLD_DEEPBIND)
-  // Certain platforms don't define RTLD_DEEPBIND. Android dlopen() requires
-  // further investigation, as it might vary across versions. Crash here to
-  // warn developers that they're trying to rely on uncertain behavior.
-  CHECK(!options.prefer_own_symbols);
-#else
-  if (options.prefer_own_symbols)
-    flags |= RTLD_DEEPBIND;
-#endif
-  void* dl = dlopen(library_path.value().c_str(), flags);
+  void* dl = dlopen(library_path.value().c_str(), RTLD_LAZY);
   if (!dl && error)
     error->message = dlerror();
 
diff --git a/base/numerics/safe_conversions.h b/base/numerics/safe_conversions.h
index b0ec279..6b558af 100644
--- a/base/numerics/safe_conversions.h
+++ b/base/numerics/safe_conversions.h
@@ -8,130 +8,95 @@
 #include <stddef.h>
 
 #include <limits>
-#include <ostream>
 #include <type_traits>
 
+#include "base/logging.h"
 #include "base/numerics/safe_conversions_impl.h"
 
 namespace base {
 
-// The following are helper constexpr template functions and classes for safely
-// performing a range of conversions, assignments, and tests:
-//
-//  checked_cast<> - Analogous to static_cast<> for numeric types, except
-//      that it CHECKs that the specified numeric conversion will not overflow
-//      or underflow. NaN source will always trigger a CHECK.
-//      The default CHECK triggers a crash, but the handler can be overriden.
-//  saturated_cast<> - Analogous to static_cast<> for numeric types, except
-//      that it returns a saturated result when the specified numeric conversion
-//      would otherwise overflow or underflow. An NaN source returns 0 by
-//      default, but can be overridden to return a different result.
-//  strict_cast<> - Analogous to static_cast<> for numeric types, except that
-//      it will cause a compile failure if the destination type is not large
-//      enough to contain any value in the source type. It performs no runtime
-//      checking and thus introduces no runtime overhead.
-//  IsValueInRangeForNumericType<>() - A convenience function that returns true
-//      if the type supplied to the template parameter can represent the value
-//      passed as an argument to the function.
-//  IsValueNegative<>() - A convenience function that will accept any arithmetic
-//      type as an argument and will return whether the value is less than zero.
-//      Unsigned types always return false.
-//  SafeUnsignedAbs() - Returns the absolute value of the supplied integer
-//      parameter as an unsigned result (thus avoiding an overflow if the value
-//      is the signed, two's complement minimum).
-//  StrictNumeric<> - A wrapper type that performs assignments and copies via
-//      the strict_cast<> template, and can perform valid arithmetic comparisons
-//      across any range of arithmetic types. StrictNumeric is the return type
-//      for values extracted from a CheckedNumeric class instance. The raw
-//      arithmetic value is extracted via static_cast to the underlying type.
-//  MakeStrictNum() - Creates a new StrictNumeric from the underlying type of
-//      the supplied arithmetic or StrictNumeric type.
-
 // Convenience function that returns true if the supplied value is in range
 // for the destination type.
 template <typename Dst, typename Src>
 constexpr bool IsValueInRangeForNumericType(Src value) {
-  return internal::DstRangeRelationToSrcRange<Dst>(value).IsValid();
+  return internal::DstRangeRelationToSrcRange<Dst>(value) ==
+         internal::RANGE_VALID;
 }
 
-// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
-struct CheckOnFailure {
-  template <typename T>
-  static T HandleFailure() {
-#if defined(__GNUC__) || defined(__clang__)
-    __builtin_trap();
-#else
-    ((void)(*(volatile char*)0 = 0));
-#endif
-    return T();
-  }
-};
+// Convenience function for determining if a numeric value is negative without
+// throwing compiler warnings on: unsigned(value) < 0.
+template <typename T>
+constexpr typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+IsValueNegative(T value) {
+  static_assert(std::numeric_limits<T>::is_specialized,
+                "Argument must be numeric.");
+  return value < 0;
+}
+
+template <typename T>
+constexpr typename std::enable_if<!std::numeric_limits<T>::is_signed,
+                                  bool>::type IsValueNegative(T) {
+  static_assert(std::numeric_limits<T>::is_specialized,
+                "Argument must be numeric.");
+  return false;
+}
 
 // checked_cast<> is analogous to static_cast<> for numeric types,
 // except that it CHECKs that the specified numeric conversion will not
 // overflow or underflow. NaN source will always trigger a CHECK.
-template <typename Dst,
-          class CheckHandler = CheckOnFailure,
-          typename Src>
-constexpr Dst checked_cast(Src value) {
-  // This throws a compile-time error on evaluating the constexpr if it can be
-  // determined at compile-time as failing, otherwise it will CHECK at runtime.
-  using SrcType = typename internal::UnderlyingType<Src>::type;
-  return IsValueInRangeForNumericType<Dst, SrcType>(value)
-             ? static_cast<Dst>(static_cast<SrcType>(value))
-             : CheckHandler::template HandleFailure<Dst>();
+template <typename Dst, typename Src>
+inline Dst checked_cast(Src value) {
+  CHECK(IsValueInRangeForNumericType<Dst>(value));
+  return static_cast<Dst>(value);
 }
 
-// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
-template <typename T>
-struct SaturationDefaultHandler {
-  static constexpr T NaN() {
-    return std::numeric_limits<T>::has_quiet_NaN
-               ? std::numeric_limits<T>::quiet_NaN()
-               : T();
+// HandleNaN will cause this class to CHECK(false).
+struct SaturatedCastNaNBehaviorCheck {
+  template <typename T>
+  static T HandleNaN() {
+    CHECK(false);
+    return T();
   }
-  static constexpr T max() { return std::numeric_limits<T>::max(); }
-  static constexpr T Overflow() {
-    return std::numeric_limits<T>::has_infinity
-               ? std::numeric_limits<T>::infinity()
-               : std::numeric_limits<T>::max();
-  }
-  static constexpr T lowest() { return std::numeric_limits<T>::lowest(); }
-  static constexpr T Underflow() {
-    return std::numeric_limits<T>::has_infinity
-               ? std::numeric_limits<T>::infinity() * -1
-               : std::numeric_limits<T>::lowest();
+};
+
+// HandleNaN will return 0 in this case.
+struct SaturatedCastNaNBehaviorReturnZero {
+  template <typename T>
+  static constexpr T HandleNaN() {
+    return T();
   }
 };
 
 namespace internal {
-
-template <typename Dst, template <typename> class S, typename Src>
-constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
-  // For some reason clang generates much better code when the branch is
-  // structured exactly this way, rather than a sequence of checks.
-  return !constraint.IsOverflowFlagSet()
-             ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
-                                                 : S<Dst>::Underflow())
-             // Skip this check for integral Src, which cannot be NaN.
-             : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
-                    ? S<Dst>::Overflow()
-                    : S<Dst>::NaN());
+// This wrapper is used for C++11 constexpr support by avoiding the declaration
+// of local variables in the saturated_cast template function.
+template <typename Dst, class NaNHandler, typename Src>
+constexpr Dst saturated_cast_impl(const Src value,
+                                  const RangeConstraint constraint) {
+  return constraint == RANGE_VALID
+             ? static_cast<Dst>(value)
+             : (constraint == RANGE_UNDERFLOW
+                    ? std::numeric_limits<Dst>::min()
+                    : (constraint == RANGE_OVERFLOW
+                           ? std::numeric_limits<Dst>::max()
+                           : (constraint == RANGE_INVALID
+                                  ? NaNHandler::template HandleNaN<Dst>()
+                                  : (NOTREACHED(), static_cast<Dst>(value)))));
 }
+}  // namespace internal
 
 // saturated_cast<> is analogous to static_cast<> for numeric types, except
-// that the specified numeric conversion will saturate by default rather than
-// overflow or underflow, and NaN assignment to an integral will return 0.
-// All boundary condition behaviors can be overriden with a custom handler.
+// that the specified numeric conversion will saturate rather than overflow or
+// underflow. NaN assignment to an integral will defer the behavior to a
+// specified class. By default, it will return 0.
 template <typename Dst,
-          template <typename>
-          class SaturationHandler = SaturationDefaultHandler,
+          class NaNHandler = SaturatedCastNaNBehaviorReturnZero,
           typename Src>
 constexpr Dst saturated_cast(Src value) {
-  using SrcType = typename UnderlyingType<Src>::type;
-  return saturated_cast_impl<Dst, SaturationHandler, SrcType>(
-      value,
-      DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(value));
+  return std::numeric_limits<Dst>::is_iec559
+             ? static_cast<Dst>(value)  // Floating point optimization.
+             : internal::saturated_cast_impl<Dst, NaNHandler>(
+                   value, internal::DstRangeRelationToSrcRange<Dst>(value));
 }
 
 // strict_cast<> is analogous to static_cast<> for numeric types, except that
@@ -139,40 +104,22 @@
 // to contain any value in the source type. It performs no runtime checking.
 template <typename Dst, typename Src>
 constexpr Dst strict_cast(Src value) {
-  using SrcType = typename UnderlyingType<Src>::type;
-  static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
-  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+  static_assert(std::numeric_limits<Src>::is_specialized,
+                "Argument must be numeric.");
+  static_assert(std::numeric_limits<Dst>::is_specialized,
+                "Result must be numeric.");
+  static_assert((internal::StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+                 internal::NUMERIC_RANGE_CONTAINED),
+                "The numeric conversion is out of range for this type. You "
+                "should probably use one of the following conversion "
+                "mechanisms on the value you want to pass:\n"
+                "- base::checked_cast\n"
+                "- base::saturated_cast\n"
+                "- base::CheckedNumeric");
 
-  // If you got here from a compiler error, it's because you tried to assign
-  // from a source type to a destination type that has insufficient range.
-  // The solution may be to change the destination type you're assigning to,
-  // and use one large enough to represent the source.
-  // Alternatively, you may be better served with the checked_cast<> or
-  // saturated_cast<> template functions for your particular use case.
-  static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
-                    NUMERIC_RANGE_CONTAINED,
-                "The source type is out of range for the destination type. "
-                "Please see strict_cast<> comments for more information.");
-
-  return static_cast<Dst>(static_cast<SrcType>(value));
+  return static_cast<Dst>(value);
 }
 
-// Some wrappers to statically check that a type is in range.
-template <typename Dst, typename Src, class Enable = void>
-struct IsNumericRangeContained {
-  static const bool value = false;
-};
-
-template <typename Dst, typename Src>
-struct IsNumericRangeContained<
-    Dst,
-    Src,
-    typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
-                            ArithmeticOrUnderlyingEnum<Src>::value>::type> {
-  static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
-                            NUMERIC_RANGE_CONTAINED;
-};
-
 // StrictNumeric implements compile time range checking between numeric types by
 // wrapping assignment operations in a strict_cast. This class is intended to be
 // used for function arguments and return types, to ensure the destination type
@@ -186,7 +133,7 @@
 template <typename T>
 class StrictNumeric {
  public:
-  using type = T;
+  typedef T type;
 
   constexpr StrictNumeric() : value_(0) {}
 
@@ -198,74 +145,21 @@
   // This is not an explicit constructor because we implicitly upgrade regular
   // numerics to StrictNumerics to make them easier to use.
   template <typename Src>
-  constexpr StrictNumeric(Src value)  // NOLINT(runtime/explicit)
+  constexpr StrictNumeric(Src value)
       : value_(strict_cast<T>(value)) {}
 
-  // If you got here from a compiler error, it's because you tried to assign
-  // from a source type to a destination type that has insufficient range.
-  // The solution may be to change the destination type you're assigning to,
-  // and use one large enough to represent the source.
-  // If you're assigning from a CheckedNumeric<> class, you may be able to use
-  // the AssignIfValid() member function, specify a narrower destination type to
-  // the member value functions (e.g. val.template ValueOrDie<Dst>()), use one
-  // of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
-  // If you've encountered an _ambiguous overload_ you can use a static_cast<>
-  // to explicitly cast the result to the destination type.
-  // If none of that works, you may be better served with the checked_cast<> or
-  // saturated_cast<> template functions for your particular use case.
-  template <typename Dst,
-            typename std::enable_if<
-                IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
+  // The numeric cast operator basically handles all the magic.
+  template <typename Dst>
   constexpr operator Dst() const {
-    return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
+    return strict_cast<Dst>(value_);
   }
 
  private:
   const T value_;
 };
 
-// Convience wrapper returns a StrictNumeric from the provided arithmetic type.
-template <typename T>
-constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
-    const T value) {
-  return value;
-}
-
-// Overload the ostream output operator to make logging work nicely.
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
-  os << static_cast<T>(value);
-  return os;
-}
-
-#define STRICT_COMPARISON_OP(NAME, OP)                               \
-  template <typename L, typename R,                                  \
-            typename std::enable_if<                                 \
-                internal::IsStrictOp<L, R>::value>::type* = nullptr> \
-  constexpr bool operator OP(const L lhs, const R rhs) {             \
-    return SafeCompare<NAME, typename UnderlyingType<L>::type,       \
-                       typename UnderlyingType<R>::type>(lhs, rhs);  \
-  }
-
-STRICT_COMPARISON_OP(IsLess, <);
-STRICT_COMPARISON_OP(IsLessOrEqual, <=);
-STRICT_COMPARISON_OP(IsGreater, >);
-STRICT_COMPARISON_OP(IsGreaterOrEqual, >=);
-STRICT_COMPARISON_OP(IsEqual, ==);
-STRICT_COMPARISON_OP(IsNotEqual, !=);
-
-#undef STRICT_COMPARISON_OP
-};
-
-using internal::strict_cast;
-using internal::saturated_cast;
-using internal::SafeUnsignedAbs;
-using internal::StrictNumeric;
-using internal::MakeStrictNum;
-using internal::IsValueNegative;
-
-// Explicitly make a shorter size_t alias for convenience.
-using SizeT = StrictNumeric<size_t>;
+// Explicitly make a shorter size_t typedef for convenience.
+typedef StrictNumeric<size_t> SizeT;
 
 }  // namespace base
 
diff --git a/base/numerics/safe_conversions_impl.h b/base/numerics/safe_conversions_impl.h
index 24357fd..0f0aebc 100644
--- a/base/numerics/safe_conversions_impl.h
+++ b/base/numerics/safe_conversions_impl.h
@@ -5,77 +5,28 @@
 #ifndef BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
 #define BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
 
+#include <limits.h>
 #include <stdint.h>
 
+#include <climits>
 #include <limits>
-#include <type_traits>
 
 namespace base {
 namespace internal {
 
 // The std library doesn't provide a binary max_exponent for integers, however
-// we can compute an analog using std::numeric_limits<>::digits.
+// we can compute one by adding one to the number of non-sign bits. This allows
+// for accurate range comparisons between floating point and integer types.
 template <typename NumericType>
 struct MaxExponent {
-  static const int value = std::is_floating_point<NumericType>::value
+  static_assert(std::is_arithmetic<NumericType>::value,
+                "Argument must be numeric.");
+  static const int value = std::numeric_limits<NumericType>::is_iec559
                                ? std::numeric_limits<NumericType>::max_exponent
-                               : std::numeric_limits<NumericType>::digits + 1;
+                               : (sizeof(NumericType) * CHAR_BIT + 1 -
+                                  std::numeric_limits<NumericType>::is_signed);
 };
 
-// The number of bits (including the sign) in an integer. Eliminates sizeof
-// hacks.
-template <typename NumericType>
-struct IntegerBitsPlusSign {
-  static const int value = std::numeric_limits<NumericType>::digits +
-                           std::is_signed<NumericType>::value;
-};
-
-// Helper templates for integer manipulations.
-
-template <typename Integer>
-struct PositionOfSignBit {
-  static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
-};
-
-// Determines if a numeric value is negative without throwing compiler
-// warnings on: unsigned(value) < 0.
-template <typename T,
-          typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
-constexpr bool IsValueNegative(T value) {
-  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
-  return value < 0;
-}
-
-template <typename T,
-          typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
-constexpr bool IsValueNegative(T) {
-  static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
-  return false;
-}
-
-// This performs a fast negation, returning a signed value. It works on unsigned
-// arguments, but probably doesn't do what you want for any unsigned value
-// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
-template <typename T>
-constexpr typename std::make_signed<T>::type ConditionalNegate(
-    T x,
-    bool is_negative) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  using SignedT = typename std::make_signed<T>::type;
-  using UnsignedT = typename std::make_unsigned<T>::type;
-  return static_cast<SignedT>(
-      (static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
-}
-
-// This performs a safe, absolute value via unsigned overflow.
-template <typename T>
-constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  using UnsignedT = typename std::make_unsigned<T>::type;
-  return IsValueNegative(value) ? 0 - static_cast<UnsignedT>(value)
-                                : static_cast<UnsignedT>(value);
-}
-
 enum IntegerRepresentation {
   INTEGER_REPRESENTATION_UNSIGNED,
   INTEGER_REPRESENTATION_SIGNED
@@ -83,7 +34,7 @@
 
 // A range for a given nunmeric Src type is contained for a given numeric Dst
 // type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
-// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
+// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
 // We implement this as template specializations rather than simple static
 // comparisons to ensure type correctness in our comparisons.
 enum NumericRangeRepresentation {
@@ -94,14 +45,16 @@
 // Helper templates to statically determine if our destination type can contain
 // maximum and minimum values represented by the source type.
 
-template <typename Dst,
-          typename Src,
-          IntegerRepresentation DstSign = std::is_signed<Dst>::value
-                                              ? INTEGER_REPRESENTATION_SIGNED
-                                              : INTEGER_REPRESENTATION_UNSIGNED,
-          IntegerRepresentation SrcSign = std::is_signed<Src>::value
-                                              ? INTEGER_REPRESENTATION_SIGNED
-                                              : INTEGER_REPRESENTATION_UNSIGNED>
+template <
+    typename Dst,
+    typename Src,
+    IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+                                            ? INTEGER_REPRESENTATION_SIGNED
+                                            : INTEGER_REPRESENTATION_UNSIGNED,
+    IntegerRepresentation SrcSign =
+        std::numeric_limits<Src>::is_signed
+            ? INTEGER_REPRESENTATION_SIGNED
+            : INTEGER_REPRESENTATION_UNSIGNED >
 struct StaticDstRangeRelationToSrcRange;
 
 // Same sign: Dst is guaranteed to contain Src only if its range is equal or
@@ -136,34 +89,30 @@
   static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
 };
 
-// This class wraps the range constraints as separate booleans so the compiler
-// can identify constants and eliminate unused code paths.
-class RangeCheck {
- public:
-  constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
-      : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
-  constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
-  constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
-  constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
-  constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
-  constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
-  constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
-  constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
-  constexpr bool operator==(const RangeCheck rhs) const {
-    return is_underflow_ == rhs.is_underflow_ &&
-           is_overflow_ == rhs.is_overflow_;
-  }
-  constexpr bool operator!=(const RangeCheck rhs) const {
-    return !(*this == rhs);
-  }
-
- private:
-  // Do not change the order of these member variables. The integral conversion
-  // optimization depends on this exact order.
-  const bool is_underflow_;
-  const bool is_overflow_;
+enum RangeConstraint {
+  RANGE_VALID = 0x0,  // Value can be represented by the destination type.
+  RANGE_UNDERFLOW = 0x1,  // Value would overflow.
+  RANGE_OVERFLOW = 0x2,  // Value would underflow.
+  RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW  // Invalid (i.e. NaN).
 };
 
+// Helper function for coercing an int back to a RangeContraint.
+constexpr RangeConstraint GetRangeConstraint(int integer_range_constraint) {
+  // TODO(jschuh): Once we get full C++14 support we want this
+  // assert(integer_range_constraint >= RANGE_VALID &&
+  //        integer_range_constraint <= RANGE_INVALID)
+  return static_cast<RangeConstraint>(integer_range_constraint);
+}
+
+// This function creates a RangeConstraint from an upper and lower bound
+// check by taking advantage of the fact that only NaN can be out of range in
+// both directions at once.
+constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
+                                                    bool is_in_lower_bound) {
+  return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
+                            (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
+}
+
 // The following helper template addresses a corner case in range checks for
 // conversion from a floating-point type to an integral type of smaller range
 // but larger precision (e.g. float -> unsigned). The problem is as follows:
@@ -185,547 +134,131 @@
 // To fix this bug we manually truncate the maximum value when the destination
 // type is an integral of larger precision than the source floating-point type,
 // such that the resulting maximum is represented exactly as a floating point.
-template <typename Dst, typename Src, template <typename> class Bounds>
+template <typename Dst, typename Src>
 struct NarrowingRange {
-  using SrcLimits = std::numeric_limits<Src>;
-  using DstLimits = typename std::numeric_limits<Dst>;
+  typedef typename std::numeric_limits<Src> SrcLimits;
+  typedef typename std::numeric_limits<Dst> DstLimits;
+  // The following logic avoids warnings where the max function is
+  // instantiated with invalid values for a bit shift (even though
+  // such a function can never be called).
+  static const int shift = (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+                            SrcLimits::digits < DstLimits::digits &&
+                            SrcLimits::is_iec559 &&
+                            DstLimits::is_integer)
+                               ? (DstLimits::digits - SrcLimits::digits)
+                               : 0;
 
-  // Computes the mask required to make an accurate comparison between types.
-  static const int kShift =
-      (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
-       SrcLimits::digits < DstLimits::digits)
-          ? (DstLimits::digits - SrcLimits::digits)
-          : 0;
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-
-  // Masks out the integer bits that are beyond the precision of the
-  // intermediate type used for comparison.
-  static constexpr T Adjust(T value) {
-    static_assert(std::is_same<T, Dst>::value, "");
-    static_assert(kShift < DstLimits::digits, "");
-    return static_cast<T>(
-        ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
-                          IsValueNegative(value)));
+  static constexpr Dst max() {
+    // We use UINTMAX_C below to avoid compiler warnings about shifting floating
+    // points. Since it's a compile time calculation, it shouldn't have any
+    // performance impact.
+    return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1);
   }
 
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static constexpr T Adjust(T value) {
-    static_assert(std::is_same<T, Dst>::value, "");
-    static_assert(kShift == 0, "");
-    return value;
+  static constexpr Dst min() {
+    return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max()
+                                               : DstLimits::min();
   }
-
-  static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
-  static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
 };
 
-template <typename Dst,
-          typename Src,
-          template <typename> class Bounds,
-          IntegerRepresentation DstSign = std::is_signed<Dst>::value
-                                              ? INTEGER_REPRESENTATION_SIGNED
-                                              : INTEGER_REPRESENTATION_UNSIGNED,
-          IntegerRepresentation SrcSign = std::is_signed<Src>::value
-                                              ? INTEGER_REPRESENTATION_SIGNED
-                                              : INTEGER_REPRESENTATION_UNSIGNED,
-          NumericRangeRepresentation DstRange =
-              StaticDstRangeRelationToSrcRange<Dst, Src>::value>
+template <
+    typename Dst,
+    typename Src,
+    IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+                                            ? INTEGER_REPRESENTATION_SIGNED
+                                            : INTEGER_REPRESENTATION_UNSIGNED,
+    IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
+                                            ? INTEGER_REPRESENTATION_SIGNED
+                                            : INTEGER_REPRESENTATION_UNSIGNED,
+    NumericRangeRepresentation DstRange =
+        StaticDstRangeRelationToSrcRange<Dst, Src>::value >
 struct DstRangeRelationToSrcRangeImpl;
 
 // The following templates are for ranges that must be verified at runtime. We
 // split it into checks based on signedness to avoid confusing casts and
 // compiler warnings on signed an unsigned comparisons.
 
-// Same sign narrowing: The range is contained for normal limits.
+// Dst range is statically determined to contain Src: Nothing to check.
 template <typename Dst,
           typename Src,
-          template <typename> class Bounds,
           IntegerRepresentation DstSign,
           IntegerRepresentation SrcSign>
 struct DstRangeRelationToSrcRangeImpl<Dst,
                                       Src,
-                                      Bounds,
                                       DstSign,
                                       SrcSign,
                                       NUMERIC_RANGE_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using SrcLimits = std::numeric_limits<Src>;
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    return RangeCheck(
-        static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
-            static_cast<Dst>(value) >= DstLimits::lowest(),
-        static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
-            static_cast<Dst>(value) <= DstLimits::max());
-  }
+  static constexpr RangeConstraint Check(Src /*value*/) { return RANGE_VALID; }
 };
 
 // Signed to signed narrowing: Both the upper and lower boundaries may be
-// exceeded for standard limits.
-template <typename Dst, typename Src, template <typename> class Bounds>
+// exceeded.
+template <typename Dst, typename Src>
 struct DstRangeRelationToSrcRangeImpl<Dst,
                                       Src,
-                                      Bounds,
                                       INTEGER_REPRESENTATION_SIGNED,
                                       INTEGER_REPRESENTATION_SIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
+  static constexpr RangeConstraint Check(Src value) {
+    return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()),
+                              (value >= NarrowingRange<Dst, Src>::min()));
   }
 };
 
-// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
-// standard limits.
-template <typename Dst, typename Src, template <typename> class Bounds>
+// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
+template <typename Dst, typename Src>
 struct DstRangeRelationToSrcRangeImpl<Dst,
                                       Src,
-                                      Bounds,
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    return RangeCheck(
-        DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
-        value <= DstLimits::max());
+  static constexpr RangeConstraint Check(Src value) {
+    return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true);
   }
 };
 
-// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
-template <typename Dst, typename Src, template <typename> class Bounds>
+// Unsigned to signed: The upper boundary may be exceeded.
+template <typename Dst, typename Src>
 struct DstRangeRelationToSrcRangeImpl<Dst,
                                       Src,
-                                      Bounds,
                                       INTEGER_REPRESENTATION_SIGNED,
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    using Promotion = decltype(Src() + Dst());
-    return RangeCheck(DstLimits::lowest() <= Dst(0) ||
-                          static_cast<Promotion>(value) >=
-                              static_cast<Promotion>(DstLimits::lowest()),
-                      static_cast<Promotion>(value) <=
-                          static_cast<Promotion>(DstLimits::max()));
+  static constexpr RangeConstraint Check(Src value) {
+    return sizeof(Dst) > sizeof(Src)
+               ? RANGE_VALID
+               : GetRangeConstraint(
+                     value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
+                     true);
   }
 };
 
 // Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
-// and any negative value exceeds the lower boundary for standard limits.
-template <typename Dst, typename Src, template <typename> class Bounds>
+// and any negative value exceeds the lower boundary.
+template <typename Dst, typename Src>
 struct DstRangeRelationToSrcRangeImpl<Dst,
                                       Src,
-                                      Bounds,
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       INTEGER_REPRESENTATION_SIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static constexpr RangeCheck Check(Src value) {
-    using SrcLimits = std::numeric_limits<Src>;
-    using DstLimits = NarrowingRange<Dst, Src, Bounds>;
-    using Promotion = decltype(Src() + Dst());
-    return RangeCheck(
-        value >= Src(0) && (DstLimits::lowest() == 0 ||
-                            static_cast<Dst>(value) >= DstLimits::lowest()),
-        static_cast<Promotion>(SrcLimits::max()) <=
-                static_cast<Promotion>(DstLimits::max()) ||
-            static_cast<Promotion>(value) <=
-                static_cast<Promotion>(DstLimits::max()));
+  static constexpr RangeConstraint Check(Src value) {
+    return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
+               ? GetRangeConstraint(true, value >= static_cast<Src>(0))
+               : GetRangeConstraint(
+                     value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
+                     value >= static_cast<Src>(0));
   }
 };
 
-template <typename Dst,
-          template <typename> class Bounds = std::numeric_limits,
-          typename Src>
-constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
-  static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
-  static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
-  static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
-  return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
+template <typename Dst, typename Src>
+constexpr RangeConstraint DstRangeRelationToSrcRange(Src value) {
+  static_assert(std::numeric_limits<Src>::is_specialized,
+                "Argument must be numeric.");
+  static_assert(std::numeric_limits<Dst>::is_specialized,
+                "Result must be numeric.");
+  return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
 }
 
-// Integer promotion templates used by the portable checked integer arithmetic.
-template <size_t Size, bool IsSigned>
-struct IntegerForDigitsAndSign;
-
-#define INTEGER_FOR_DIGITS_AND_SIGN(I)                          \
-  template <>                                                   \
-  struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
-                                 std::is_signed<I>::value> {    \
-    using type = I;                                             \
-  }
-
-INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
-INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
-INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
-INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
-INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
-INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
-INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
-INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
-#undef INTEGER_FOR_DIGITS_AND_SIGN
-
-// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
-// support 128-bit math, then the ArithmeticPromotion template below will need
-// to be updated (or more likely replaced with a decltype expression).
-static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
-              "Max integer size not supported for this toolchain.");
-
-template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
-struct TwiceWiderInteger {
-  using type =
-      typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
-                                       IsSigned>::type;
-};
-
-enum ArithmeticPromotionCategory {
-  LEFT_PROMOTION,  // Use the type of the left-hand argument.
-  RIGHT_PROMOTION  // Use the type of the right-hand argument.
-};
-
-// Determines the type that can represent the largest positive value.
-template <typename Lhs,
-          typename Rhs,
-          ArithmeticPromotionCategory Promotion =
-              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
-                  ? LEFT_PROMOTION
-                  : RIGHT_PROMOTION>
-struct MaxExponentPromotion;
-
-template <typename Lhs, typename Rhs>
-struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
-  using type = Lhs;
-};
-
-template <typename Lhs, typename Rhs>
-struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
-  using type = Rhs;
-};
-
-// Determines the type that can represent the lowest arithmetic value.
-template <typename Lhs,
-          typename Rhs,
-          ArithmeticPromotionCategory Promotion =
-              std::is_signed<Lhs>::value
-                  ? (std::is_signed<Rhs>::value
-                         ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
-                                ? LEFT_PROMOTION
-                                : RIGHT_PROMOTION)
-                         : LEFT_PROMOTION)
-                  : (std::is_signed<Rhs>::value
-                         ? RIGHT_PROMOTION
-                         : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
-                                ? LEFT_PROMOTION
-                                : RIGHT_PROMOTION))>
-struct LowestValuePromotion;
-
-template <typename Lhs, typename Rhs>
-struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
-  using type = Lhs;
-};
-
-template <typename Lhs, typename Rhs>
-struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
-  using type = Rhs;
-};
-
-// Determines the type that is best able to represent an arithmetic result.
-template <
-    typename Lhs,
-    typename Rhs = Lhs,
-    bool is_intmax_type =
-        std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
-            IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
-                value == IntegerBitsPlusSign<intmax_t>::value,
-    bool is_max_exponent =
-        StaticDstRangeRelationToSrcRange<
-            typename MaxExponentPromotion<Lhs, Rhs>::type,
-            Lhs>::value ==
-        NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
-            typename MaxExponentPromotion<Lhs, Rhs>::type,
-            Rhs>::value == NUMERIC_RANGE_CONTAINED>
-struct BigEnoughPromotion;
-
-// The side with the max exponent is big enough.
-template <typename Lhs, typename Rhs, bool is_intmax_type>
-struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
-  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
-  static const bool is_contained = true;
-};
-
-// We can use a twice wider type to fit.
-template <typename Lhs, typename Rhs>
-struct BigEnoughPromotion<Lhs, Rhs, false, false> {
-  using type =
-      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
-                                 std::is_signed<Lhs>::value ||
-                                     std::is_signed<Rhs>::value>::type;
-  static const bool is_contained = true;
-};
-
-// No type is large enough.
-template <typename Lhs, typename Rhs>
-struct BigEnoughPromotion<Lhs, Rhs, true, false> {
-  using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
-  static const bool is_contained = false;
-};
-
-// We can statically check if operations on the provided types can wrap, so we
-// can skip the checked operations if they're not needed. So, for an integer we
-// care if the destination type preserves the sign and is twice the width of
-// the source.
-template <typename T, typename Lhs, typename Rhs = Lhs>
-struct IsIntegerArithmeticSafe {
-  static const bool value =
-      !std::is_floating_point<T>::value &&
-      !std::is_floating_point<Lhs>::value &&
-      !std::is_floating_point<Rhs>::value &&
-      std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
-      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
-      std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
-      IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
-};
-
-// Promotes to a type that can represent any possible result of a binary
-// arithmetic operation with the source types.
-template <typename Lhs,
-          typename Rhs,
-          bool is_promotion_possible = IsIntegerArithmeticSafe<
-              typename std::conditional<std::is_signed<Lhs>::value ||
-                                            std::is_signed<Rhs>::value,
-                                        intmax_t,
-                                        uintmax_t>::type,
-              typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
-struct FastIntegerArithmeticPromotion;
-
-template <typename Lhs, typename Rhs>
-struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
-  using type =
-      typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
-                                 std::is_signed<Lhs>::value ||
-                                     std::is_signed<Rhs>::value>::type;
-  static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
-  static const bool is_contained = true;
-};
-
-template <typename Lhs, typename Rhs>
-struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
-  using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
-  static const bool is_contained = false;
-};
-
-// This hacks around libstdc++ 4.6 missing stuff in type_traits.
-#if defined(__GLIBCXX__)
-#define PRIV_GLIBCXX_4_7_0 20120322
-#define PRIV_GLIBCXX_4_5_4 20120702
-#define PRIV_GLIBCXX_4_6_4 20121127
-#if (__GLIBCXX__ < PRIV_GLIBCXX_4_7_0 || __GLIBCXX__ == PRIV_GLIBCXX_4_5_4 || \
-     __GLIBCXX__ == PRIV_GLIBCXX_4_6_4)
-#define PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX
-#undef PRIV_GLIBCXX_4_7_0
-#undef PRIV_GLIBCXX_4_5_4
-#undef PRIV_GLIBCXX_4_6_4
-#endif
-#endif
-
-// Extracts the underlying type from an enum.
-template <typename T, bool is_enum = std::is_enum<T>::value>
-struct ArithmeticOrUnderlyingEnum;
-
-template <typename T>
-struct ArithmeticOrUnderlyingEnum<T, true> {
-#if defined(PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX)
-  using type = __underlying_type(T);
-#else
-  using type = typename std::underlying_type<T>::type;
-#endif
-  static const bool value = std::is_arithmetic<type>::value;
-};
-
-#if defined(PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX)
-#undef PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX
-#endif
-
-template <typename T>
-struct ArithmeticOrUnderlyingEnum<T, false> {
-  using type = T;
-  static const bool value = std::is_arithmetic<type>::value;
-};
-
-// The following are helper templates used in the CheckedNumeric class.
-template <typename T>
-class CheckedNumeric;
-
-template <typename T>
-class StrictNumeric;
-
-// Used to treat CheckedNumeric and arithmetic underlying types the same.
-template <typename T>
-struct UnderlyingType {
-  using type = typename ArithmeticOrUnderlyingEnum<T>::type;
-  static const bool is_numeric = std::is_arithmetic<type>::value;
-  static const bool is_checked = false;
-  static const bool is_strict = false;
-};
-
-template <typename T>
-struct UnderlyingType<CheckedNumeric<T>> {
-  using type = T;
-  static const bool is_numeric = true;
-  static const bool is_checked = true;
-  static const bool is_strict = false;
-};
-
-template <typename T>
-struct UnderlyingType<StrictNumeric<T>> {
-  using type = T;
-  static const bool is_numeric = true;
-  static const bool is_checked = false;
-  static const bool is_strict = true;
-};
-
-template <typename L, typename R>
-struct IsCheckedOp {
-  static const bool value =
-      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
-      (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
-};
-
-template <typename L, typename R>
-struct IsStrictOp {
-  static const bool value =
-      UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
-      (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict);
-};
-
-template <typename L, typename R>
-constexpr bool IsLessImpl(const L lhs,
-                          const R rhs,
-                          const RangeCheck l_range,
-                          const RangeCheck r_range) {
-  return l_range.IsUnderflow() || r_range.IsOverflow() ||
-         (l_range == r_range &&
-          static_cast<decltype(lhs + rhs)>(lhs) <
-              static_cast<decltype(lhs + rhs)>(rhs));
-}
-
-template <typename L, typename R>
-struct IsLess {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
-                      DstRangeRelationToSrcRange<L>(rhs));
-  }
-};
-
-template <typename L, typename R>
-constexpr bool IsLessOrEqualImpl(const L lhs,
-                                 const R rhs,
-                                 const RangeCheck l_range,
-                                 const RangeCheck r_range) {
-  return l_range.IsUnderflow() || r_range.IsOverflow() ||
-         (l_range == r_range &&
-          static_cast<decltype(lhs + rhs)>(lhs) <=
-              static_cast<decltype(lhs + rhs)>(rhs));
-}
-
-template <typename L, typename R>
-struct IsLessOrEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
-                             DstRangeRelationToSrcRange<L>(rhs));
-  }
-};
-
-template <typename L, typename R>
-constexpr bool IsGreaterImpl(const L lhs,
-                             const R rhs,
-                             const RangeCheck l_range,
-                             const RangeCheck r_range) {
-  return l_range.IsOverflow() || r_range.IsUnderflow() ||
-         (l_range == r_range &&
-          static_cast<decltype(lhs + rhs)>(lhs) >
-              static_cast<decltype(lhs + rhs)>(rhs));
-}
-
-template <typename L, typename R>
-struct IsGreater {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
-                         DstRangeRelationToSrcRange<L>(rhs));
-  }
-};
-
-template <typename L, typename R>
-constexpr bool IsGreaterOrEqualImpl(const L lhs,
-                                    const R rhs,
-                                    const RangeCheck l_range,
-                                    const RangeCheck r_range) {
-  return l_range.IsOverflow() || r_range.IsUnderflow() ||
-         (l_range == r_range &&
-          static_cast<decltype(lhs + rhs)>(lhs) >=
-              static_cast<decltype(lhs + rhs)>(rhs));
-}
-
-template <typename L, typename R>
-struct IsGreaterOrEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
-                                DstRangeRelationToSrcRange<L>(rhs));
-  }
-};
-
-template <typename L, typename R>
-struct IsEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return DstRangeRelationToSrcRange<R>(lhs) ==
-               DstRangeRelationToSrcRange<L>(rhs) &&
-           static_cast<decltype(lhs + rhs)>(lhs) ==
-               static_cast<decltype(lhs + rhs)>(rhs);
-  }
-};
-
-template <typename L, typename R>
-struct IsNotEqual {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  static constexpr bool Test(const L lhs, const R rhs) {
-    return DstRangeRelationToSrcRange<R>(lhs) !=
-               DstRangeRelationToSrcRange<L>(rhs) ||
-           static_cast<decltype(lhs + rhs)>(lhs) !=
-               static_cast<decltype(lhs + rhs)>(rhs);
-  }
-};
-
-// These perform the actual math operations on the CheckedNumerics.
-// Binary arithmetic operations.
-template <template <typename, typename> class C, typename L, typename R>
-constexpr bool SafeCompare(const L lhs, const R rhs) {
-  static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
-                "Types must be numeric.");
-  using Promotion = BigEnoughPromotion<L, R>;
-  using BigType = typename Promotion::type;
-  return Promotion::is_contained
-             // Force to a larger type for speed if both are contained.
-             ? C<BigType, BigType>::Test(
-                   static_cast<BigType>(static_cast<L>(lhs)),
-                   static_cast<BigType>(static_cast<R>(rhs)))
-             // Let the template functions figure it out for mixed types.
-             : C<L, R>::Test(lhs, rhs);
-};
-
 }  // namespace internal
 }  // namespace base
 
diff --git a/base/numerics/safe_math.h b/base/numerics/safe_math.h
index f5007db..d0003b7 100644
--- a/base/numerics/safe_math.h
+++ b/base/numerics/safe_math.h
@@ -10,259 +10,155 @@
 #include <limits>
 #include <type_traits>
 
+#include "base/logging.h"
 #include "base/numerics/safe_math_impl.h"
 
 namespace base {
+
 namespace internal {
 
-// CheckedNumeric<> implements all the logic and operators for detecting integer
+// CheckedNumeric implements all the logic and operators for detecting integer
 // boundary conditions such as overflow, underflow, and invalid conversions.
 // The CheckedNumeric type implicitly converts from floating point and integer
 // data types, and contains overloads for basic arithmetic operations (i.e.: +,
-// -, *, / for all types and %, <<, >>, &, |, ^ for integers). Type promotions
-// are a slightly modified version of the standard C arithmetic rules with the
-// two differences being that there is no default promotion to int and bitwise
-// logical operations always return an unsigned of the wider type.
-//
-// You may also use one of the variadic convenience functions, which accept
-// standard arithmetic or CheckedNumeric types, perform arithmetic operations,
-// and return a CheckedNumeric result. The supported functions are:
-//  CheckAdd() - Addition.
-//  CheckSub() - Subtraction.
-//  CheckMul() - Multiplication.
-//  CheckDiv() - Division.
-//  CheckMod() - Modulous (integer only).
-//  CheckLsh() - Left integer shift (integer only).
-//  CheckRsh() - Right integer shift (integer only).
-//  CheckAnd() - Bitwise AND (integer only with unsigned result).
-//  CheckOr()  - Bitwise OR (integer only with unsigned result).
-//  CheckXor() - Bitwise XOR (integer only with unsigned result).
-//  CheckMax() - Maximum of supplied arguments.
-//  CheckMin() - Minimum of supplied arguments.
-//
-// The unary negation, increment, and decrement operators are supported, along
-// with the following unary arithmetic methods, which return a new
-// CheckedNumeric as a result of the operation:
-//  Abs() - Absolute value.
-//  UnsignedAbs() - Absolute value as an equal-width unsigned underlying type
-//          (valid for only integral types).
-//  Max() - Returns whichever is greater of the current instance or argument.
-//          The underlying return type is whichever has the greatest magnitude.
-//  Min() - Returns whichever is lowest of the current instance or argument.
-//          The underlying return type is whichever has can represent the lowest
-//          number in the smallest width (e.g. int8_t over unsigned, int over
-//          int8_t, and float over int).
+// -, *, /, %).
 //
 // The following methods convert from CheckedNumeric to standard numeric values:
-//  AssignIfValid() - Assigns the underlying value to the supplied destination
-//          pointer if the value is currently valid and within the range
-//          supported by the destination type. Returns true on success.
-//  ****************************************************************************
-//  *  WARNING: All of the following functions return a StrictNumeric, which   *
-//  *  is valid for comparison and assignment operations, but will trigger a   *
-//  *  compile failure on attempts to assign to a type of insufficient range.  *
-//  ****************************************************************************
-//  IsValid() - Returns true if the underlying numeric value is valid (i.e. has
-//          has not wrapped and is not the result of an invalid conversion).
-//  ValueOrDie() - Returns the underlying value. If the state is not valid this
-//          call will crash on a CHECK.
-//  ValueOrDefault() - Returns the current value, or the supplied default if the
-//          state is not valid (will not trigger a CHECK).
+// IsValid() - Returns true if the underlying numeric value is valid (i.e. has
+//             has not wrapped and is not the result of an invalid conversion).
+// ValueOrDie() - Returns the underlying value. If the state is not valid this
+//                call will crash on a CHECK.
+// ValueOrDefault() - Returns the current value, or the supplied default if the
+//                    state is not valid.
+// ValueFloating() - Returns the underlying floating point value (valid only
+//                   only for floating point CheckedNumeric types).
 //
-// The following wrapper functions can be used to avoid the template
-// disambiguator syntax when converting a destination type.
-//   IsValidForType<>() in place of: a.template IsValid<Dst>()
-//   ValueOrDieForType<>() in place of: a.template ValueOrDie()
-//   ValueOrDefaultForType<>() in place of: a.template ValueOrDefault(default)
-//
-// The following are general utility methods that are useful for converting
-// between arithmetic types and CheckedNumeric types:
-//  CheckedNumeric::Cast<Dst>() - Instance method returning a CheckedNumeric
-//          derived from casting the current instance to a CheckedNumeric of
-//          the supplied destination type.
-//  MakeCheckedNum() - Creates a new CheckedNumeric from the underlying type of
-//          the supplied arithmetic, CheckedNumeric, or StrictNumeric type.
-//
-// Comparison operations are explicitly not supported because they could result
-// in a crash on an unexpected CHECK condition. You should use patterns like the
-// following for comparisons:
+// Bitwise operations are explicitly not supported, because correct
+// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison
+// operations are explicitly not supported because they could result in a crash
+// on a CHECK condition. You should use patterns like the following for these
+// operations:
+// Bitwise operation:
+//     CheckedNumeric<int> checked_int = untrusted_input_value;
+//     int x = checked_int.ValueOrDefault(0) | kFlagValues;
+// Comparison:
 //   CheckedNumeric<size_t> checked_size = untrusted_input_value;
 //   checked_size += HEADER LENGTH;
 //   if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size)
 //     Do stuff...
-
 template <typename T>
 class CheckedNumeric {
   static_assert(std::is_arithmetic<T>::value,
                 "CheckedNumeric<T>: T must be a numeric type.");
 
  public:
-  using type = T;
+  typedef T type;
 
-  constexpr CheckedNumeric() {}
+  CheckedNumeric() {}
 
   // Copy constructor.
   template <typename Src>
-  constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
-      : state_(rhs.state_.value(), rhs.IsValid()) {}
+  CheckedNumeric(const CheckedNumeric<Src>& rhs)
+      : state_(rhs.ValueUnsafe(), rhs.validity()) {}
 
   template <typename Src>
-  friend class CheckedNumeric;
+  CheckedNumeric(Src value, RangeConstraint validity)
+      : state_(value, validity) {}
 
   // This is not an explicit constructor because we implicitly upgrade regular
   // numerics to CheckedNumerics to make them easier to use.
   template <typename Src>
-  constexpr CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
+  CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
       : state_(value) {
-    static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+    static_assert(std::numeric_limits<Src>::is_specialized,
+                  "Argument must be numeric.");
   }
 
   // This is not an explicit constructor because we want a seamless conversion
   // from StrictNumeric types.
   template <typename Src>
-  constexpr CheckedNumeric(
-      StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
-      : state_(static_cast<Src>(value)) {}
-
-  // IsValid() - The public API to test if a CheckedNumeric is currently valid.
-  // A range checked destination type can be supplied using the Dst template
-  // parameter.
-  template <typename Dst = T>
-  constexpr bool IsValid() const {
-    return state_.is_valid() &&
-           IsValueInRangeForNumericType<Dst>(state_.value());
+  CheckedNumeric(StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
+      : state_(static_cast<Src>(value)) {
   }
 
-  // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
-  // and is within the range supported by the destination type. Returns true if
-  // successful and false otherwise.
-  template <typename Dst>
-  constexpr bool AssignIfValid(Dst* result) const {
-    return IsValid<Dst>() ? ((*result = static_cast<Dst>(state_.value())), true)
-                          : false;
+  // IsValid() is the public API to test if a CheckedNumeric is currently valid.
+  bool IsValid() const { return validity() == RANGE_VALID; }
+
+  // ValueOrDie() The primary accessor for the underlying value. If the current
+  // state is not valid it will CHECK and crash.
+  T ValueOrDie() const {
+    CHECK(IsValid());
+    return state_.value();
   }
 
-  // ValueOrDie() - The primary accessor for the underlying value. If the
-  // current state is not valid it will CHECK and crash.
-  // A range checked destination type can be supplied using the Dst template
-  // parameter, which will trigger a CHECK if the value is not in bounds for
-  // the destination.
-  // The CHECK behavior can be overridden by supplying a handler as a
-  // template parameter, for test code, etc. However, the handler cannot access
-  // the underlying value, and it is not available through other means.
-  template <typename Dst = T, class CheckHandler = CheckOnFailure>
-  constexpr StrictNumeric<Dst> ValueOrDie() const {
-    return IsValid<Dst>() ? static_cast<Dst>(state_.value())
-                          : CheckHandler::template HandleFailure<Dst>();
-  }
-
-  // ValueOrDefault(T default_value) - A convenience method that returns the
+  // ValueOrDefault(T default_value) A convenience method that returns the
   // current value if the state is valid, and the supplied default_value for
   // any other state.
-  // A range checked destination type can be supplied using the Dst template
-  // parameter. WARNING: This function may fail to compile or CHECK at runtime
-  // if the supplied default_value is not within range of the destination type.
-  template <typename Dst = T, typename Src>
-  constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
-    return IsValid<Dst>() ? static_cast<Dst>(state_.value())
-                          : checked_cast<Dst>(default_value);
+  T ValueOrDefault(T default_value) const {
+    return IsValid() ? state_.value() : default_value;
   }
 
-  // Returns a checked numeric of the specified type, cast from the current
-  // CheckedNumeric. If the current state is invalid or the destination cannot
-  // represent the result then the returned CheckedNumeric will be invalid.
-  template <typename Dst>
-  constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
-    return *this;
+  // ValueFloating() - Since floating point values include their validity state,
+  // we provide an easy method for extracting them directly, without a risk of
+  // crashing on a CHECK.
+  T ValueFloating() const {
+    static_assert(std::numeric_limits<T>::is_iec559, "Argument must be float.");
+    return CheckedNumeric<T>::cast(*this).ValueUnsafe();
   }
 
-  // This friend method is available solely for providing more detailed logging
-  // in the the tests. Do not implement it in production code, because the
-  // underlying values may change at any time.
-  template <typename U>
-  friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
+  // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for
+  // tests and to avoid a big matrix of friend operator overloads. But the
+  // values it returns are likely to change in the future.
+  // Returns: current validity state (i.e. valid, overflow, underflow, nan).
+  // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+  // saturation/wrapping so we can expose this state consistently and implement
+  // saturated arithmetic.
+  RangeConstraint validity() const { return state_.validity(); }
+
+  // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now
+  // for tests and to avoid a big matrix of friend operator overloads. But the
+  // values it returns are likely to change in the future.
+  // Returns: the raw numeric value, regardless of the current state.
+  // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+  // saturation/wrapping so we can expose this state consistently and implement
+  // saturated arithmetic.
+  T ValueUnsafe() const { return state_.value(); }
 
   // Prototypes for the supported arithmetic operator overloads.
-  template <typename Src>
-  CheckedNumeric& operator+=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator-=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator*=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator/=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator%=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator<<=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator>>=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator&=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator|=(const Src rhs);
-  template <typename Src>
-  CheckedNumeric& operator^=(const Src rhs);
+  template <typename Src> CheckedNumeric& operator+=(Src rhs);
+  template <typename Src> CheckedNumeric& operator-=(Src rhs);
+  template <typename Src> CheckedNumeric& operator*=(Src rhs);
+  template <typename Src> CheckedNumeric& operator/=(Src rhs);
+  template <typename Src> CheckedNumeric& operator%=(Src rhs);
 
-  constexpr CheckedNumeric operator-() const {
-    return CheckedNumeric<T>(
-        NegateWrapper(state_.value()),
-        IsValid() &&
-            (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
-             NegateWrapper(state_.value()) !=
-                 std::numeric_limits<T>::lowest()));
+  CheckedNumeric operator-() const {
+    RangeConstraint validity;
+    T value = CheckedNeg(state_.value(), &validity);
+    // Negation is always valid for floating point.
+    if (std::numeric_limits<T>::is_iec559)
+      return CheckedNumeric<T>(value);
+
+    validity = GetRangeConstraint(state_.validity() | validity);
+    return CheckedNumeric<T>(value, validity);
   }
 
-  constexpr CheckedNumeric operator~() const {
-    return CheckedNumeric<decltype(InvertWrapper(T()))>(
-        InvertWrapper(state_.value()), IsValid());
-  }
+  CheckedNumeric Abs() const {
+    RangeConstraint validity;
+    T value = CheckedAbs(state_.value(), &validity);
+    // Absolute value is always valid for floating point.
+    if (std::numeric_limits<T>::is_iec559)
+      return CheckedNumeric<T>(value);
 
-  constexpr CheckedNumeric Abs() const {
-    return CheckedNumeric<T>(
-        AbsWrapper(state_.value()),
-        IsValid() &&
-            (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
-             AbsWrapper(state_.value()) != std::numeric_limits<T>::lowest()));
-  }
-
-  template <typename U>
-  constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
-      const U rhs) const {
-    using R = typename UnderlyingType<U>::type;
-    using result_type = typename MathWrapper<CheckedMaxOp, T, U>::type;
-    // TODO(jschuh): This can be converted to the MathOp version and remain
-    // constexpr once we have C++14 support.
-    return CheckedNumeric<result_type>(
-        static_cast<result_type>(
-            IsGreater<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
-                ? state_.value()
-                : Wrapper<U>::value(rhs)),
-        state_.is_valid() && Wrapper<U>::is_valid(rhs));
-  }
-
-  template <typename U>
-  constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
-      const U rhs) const {
-    using R = typename UnderlyingType<U>::type;
-    using result_type = typename MathWrapper<CheckedMinOp, T, U>::type;
-    // TODO(jschuh): This can be converted to the MathOp version and remain
-    // constexpr once we have C++14 support.
-    return CheckedNumeric<result_type>(
-        static_cast<result_type>(
-            IsLess<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
-                ? state_.value()
-                : Wrapper<U>::value(rhs)),
-        state_.is_valid() && Wrapper<U>::is_valid(rhs));
+    validity = GetRangeConstraint(state_.validity() | validity);
+    return CheckedNumeric<T>(value, validity);
   }
 
   // This function is available only for integral types. It returns an unsigned
   // integer of the same width as the source type, containing the absolute value
   // of the source, and properly handling signed min.
-  constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
-  UnsignedAbs() const {
+  CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const {
     return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
-        SafeUnsignedAbs(state_.value()), state_.is_valid());
+        CheckedUnsignedAbs(state_.value()), state_.validity());
   }
 
   CheckedNumeric& operator++() {
@@ -287,221 +183,126 @@
     return value;
   }
 
-  // These perform the actual math operations on the CheckedNumerics.
-  // Binary arithmetic operations.
-  template <template <typename, typename, typename> class M,
-            typename L,
-            typename R>
-  static CheckedNumeric MathOp(const L lhs, const R rhs) {
-    using Math = typename MathWrapper<M, L, R>::math;
-    T result = 0;
-    bool is_valid =
-        Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
-        Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
-    return CheckedNumeric<T>(result, is_valid);
-  };
-
-  // Assignment arithmetic operations.
-  template <template <typename, typename, typename> class M, typename R>
-  CheckedNumeric& MathOp(const R rhs) {
-    using Math = typename MathWrapper<M, T, R>::math;
-    T result = 0;  // Using T as the destination saves a range check.
-    bool is_valid = state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
-                    Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
-    *this = CheckedNumeric<T>(result, is_valid);
-    return *this;
-  };
-
- private:
-  CheckedNumericState<T> state_;
-
+  // These static methods behave like a convenience cast operator targeting
+  // the desired CheckedNumeric type. As an optimization, a reference is
+  // returned when Src is the same type as T.
   template <typename Src>
-  constexpr CheckedNumeric(Src value, bool is_valid)
-      : state_(value, is_valid) {}
-
-  // These wrappers allow us to handle state the same way for both
-  // CheckedNumeric and POD arithmetic types.
-  template <typename Src>
-  struct Wrapper {
-    static constexpr bool is_valid(Src) { return true; }
-    static constexpr Src value(Src value) { return value; }
-  };
-
-  template <typename Src>
-  struct Wrapper<CheckedNumeric<Src>> {
-    static constexpr bool is_valid(const CheckedNumeric<Src> v) {
-      return v.IsValid();
-    }
-    static constexpr Src value(const CheckedNumeric<Src> v) {
-      return v.state_.value();
-    }
-  };
-
-  template <typename Src>
-  struct Wrapper<StrictNumeric<Src>> {
-    static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
-    static constexpr Src value(const StrictNumeric<Src> v) {
-      return static_cast<Src>(v);
-    }
-  };
-};
-
-// Convenience functions to avoid the ugly template disambiguator syntax.
-template <typename Dst, typename Src>
-constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
-  return value.template IsValid<Dst>();
-}
-
-template <typename Dst, typename Src>
-constexpr StrictNumeric<Dst> ValueOrDieForType(
-    const CheckedNumeric<Src> value) {
-  return value.template ValueOrDie<Dst>();
-}
-
-template <typename Dst, typename Src, typename Default>
-constexpr StrictNumeric<Dst> ValueOrDefaultForType(
-    const CheckedNumeric<Src> value,
-    const Default default_value) {
-  return value.template ValueOrDefault<Dst>(default_value);
-}
-
-// These variadic templates work out the return types.
-// TODO(jschuh): Rip all this out once we have C++14 non-trailing auto support.
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R,
-          typename... Args>
-struct ResultType;
-
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R>
-struct ResultType<M, L, R> {
-  using type = typename MathWrapper<M, L, R>::type;
-};
-
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R,
-          typename... Args>
-struct ResultType {
-  using type =
-      typename ResultType<M, typename ResultType<M, L, R>::type, Args...>::type;
-};
-
-// Convience wrapper to return a new CheckedNumeric from the provided arithmetic
-// or CheckedNumericType.
-template <typename T>
-constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(
-    const T value) {
-  return value;
-}
-
-// These implement the variadic wrapper for the math operations.
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R>
-CheckedNumeric<typename MathWrapper<M, L, R>::type> ChkMathOp(const L lhs,
-                                                              const R rhs) {
-  using Math = typename MathWrapper<M, L, R>::math;
-  return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
-                                                                        rhs);
-}
-
-// General purpose wrapper template for arithmetic operations.
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R,
-          typename... Args>
-CheckedNumeric<typename ResultType<M, L, R, Args...>::type>
-ChkMathOp(const L lhs, const R rhs, const Args... args) {
-  auto tmp = ChkMathOp<M>(lhs, rhs);
-  return tmp.IsValid() ? ChkMathOp<M>(tmp, args...)
-                       : decltype(ChkMathOp<M>(tmp, args...))(tmp);
-};
-
-// The following macros are just boilerplate for the standard arithmetic
-// operator overloads and variadic function templates. A macro isn't the nicest
-// solution, but it beats rewriting these over and over again.
-#define BASE_NUMERIC_ARITHMETIC_VARIADIC(NAME)                                \
-  template <typename L, typename R, typename... Args>                         \
-  CheckedNumeric<typename ResultType<Checked##NAME##Op, L, R, Args...>::type> \
-      Check##NAME(const L lhs, const R rhs, const Args... args) {             \
-    return ChkMathOp<Checked##NAME##Op, L, R, Args...>(lhs, rhs, args...);    \
+  static CheckedNumeric<T> cast(
+      Src u,
+      typename std::enable_if<std::numeric_limits<Src>::is_specialized,
+                              int>::type = 0) {
+    return u;
   }
 
-#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP)               \
-  /* Binary arithmetic operator for all CheckedNumeric operations. */          \
-  template <typename L, typename R,                                            \
-            typename std::enable_if<IsCheckedOp<L, R>::value>::type* =         \
-                nullptr>                                                       \
-  CheckedNumeric<typename MathWrapper<Checked##NAME##Op, L, R>::type>          \
-  operator OP(const L lhs, const R rhs) {                                      \
-    return decltype(lhs OP rhs)::template MathOp<Checked##NAME##Op>(lhs, rhs); \
-  }                                                                            \
-  /* Assignment arithmetic operator implementation from CheckedNumeric. */     \
-  template <typename L>                                                        \
-  template <typename R>                                                        \
-  CheckedNumeric<L>& CheckedNumeric<L>::operator COMPOUND_OP(const R rhs) {    \
-    return MathOp<Checked##NAME##Op>(rhs);                                     \
-  }                                                                            \
-  /* Variadic arithmetic functions that return CheckedNumeric. */              \
-  BASE_NUMERIC_ARITHMETIC_VARIADIC(NAME)
+  template <typename Src>
+  static CheckedNumeric<T> cast(
+      const CheckedNumeric<Src>& u,
+      typename std::enable_if<!std::is_same<Src, T>::value, int>::type = 0) {
+    return u;
+  }
 
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, +=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Lsh, <<, <<=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Rsh, >>, >>=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(And, &, &=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Or, |, |=)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Xor, ^, ^=)
-BASE_NUMERIC_ARITHMETIC_VARIADIC(Max)
-BASE_NUMERIC_ARITHMETIC_VARIADIC(Min)
+  static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
 
-#undef BASE_NUMERIC_ARITHMETIC_VARIADIC
+ private:
+  template <typename NumericType>
+  struct UnderlyingType {
+    using type = NumericType;
+  };
+
+  template <typename NumericType>
+  struct UnderlyingType<CheckedNumeric<NumericType>> {
+    using type = NumericType;
+  };
+
+  CheckedNumericState<T> state_;
+};
+
+// This is the boilerplate for the standard arithmetic operator overloads. A
+// macro isn't the prettiest solution, but it beats rewriting these five times.
+// Some details worth noting are:
+//  * We apply the standard arithmetic promotions.
+//  * We skip range checks for floating points.
+//  * We skip range checks for destination integers with sufficient range.
+// TODO(jschuh): extract these out into templates.
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP)              \
+  /* Binary arithmetic operator for CheckedNumerics of the same type. */      \
+  template <typename T>                                                       \
+  CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP(          \
+      const CheckedNumeric<T>& lhs, const CheckedNumeric<T>& rhs) {           \
+    typedef typename ArithmeticPromotion<T>::type Promotion;                  \
+    /* Floating point always takes the fast path */                           \
+    if (std::numeric_limits<T>::is_iec559)                                    \
+      return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe());       \
+    if (IsIntegerArithmeticSafe<Promotion, T, T>::value)                      \
+      return CheckedNumeric<Promotion>(                                       \
+          lhs.ValueUnsafe() OP rhs.ValueUnsafe(),                             \
+          GetRangeConstraint(rhs.validity() | lhs.validity()));               \
+    RangeConstraint validity = RANGE_VALID;                                   \
+    T result = static_cast<T>(                                                \
+        Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()),              \
+                      static_cast<Promotion>(rhs.ValueUnsafe()), &validity)); \
+    return CheckedNumeric<Promotion>(                                         \
+        result,                                                               \
+        GetRangeConstraint(validity | lhs.validity() | rhs.validity()));      \
+  }                                                                           \
+  /* Assignment arithmetic operator implementation from CheckedNumeric. */    \
+  template <typename T>                                                       \
+  template <typename Src>                                                     \
+  CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) {       \
+    *this = CheckedNumeric<T>::cast(*this)                                    \
+        OP CheckedNumeric<typename UnderlyingType<Src>::type>::cast(rhs);     \
+    return *this;                                                             \
+  }                                                                           \
+  /* Binary arithmetic operator for CheckedNumeric of different type. */      \
+  template <typename T, typename Src>                                         \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      const CheckedNumeric<Src>& lhs, const CheckedNumeric<T>& rhs) {         \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(                                       \
+          lhs.ValueUnsafe() OP rhs.ValueUnsafe(),                             \
+          GetRangeConstraint(rhs.validity() | lhs.validity()));               \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }                                                                           \
+  /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
+  template <typename T, typename Src,                                         \
+            typename std::enable_if<std::is_arithmetic<Src>::value>::type* =  \
+                nullptr>                                                      \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      const CheckedNumeric<T>& lhs, Src rhs) {                                \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs,              \
+                                       lhs.validity());                       \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }                                                                           \
+  /* Binary arithmetic operator for left numeric and right CheckedNumeric. */ \
+  template <typename T, typename Src,                                         \
+            typename std::enable_if<std::is_arithmetic<Src>::value>::type* =  \
+                nullptr>                                                      \
+  CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
+      Src lhs, const CheckedNumeric<T>& rhs) {                                \
+    typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
+    if (IsIntegerArithmeticSafe<Promotion, T, Src>::value)                    \
+      return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(),              \
+                                       rhs.validity());                       \
+    return CheckedNumeric<Promotion>::cast(lhs)                               \
+        OP CheckedNumeric<Promotion>::cast(rhs);                              \
+  }
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, += )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %= )
+
 #undef BASE_NUMERIC_ARITHMETIC_OPERATORS
 
-// These are some extra StrictNumeric operators to support simple pointer
-// arithmetic with our result types. Since wrapping on a pointer is always
-// bad, we trigger the CHECK condition here.
-template <typename L, typename R>
-L* operator+(L* lhs, const StrictNumeric<R> rhs) {
-  uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
-                              CheckMul(sizeof(L), static_cast<R>(rhs)))
-                         .template ValueOrDie<uintptr_t>();
-  return reinterpret_cast<L*>(result);
-}
-
-template <typename L, typename R>
-L* operator-(L* lhs, const StrictNumeric<R> rhs) {
-  uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
-                              CheckMul(sizeof(L), static_cast<R>(rhs)))
-                         .template ValueOrDie<uintptr_t>();
-  return reinterpret_cast<L*>(result);
-}
-
 }  // namespace internal
 
 using internal::CheckedNumeric;
-using internal::IsValidForType;
-using internal::ValueOrDieForType;
-using internal::ValueOrDefaultForType;
-using internal::MakeCheckedNum;
-using internal::CheckMax;
-using internal::CheckMin;
-using internal::CheckAdd;
-using internal::CheckSub;
-using internal::CheckMul;
-using internal::CheckDiv;
-using internal::CheckMod;
-using internal::CheckLsh;
-using internal::CheckRsh;
-using internal::CheckAnd;
-using internal::CheckOr;
-using internal::CheckXor;
 
 }  // namespace base
 
diff --git a/base/numerics/safe_math_impl.h b/base/numerics/safe_math_impl.h
index a224f69..f214f3f 100644
--- a/base/numerics/safe_math_impl.h
+++ b/base/numerics/safe_math_impl.h
@@ -23,486 +23,348 @@
 // but it may not be fast. This code could be split based on
 // platform/architecture and replaced with potentially faster implementations.
 
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForSizeAndSign;
+template <>
+struct IntegerForSizeAndSign<1, true> {
+  typedef int8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<1, false> {
+  typedef uint8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, true> {
+  typedef int16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, false> {
+  typedef uint16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, true> {
+  typedef int32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, false> {
+  typedef uint32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, true> {
+  typedef int64_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, false> {
+  typedef uint64_t type;
+};
+
+// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
+// support 128-bit math, then the ArithmeticPromotion template below will need
+// to be updated (or more likely replaced with a decltype expression).
+
+template <typename Integer>
+struct UnsignedIntegerForSize {
+  typedef typename std::enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type type;
+};
+
+template <typename Integer>
+struct SignedIntegerForSize {
+  typedef typename std::enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type type;
+};
+
+template <typename Integer>
+struct TwiceWiderInteger {
+  typedef typename std::enable_if<
+      std::numeric_limits<Integer>::is_integer,
+      typename IntegerForSizeAndSign<
+          sizeof(Integer) * 2,
+          std::numeric_limits<Integer>::is_signed>::type>::type type;
+};
+
+template <typename Integer>
+struct PositionOfSignBit {
+  static const typename std::enable_if<std::numeric_limits<Integer>::is_integer,
+                                       size_t>::type value =
+      CHAR_BIT * sizeof(Integer) - 1;
+};
+
 // This is used for UnsignedAbs, where we need to support floating-point
 // template instantiations even though we don't actually support the operations.
-// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
+// However, there is no corresponding implementation of e.g. CheckedUnsignedAbs,
 // so the float versions will not compile.
 template <typename Numeric,
-          bool IsInteger = std::is_integral<Numeric>::value,
-          bool IsFloat = std::is_floating_point<Numeric>::value>
+          bool IsInteger = std::numeric_limits<Numeric>::is_integer,
+          bool IsFloat = std::numeric_limits<Numeric>::is_iec559>
 struct UnsignedOrFloatForSize;
 
 template <typename Numeric>
 struct UnsignedOrFloatForSize<Numeric, true, false> {
-  using type = typename std::make_unsigned<Numeric>::type;
+  typedef typename UnsignedIntegerForSize<Numeric>::type type;
 };
 
 template <typename Numeric>
 struct UnsignedOrFloatForSize<Numeric, false, true> {
-  using type = Numeric;
+  typedef Numeric type;
 };
 
-// Probe for builtin math overflow support on Clang and version check on GCC.
-#if defined(__has_builtin)
-#define USE_OVERFLOW_BUILTINS (__has_builtin(__builtin_add_overflow))
-#elif defined(__GNUC__)
-#define USE_OVERFLOW_BUILTINS (__GNUC__ >= 5)
-#else
-#define USE_OVERFLOW_BUILTINS (0)
-#endif
+// Helper templates for integer manipulations.
 
 template <typename T>
-bool CheckedAddImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
+constexpr bool HasSignBit(T x) {
+  // Cast to unsigned since right shift on signed is undefined.
+  return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
+            PositionOfSignBit<T>::value);
+}
+
+// This wrapper undoes the standard integer promotions.
+template <typename T>
+constexpr T BinaryComplement(T x) {
+  return static_cast<T>(~x);
+}
+
+// Here are the actual portable checked integer math implementations.
+// TODO(jschuh): Break this code out from the enable_if pattern and find a clean
+// way to coalesce things into the CheckedNumericState specializations below.
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedAdd(T x, T y, RangeConstraint* validity) {
   // Since the value of x+y is undefined if we have a signed type, we compute
   // it using the unsigned type of the same size.
-  using UnsignedDst = typename std::make_unsigned<T>::type;
-  using SignedDst = typename std::make_signed<T>::type;
+  typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
   UnsignedDst ux = static_cast<UnsignedDst>(x);
   UnsignedDst uy = static_cast<UnsignedDst>(y);
   UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
-  *result = static_cast<T>(uresult);
   // Addition is valid if the sign of (x + y) is equal to either that of x or
   // that of y.
-  return (std::is_signed<T>::value)
-             ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) >= 0
-             : uresult >= uy;  // Unsigned is either valid or underflow.
+  if (std::numeric_limits<T>::is_signed) {
+    if (HasSignBit(BinaryComplement(
+            static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))) {
+      *validity = RANGE_VALID;
+    } else {  // Direction of wrap is inverse of result sign.
+      *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+    }
+  } else {  // Unsigned is either valid or overflow.
+    *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
+  }
+  return static_cast<T>(uresult);
 }
 
-template <typename T, typename U, class Enable = void>
-struct CheckedAddOp {};
-
-template <typename T, typename U>
-struct CheckedAddOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static bool Do(T x, U y, V* result) {
-#if USE_OVERFLOW_BUILTINS
-    return !__builtin_add_overflow(x, y, result);
-#else
-    using Promotion = typename BigEnoughPromotion<T, U>::type;
-    Promotion presult;
-    // Fail if either operand is out of range for the promoted type.
-    // TODO(jschuh): This could be made to work for a broader range of values.
-    bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
-                    IsValueInRangeForNumericType<Promotion>(y);
-
-    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
-      presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
-    } else {
-      is_valid &= CheckedAddImpl(static_cast<Promotion>(x),
-                                 static_cast<Promotion>(y), &presult);
-    }
-    *result = static_cast<V>(presult);
-    return is_valid && IsValueInRangeForNumericType<V>(presult);
-#endif
-  }
-};
-
 template <typename T>
-bool CheckedSubImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
+typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedSub(T x, T y, RangeConstraint* validity) {
   // Since the value of x+y is undefined if we have a signed type, we compute
   // it using the unsigned type of the same size.
-  using UnsignedDst = typename std::make_unsigned<T>::type;
-  using SignedDst = typename std::make_signed<T>::type;
+  typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
   UnsignedDst ux = static_cast<UnsignedDst>(x);
   UnsignedDst uy = static_cast<UnsignedDst>(y);
   UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
-  *result = static_cast<T>(uresult);
   // Subtraction is valid if either x and y have same sign, or (x-y) and x have
   // the same sign.
-  return (std::is_signed<T>::value)
-             ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) >= 0
-             : x >= y;
+  if (std::numeric_limits<T>::is_signed) {
+    if (HasSignBit(BinaryComplement(
+            static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))) {
+      *validity = RANGE_VALID;
+    } else {  // Direction of wrap is inverse of result sign.
+      *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+    }
+  } else {  // Unsigned is either valid or underflow.
+    *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
+  }
+  return static_cast<T>(uresult);
 }
 
-template <typename T, typename U, class Enable = void>
-struct CheckedSubOp {};
-
-template <typename T, typename U>
-struct CheckedSubOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static bool Do(T x, U y, V* result) {
-#if USE_OVERFLOW_BUILTINS
-    return !__builtin_sub_overflow(x, y, result);
-#else
-    using Promotion = typename BigEnoughPromotion<T, U>::type;
-    Promotion presult;
-    // Fail if either operand is out of range for the promoted type.
-    // TODO(jschuh): This could be made to work for a broader range of values.
-    bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
-                    IsValueInRangeForNumericType<Promotion>(y);
-
-    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
-      presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
-    } else {
-      is_valid &= CheckedSubImpl(static_cast<Promotion>(x),
-                                 static_cast<Promotion>(y), &presult);
-    }
-    *result = static_cast<V>(presult);
-    return is_valid && IsValueInRangeForNumericType<V>(presult);
-#endif
-  }
-};
+// Integer multiplication is a bit complicated. In the fast case we just
+// we just promote to a twice wider type, and range check the result. In the
+// slow case we need to manually check that the result won't be truncated by
+// checking with division against the appropriate bound.
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            sizeof(T) * 2 <= sizeof(uintmax_t),
+                        T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  typedef typename TwiceWiderInteger<T>::type IntermediateType;
+  IntermediateType tmp =
+      static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y);
+  *validity = DstRangeRelationToSrcRange<T>(tmp);
+  return static_cast<T>(tmp);
+}
 
 template <typename T>
-bool CheckedMulImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  // Since the value of x*y is potentially undefined if we have a signed type,
-  // we compute it using the unsigned type of the same size.
-  using UnsignedDst = typename std::make_unsigned<T>::type;
-  using SignedDst = typename std::make_signed<T>::type;
-  const UnsignedDst ux = SafeUnsignedAbs(x);
-  const UnsignedDst uy = SafeUnsignedAbs(y);
-  UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
-  const bool is_negative =
-      std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
-  *result = is_negative ? 0 - uresult : uresult;
-  // We have a fast out for unsigned identity or zero on the second operand.
-  // After that it's an unsigned overflow check on the absolute value, with
-  // a +1 bound for a negative result.
-  return uy <= UnsignedDst(!std::is_signed<T>::value || is_negative) ||
-         ux <= (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy;
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed &&
+                            (sizeof(T) * 2 > sizeof(uintmax_t)),
+                        T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  // If either side is zero then the result will be zero.
+  if (!x || !y) {
+    *validity = RANGE_VALID;
+    return static_cast<T>(0);
+
+  } else if (x > 0) {
+    if (y > 0)
+      *validity =
+          x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW;
+    else
+      *validity = y >= std::numeric_limits<T>::min() / x ? RANGE_VALID
+                                                         : RANGE_UNDERFLOW;
+
+  } else {
+    if (y > 0)
+      *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID
+                                                         : RANGE_UNDERFLOW;
+    else
+      *validity =
+          y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
+  }
+
+  return static_cast<T>(x * y);
 }
 
-template <typename T, typename U, class Enable = void>
-struct CheckedMulOp {};
-
-template <typename T, typename U>
-struct CheckedMulOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static bool Do(T x, U y, V* result) {
-#if USE_OVERFLOW_BUILTINS
-#if defined(__clang__)
-    // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
-    // support full-width, mixed-sign multiply builtins.
-    // https://crbug.com/613003
-    static const bool kUseMaxInt =
-        // Narrower type than uintptr_t is always safe.
-        std::numeric_limits<__typeof__(x * y)>::digits <
-            std::numeric_limits<intptr_t>::digits ||
-        // Safe for intptr_t and uintptr_t if the sign matches.
-        (IntegerBitsPlusSign<__typeof__(x * y)>::value ==
-             IntegerBitsPlusSign<intptr_t>::value &&
-         std::is_signed<T>::value == std::is_signed<U>::value);
-#else
-    static const bool kUseMaxInt = true;
-#endif
-    if (kUseMaxInt)
-      return !__builtin_mul_overflow(x, y, result);
-#endif
-    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
-    Promotion presult;
-    // Fail if either operand is out of range for the promoted type.
-    // TODO(jschuh): This could be made to work for a broader range of values.
-    bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
-                    IsValueInRangeForNumericType<Promotion>(y);
-
-    if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
-      presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
-    } else {
-      is_valid &= CheckedMulImpl(static_cast<Promotion>(x),
-                                 static_cast<Promotion>(y), &presult);
-    }
-    *result = static_cast<V>(presult);
-    return is_valid && IsValueInRangeForNumericType<V>(presult);
-  }
-};
-
-// Avoid poluting the namespace once we're done with the macro.
-#undef USE_OVERFLOW_BUILTINS
-
-// Division just requires a check for a zero denominator or an invalid negation
-// on signed min/-1.
-template <typename T>
-bool CheckedDivImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  if (y && (!std::is_signed<T>::value ||
-            x != std::numeric_limits<T>::lowest() || y != static_cast<T>(-1))) {
-    *result = x / y;
-    return true;
-  }
-  return false;
-}
-
-template <typename T, typename U, class Enable = void>
-struct CheckedDivOp {};
-
-template <typename T, typename U>
-struct CheckedDivOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static bool Do(T x, U y, V* result) {
-    using Promotion = typename BigEnoughPromotion<T, U>::type;
-    Promotion presult;
-    // Fail if either operand is out of range for the promoted type.
-    // TODO(jschuh): This could be made to work for a broader range of values.
-    bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
-                    IsValueInRangeForNumericType<Promotion>(y);
-    is_valid &= CheckedDivImpl(static_cast<Promotion>(x),
-                               static_cast<Promotion>(y), &presult);
-    *result = static_cast<V>(presult);
-    return is_valid && IsValueInRangeForNumericType<V>(presult);
-  }
-};
-
 template <typename T>
-bool CheckedModImpl(T x, T y, T* result) {
-  static_assert(std::is_integral<T>::value, "Type must be integral");
-  if (y > 0) {
-    *result = static_cast<T>(x % y);
-    return true;
-  }
-  return false;
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed &&
+                            (sizeof(T) * 2 > sizeof(uintmax_t)),
+                        T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
+                  ? RANGE_VALID
+                  : RANGE_OVERFLOW;
+  return static_cast<T>(x * y);
 }
 
-template <typename T, typename U, class Enable = void>
-struct CheckedModOp {};
-
-template <typename T, typename U>
-struct CheckedModOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V>
-  static bool Do(T x, U y, V* result) {
-    using Promotion = typename BigEnoughPromotion<T, U>::type;
-    Promotion presult;
-    bool is_valid = CheckedModImpl(static_cast<Promotion>(x),
-                                   static_cast<Promotion>(y), &presult);
-    *result = static_cast<V>(presult);
-    return is_valid && IsValueInRangeForNumericType<V>(presult);
+// Division just requires a check for an invalid negation on signed min/-1.
+template <typename T>
+T CheckedDiv(T x,
+             T y,
+             RangeConstraint* validity,
+             typename std::enable_if<std::numeric_limits<T>::is_integer,
+                                     int>::type = 0) {
+  if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() &&
+      y == static_cast<T>(-1)) {
+    *validity = RANGE_OVERFLOW;
+    return std::numeric_limits<T>::min();
   }
-};
 
-template <typename T, typename U, class Enable = void>
-struct CheckedLshOp {};
-
-// Left shift. Shifts less than 0 or greater than or equal to the number
-// of bits in the promoted type are undefined. Shifts of negative values
-// are undefined. Otherwise it is defined when the result fits.
-template <typename T, typename U>
-struct CheckedLshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = T;
-  template <typename V>
-  static bool Do(T x, U shift, V* result) {
-    using ShiftType = typename std::make_unsigned<T>::type;
-    static const ShiftType kBitWidth = IntegerBitsPlusSign<T>::value;
-    const ShiftType real_shift = static_cast<ShiftType>(shift);
-    // Signed shift is not legal on negative values.
-    if (!IsValueNegative(x) && real_shift < kBitWidth) {
-      // Just use a multiplication because it's easy.
-      // TODO(jschuh): This could probably be made more efficient.
-      if (!std::is_signed<T>::value || real_shift != kBitWidth - 1)
-        return CheckedMulOp<T, T>::Do(x, static_cast<T>(1) << shift, result);
-      return !x;  // Special case zero for a full width signed shift.
-    }
-    return false;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedRshOp {};
-
-// Right shift. Shifts less than 0 or greater than or equal to the number
-// of bits in the promoted type are undefined. Otherwise, it is always defined,
-// but a right shift of a negative value is implementation-dependent.
-template <typename T, typename U>
-struct CheckedRshOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = T;
-  template <typename V = result_type>
-  static bool Do(T x, U shift, V* result) {
-    // Use the type conversion push negative values out of range.
-    using ShiftType = typename std::make_unsigned<T>::type;
-    if (static_cast<ShiftType>(shift) < IntegerBitsPlusSign<T>::value) {
-      T tmp = x >> shift;
-      *result = static_cast<V>(tmp);
-      return IsValueInRangeForNumericType<V>(tmp);
-    }
-    return false;
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedAndOp {};
-
-// For simplicity we support only unsigned integer results.
-template <typename T, typename U>
-struct CheckedAndOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V = result_type>
-  static bool Do(T x, U y, V* result) {
-    result_type tmp = static_cast<result_type>(x) & static_cast<result_type>(y);
-    *result = static_cast<V>(tmp);
-    return IsValueInRangeForNumericType<V>(tmp);
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedOrOp {};
-
-// For simplicity we support only unsigned integers.
-template <typename T, typename U>
-struct CheckedOrOp<T,
-                   U,
-                   typename std::enable_if<std::is_integral<T>::value &&
-                                           std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V = result_type>
-  static bool Do(T x, U y, V* result) {
-    result_type tmp = static_cast<result_type>(x) | static_cast<result_type>(y);
-    *result = static_cast<V>(tmp);
-    return IsValueInRangeForNumericType<V>(tmp);
-  }
-};
-
-template <typename T, typename U, class Enable = void>
-struct CheckedXorOp {};
-
-// For simplicity we support only unsigned integers.
-template <typename T, typename U>
-struct CheckedXorOp<T,
-                    U,
-                    typename std::enable_if<std::is_integral<T>::value &&
-                                            std::is_integral<U>::value>::type> {
-  using result_type = typename std::make_unsigned<
-      typename MaxExponentPromotion<T, U>::type>::type;
-  template <typename V = result_type>
-  static bool Do(T x, U y, V* result) {
-    result_type tmp = static_cast<result_type>(x) ^ static_cast<result_type>(y);
-    *result = static_cast<V>(tmp);
-    return IsValueInRangeForNumericType<V>(tmp);
-  }
-};
-
-// Max doesn't really need to be implemented this way because it can't fail,
-// but it makes the code much cleaner to use the MathOp wrappers.
-template <typename T, typename U, class Enable = void>
-struct CheckedMaxOp {};
-
-template <typename T, typename U>
-struct CheckedMaxOp<
-    T,
-    U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
-  using result_type = typename MaxExponentPromotion<T, U>::type;
-  template <typename V = result_type>
-  static bool Do(T x, U y, V* result) {
-    *result = IsGreater<T, U>::Test(x, y) ? static_cast<result_type>(x)
-                                          : static_cast<result_type>(y);
-    return true;
-  }
-};
-
-// Min doesn't really need to be implemented this way because it can't fail,
-// but it makes the code much cleaner to use the MathOp wrappers.
-template <typename T, typename U, class Enable = void>
-struct CheckedMinOp {};
-
-template <typename T, typename U>
-struct CheckedMinOp<
-    T,
-    U,
-    typename std::enable_if<std::is_arithmetic<T>::value &&
-                            std::is_arithmetic<U>::value>::type> {
-  using result_type = typename LowestValuePromotion<T, U>::type;
-  template <typename V = result_type>
-  static bool Do(T x, U y, V* result) {
-    *result = IsLess<T, U>::Test(x, y) ? static_cast<result_type>(x)
-                                       : static_cast<result_type>(y);
-    return true;
-  }
-};
-
-// This is just boilerplate that wraps the standard floating point arithmetic.
-// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
-#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP)                                    \
-  template <typename T, typename U>                                            \
-  struct Checked##NAME##Op<                                                    \
-      T, U, typename std::enable_if<std::is_floating_point<T>::value ||        \
-                                    std::is_floating_point<U>::value>::type> { \
-    using result_type = typename MaxExponentPromotion<T, U>::type;             \
-    template <typename V>                                                      \
-    static bool Do(T x, U y, V* result) {                                      \
-      using Promotion = typename MaxExponentPromotion<T, U>::type;             \
-      Promotion presult = x OP y;                                              \
-      *result = static_cast<V>(presult);                                       \
-      return IsValueInRangeForNumericType<V>(presult);                         \
-    }                                                                          \
-  };
-
-BASE_FLOAT_ARITHMETIC_OPS(Add, +)
-BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
-BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
-BASE_FLOAT_ARITHMETIC_OPS(Div, /)
-
-#undef BASE_FLOAT_ARITHMETIC_OPS
-
-// Wrap the unary operations to allow SFINAE when instantiating integrals versus
-// floating points. These don't perform any overflow checking. Rather, they
-// exhibit well-defined overflow semantics and rely on the caller to detect
-// if an overflow occured.
-
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-constexpr T NegateWrapper(T value) {
-  using UnsignedT = typename std::make_unsigned<T>::type;
-  // This will compile to a NEG on Intel, and is normal negation on ARM.
-  return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
+  *validity = RANGE_VALID;
+  return static_cast<T>(x / y);
 }
 
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
-constexpr T NegateWrapper(T value) {
-  return -value;
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+  *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
+  return static_cast<T>(x % y);
 }
 
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
-  return ~value;
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+  *validity = RANGE_VALID;
+  return static_cast<T>(x % y);
 }
 
-template <typename T,
-          typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-constexpr T AbsWrapper(T value) {
-  return static_cast<T>(SafeUnsignedAbs(value));
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+  *validity =
+      value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+  // The negation of signed min is min, so catch that one.
+  return static_cast<T>(-value);
 }
 
-template <
-    typename T,
-    typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
-constexpr T AbsWrapper(T value) {
-  return value < 0 ? -value : value;
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+  // The only legal unsigned negation is zero.
+  *validity = value ? RANGE_UNDERFLOW : RANGE_VALID;
+  return static_cast<T>(
+      -static_cast<typename SignedIntegerForSize<T>::type>(value));
+}
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+  *validity =
+      value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+  return static_cast<T>(std::abs(value));
+}
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+  // T is unsigned, so |value| must already be positive.
+  *validity = RANGE_VALID;
+  return value;
+}
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            std::numeric_limits<T>::is_signed,
+                        typename UnsignedIntegerForSize<T>::type>::type
+CheckedUnsignedAbs(T value) {
+  typedef typename UnsignedIntegerForSize<T>::type UnsignedT;
+  return value == std::numeric_limits<T>::min()
+             ? static_cast<UnsignedT>(std::numeric_limits<T>::max()) + 1
+             : static_cast<UnsignedT>(std::abs(value));
+}
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+                            !std::numeric_limits<T>::is_signed,
+                        T>::type
+CheckedUnsignedAbs(T value) {
+  // T is unsigned, so |value| must already be positive.
+  return static_cast<T>(value);
+}
+
+// These are the floating point stubs that the compiler needs to see. Only the
+// negation operation is ever called.
+#define BASE_FLOAT_ARITHMETIC_STUBS(NAME)                             \
+  template <typename T>                                               \
+  typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type \
+      Checked##NAME(T, T, RangeConstraint*) {                         \
+    NOTREACHED();                                                     \
+    return static_cast<T>(0);                                         \
+  }
+
+BASE_FLOAT_ARITHMETIC_STUBS(Add)
+BASE_FLOAT_ARITHMETIC_STUBS(Sub)
+BASE_FLOAT_ARITHMETIC_STUBS(Mul)
+BASE_FLOAT_ARITHMETIC_STUBS(Div)
+BASE_FLOAT_ARITHMETIC_STUBS(Mod)
+
+#undef BASE_FLOAT_ARITHMETIC_STUBS
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
+    T value,
+    RangeConstraint*) {
+  return static_cast<T>(-value);
+}
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
+    T value,
+    RangeConstraint*) {
+  return static_cast<T>(std::abs(value));
 }
 
 // Floats carry around their validity state with them, but integers do not. So,
@@ -517,10 +379,10 @@
 template <typename NumericType>
 struct GetNumericRepresentation {
   static const NumericRepresentation value =
-      std::is_integral<NumericType>::value
+      std::numeric_limits<NumericType>::is_integer
           ? NUMERIC_INTEGER
-          : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
-                                                        : NUMERIC_UNKNOWN);
+          : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING
+                                                         : NUMERIC_UNKNOWN);
 };
 
 template <typename T, NumericRepresentation type =
@@ -531,48 +393,41 @@
 template <typename T>
 class CheckedNumericState<T, NUMERIC_INTEGER> {
  private:
-  // is_valid_ precedes value_ because member intializers in the constructors
-  // are evaluated in field order, and is_valid_ must be read when initializing
-  // value_.
-  bool is_valid_;
   T value_;
-
-  // Ensures that a type conversion does not trigger undefined behavior.
-  template <typename Src>
-  static constexpr T WellDefinedConversionOrZero(const Src value,
-                                                 const bool is_valid) {
-    using SrcType = typename internal::UnderlyingType<Src>::type;
-    return (std::is_integral<SrcType>::value || is_valid)
-               ? static_cast<T>(value)
-               : static_cast<T>(0);
-  }
+  RangeConstraint validity_ : CHAR_BIT;  // Actually requires only two bits.
 
  public:
   template <typename Src, NumericRepresentation type>
   friend class CheckedNumericState;
 
-  constexpr CheckedNumericState() : is_valid_(true), value_(0) {}
+  CheckedNumericState() : value_(0), validity_(RANGE_VALID) {}
 
   template <typename Src>
-  constexpr CheckedNumericState(Src value, bool is_valid)
-      : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
-        value_(WellDefinedConversionOrZero(value, is_valid_)) {
-    static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+  CheckedNumericState(Src value, RangeConstraint validity)
+      : value_(static_cast<T>(value)),
+        validity_(GetRangeConstraint(validity |
+                                     DstRangeRelationToSrcRange<T>(value))) {
+    static_assert(std::numeric_limits<Src>::is_specialized,
+                  "Argument must be numeric.");
   }
 
   // Copy constructor.
   template <typename Src>
-  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
-      : is_valid_(rhs.IsValid()),
-        value_(WellDefinedConversionOrZero(rhs.value(), is_valid_)) {}
+  CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : value_(static_cast<T>(rhs.value())),
+        validity_(GetRangeConstraint(
+            rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) {}
 
   template <typename Src>
-  constexpr explicit CheckedNumericState(Src value)
-      : is_valid_(IsValueInRangeForNumericType<T>(value)),
-        value_(WellDefinedConversionOrZero(value, is_valid_)) {}
+  explicit CheckedNumericState(
+      Src value,
+      typename std::enable_if<std::numeric_limits<Src>::is_specialized,
+                              int>::type = 0)
+      : value_(static_cast<T>(value)),
+        validity_(DstRangeRelationToSrcRange<T>(value)) {}
 
-  constexpr bool is_valid() const { return is_valid_; }
-  constexpr T value() const { return value_; }
+  RangeConstraint validity() const { return validity_; }
+  T value() const { return value_; }
 };
 
 // Floating points maintain their own validity, but need translation wrappers.
@@ -581,58 +436,94 @@
  private:
   T value_;
 
-  // Ensures that a type conversion does not trigger undefined behavior.
-  template <typename Src>
-  static constexpr T WellDefinedConversionOrNaN(const Src value,
-                                                const bool is_valid) {
-    using SrcType = typename internal::UnderlyingType<Src>::type;
-    return (StaticDstRangeRelationToSrcRange<T, SrcType>::value ==
-                NUMERIC_RANGE_CONTAINED ||
-            is_valid)
-               ? static_cast<T>(value)
-               : std::numeric_limits<T>::quiet_NaN();
-  }
-
  public:
   template <typename Src, NumericRepresentation type>
   friend class CheckedNumericState;
 
-  constexpr CheckedNumericState() : value_(0.0) {}
+  CheckedNumericState() : value_(0.0) {}
 
   template <typename Src>
-  constexpr CheckedNumericState(Src value, bool is_valid)
-      : value_(WellDefinedConversionOrNaN(value, is_valid)) {}
+  CheckedNumericState(
+      Src value,
+      RangeConstraint /*validity*/,
+      typename std::enable_if<std::numeric_limits<Src>::is_integer, int>::type =
+          0) {
+    switch (DstRangeRelationToSrcRange<T>(value)) {
+      case RANGE_VALID:
+        value_ = static_cast<T>(value);
+        break;
+
+      case RANGE_UNDERFLOW:
+        value_ = -std::numeric_limits<T>::infinity();
+        break;
+
+      case RANGE_OVERFLOW:
+        value_ = std::numeric_limits<T>::infinity();
+        break;
+
+      case RANGE_INVALID:
+        value_ = std::numeric_limits<T>::quiet_NaN();
+        break;
+
+      default:
+        NOTREACHED();
+    }
+  }
 
   template <typename Src>
-  constexpr explicit CheckedNumericState(Src value)
-      : value_(WellDefinedConversionOrNaN(
-            value,
-            IsValueInRangeForNumericType<T>(value))) {}
+  explicit CheckedNumericState(
+      Src value,
+      typename std::enable_if<std::numeric_limits<Src>::is_specialized,
+                              int>::type = 0)
+      : value_(static_cast<T>(value)) {}
 
   // Copy constructor.
   template <typename Src>
-  constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
-      : value_(WellDefinedConversionOrNaN(
-            rhs.value(),
-            rhs.is_valid() && IsValueInRangeForNumericType<T>(rhs.value()))) {}
+  CheckedNumericState(const CheckedNumericState<Src>& rhs)
+      : value_(static_cast<T>(rhs.value())) {}
 
-  constexpr bool is_valid() const {
-    // Written this way because std::isfinite is not reliably constexpr.
-    // TODO(jschuh): Fix this if the libraries ever get fixed.
-    return value_ <= std::numeric_limits<T>::max() &&
-           value_ >= std::numeric_limits<T>::lowest();
+  RangeConstraint validity() const {
+    return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(),
+                              value_ >= -std::numeric_limits<T>::max());
   }
-  constexpr T value() const { return value_; }
+  T value() const { return value_; }
 };
 
-template <template <typename, typename, typename> class M,
-          typename L,
-          typename R>
-struct MathWrapper {
-  using math = M<typename UnderlyingType<L>::type,
-                 typename UnderlyingType<R>::type,
-                 void>;
-  using type = typename math::result_type;
+// For integers less than 128-bit and floats 32-bit or larger, we have the type
+// with the larger maximum exponent take precedence.
+enum ArithmeticPromotionCategory { LEFT_PROMOTION, RIGHT_PROMOTION };
+
+template <typename Lhs,
+          typename Rhs = Lhs,
+          ArithmeticPromotionCategory Promotion =
+              (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+                  ? LEFT_PROMOTION
+                  : RIGHT_PROMOTION>
+struct ArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+  typedef Lhs type;
+};
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+  typedef Rhs type;
+};
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs>
+struct IsIntegerArithmeticSafe {
+  static const bool value = !std::numeric_limits<T>::is_iec559 &&
+                            StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
+                                NUMERIC_RANGE_CONTAINED &&
+                            sizeof(T) >= (2 * sizeof(Lhs)) &&
+                            StaticDstRangeRelationToSrcRange<T, Rhs>::value !=
+                                NUMERIC_RANGE_CONTAINED &&
+                            sizeof(T) >= (2 * sizeof(Rhs));
 };
 
 }  // namespace internal
diff --git a/base/numerics/safe_numerics_unittest.cc b/base/numerics/safe_numerics_unittest.cc
index ec6d003..4be7ab5 100644
--- a/base/numerics/safe_numerics_unittest.cc
+++ b/base/numerics/safe_numerics_unittest.cc
@@ -9,10 +9,8 @@
 #include <type_traits>
 
 #include "base/compiler_specific.h"
-#include "base/logging.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/numerics/safe_math.h"
-#include "base/test/gtest_util.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -22,44 +20,33 @@
 
 using std::numeric_limits;
 using base::CheckedNumeric;
-using base::IsValidForType;
-using base::ValueOrDieForType;
-using base::ValueOrDefaultForType;
-using base::MakeCheckedNum;
-using base::CheckMax;
-using base::CheckMin;
-using base::CheckAdd;
-using base::CheckSub;
-using base::CheckMul;
-using base::CheckDiv;
-using base::CheckMod;
-using base::CheckLsh;
-using base::CheckRsh;
 using base::checked_cast;
 using base::IsValueInRangeForNumericType;
 using base::IsValueNegative;
 using base::SizeT;
 using base::StrictNumeric;
-using base::MakeStrictNum;
 using base::saturated_cast;
 using base::strict_cast;
 using base::internal::MaxExponent;
-using base::internal::IntegerBitsPlusSign;
-using base::internal::RangeCheck;
+using base::internal::RANGE_VALID;
+using base::internal::RANGE_INVALID;
+using base::internal::RANGE_OVERFLOW;
+using base::internal::RANGE_UNDERFLOW;
+using base::internal::SignedIntegerForSize;
 
-// These tests deliberately cause arithmetic boundary errors. If the compiler is
-// aggressive enough, it can const detect these errors, so we disable warnings.
+// These tests deliberately cause arithmetic overflows. If the compiler is
+// aggressive enough, it can const fold these overflows. Disable warnings about
+// overflows for const expressions.
 #if defined(OS_WIN)
-#pragma warning(disable : 4756)  // Arithmetic overflow.
-#pragma warning(disable : 4293)  // Invalid shift.
+#pragma warning(disable:4756)
 #endif
 
 // This is a helper function for finding the maximum value in Src that can be
 // wholy represented as the destination floating-point type.
 template <typename Dst, typename Src>
 Dst GetMaxConvertibleToFloat() {
-  using DstLimits = numeric_limits<Dst>;
-  using SrcLimits = numeric_limits<Src>;
+  typedef numeric_limits<Dst> DstLimits;
+  typedef numeric_limits<Src> SrcLimits;
   static_assert(SrcLimits::is_specialized, "Source must be numeric.");
   static_assert(DstLimits::is_specialized, "Destination must be numeric.");
   CHECK(DstLimits::is_iec559);
@@ -74,113 +61,20 @@
   return static_cast<Dst>(max);
 }
 
-namespace base {
-namespace internal {
-
-// Test corner case promotions used
-static_assert(IsIntegerArithmeticSafe<int32_t, int8_t, int8_t>::value, "");
-static_assert(IsIntegerArithmeticSafe<int32_t, int16_t, int8_t>::value, "");
-static_assert(IsIntegerArithmeticSafe<int32_t, int8_t, int16_t>::value, "");
-static_assert(!IsIntegerArithmeticSafe<int32_t, int32_t, int8_t>::value, "");
-static_assert(BigEnoughPromotion<int16_t, int8_t>::is_contained, "");
-static_assert(BigEnoughPromotion<int32_t, uint32_t>::is_contained, "");
-static_assert(BigEnoughPromotion<intmax_t, int8_t>::is_contained, "");
-static_assert(!BigEnoughPromotion<uintmax_t, int8_t>::is_contained, "");
-static_assert(
-    std::is_same<BigEnoughPromotion<int16_t, int8_t>::type, int16_t>::value,
-    "");
-static_assert(
-    std::is_same<BigEnoughPromotion<int32_t, uint32_t>::type, int64_t>::value,
-    "");
-static_assert(
-    std::is_same<BigEnoughPromotion<intmax_t, int8_t>::type, intmax_t>::value,
-    "");
-static_assert(
-    std::is_same<BigEnoughPromotion<uintmax_t, int8_t>::type, uintmax_t>::value,
-    "");
-static_assert(BigEnoughPromotion<int16_t, int8_t>::is_contained, "");
-static_assert(BigEnoughPromotion<int32_t, uint32_t>::is_contained, "");
-static_assert(BigEnoughPromotion<intmax_t, int8_t>::is_contained, "");
-static_assert(!BigEnoughPromotion<uintmax_t, int8_t>::is_contained, "");
-static_assert(
-    std::is_same<FastIntegerArithmeticPromotion<int16_t, int8_t>::type,
-                 int32_t>::value,
-    "");
-static_assert(
-    std::is_same<FastIntegerArithmeticPromotion<int32_t, uint32_t>::type,
-                 int64_t>::value,
-    "");
-static_assert(
-    std::is_same<FastIntegerArithmeticPromotion<intmax_t, int8_t>::type,
-                 intmax_t>::value,
-    "");
-static_assert(
-    std::is_same<FastIntegerArithmeticPromotion<uintmax_t, int8_t>::type,
-                 uintmax_t>::value,
-    "");
-static_assert(FastIntegerArithmeticPromotion<int16_t, int8_t>::is_contained,
-              "");
-static_assert(FastIntegerArithmeticPromotion<int32_t, uint32_t>::is_contained,
-              "");
-static_assert(!FastIntegerArithmeticPromotion<intmax_t, int8_t>::is_contained,
-              "");
-static_assert(!FastIntegerArithmeticPromotion<uintmax_t, int8_t>::is_contained,
-              "");
-
-template <typename U>
-U GetNumericValueForTest(const CheckedNumeric<U>& src) {
-  return src.state_.value();
-}
-}  // namespace internal.
-}  // namespace base.
-
-using base::internal::GetNumericValueForTest;
-
-// Logs the ValueOrDie() failure instead of crashing.
-struct LogOnFailure {
-  template <typename T>
-  static T HandleFailure() {
-    LOG(WARNING) << "ValueOrDie() failed unexpectedly.";
-    return T();
-  }
-};
-
 // Helper macros to wrap displaying the conversion types and line numbers.
 #define TEST_EXPECTED_VALIDITY(expected, actual)                           \
-  EXPECT_EQ(expected, (actual).template Cast<Dst>().IsValid())             \
-      << "Result test: Value " << GetNumericValueForTest(actual) << " as " \
-      << dst << " on line " << line
+  EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).IsValid())               \
+      << "Result test: Value " << +(actual).ValueUnsafe() << " as " << dst \
+      << " on line " << line;
 
 #define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
 #define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
 
-// We have to handle promotions, so infer the underlying type below from actual.
-#define TEST_EXPECTED_VALUE(expected, actual)                               \
-  EXPECT_EQ(static_cast<typename std::decay<decltype(actual)>::type::type>( \
-                expected),                                                  \
-            ((actual)                                                       \
-                 .template ValueOrDie<                                      \
-                     typename std::decay<decltype(actual)>::type::type,     \
-                     LogOnFailure>()))                                      \
-      << "Result test: Value " << GetNumericValueForTest(actual) << " as "  \
-      << dst << " on line " << line
-
-// Test the simple pointer arithmetic overrides.
-template <typename Dst>
-void TestStrictPointerMath() {
-  Dst dummy_value = 0;
-  Dst* dummy_ptr = &dummy_value;
-  static const Dst kDummyOffset = 2;  // Don't want to go too far.
-  EXPECT_EQ(dummy_ptr + kDummyOffset,
-            dummy_ptr + StrictNumeric<Dst>(kDummyOffset));
-  EXPECT_EQ(dummy_ptr - kDummyOffset,
-            dummy_ptr - StrictNumeric<Dst>(kDummyOffset));
-  EXPECT_NE(dummy_ptr, dummy_ptr + StrictNumeric<Dst>(kDummyOffset));
-  EXPECT_NE(dummy_ptr, dummy_ptr - StrictNumeric<Dst>(kDummyOffset));
-  EXPECT_DEATH_IF_SUPPORTED(
-      dummy_ptr + StrictNumeric<size_t>(std::numeric_limits<size_t>::max()),
-      "");
-}
+#define TEST_EXPECTED_VALUE(expected, actual)                                \
+  EXPECT_EQ(static_cast<Dst>(expected),                                      \
+            CheckedNumeric<Dst>(actual).ValueUnsafe())                       \
+      << "Result test: Value " << +((actual).ValueUnsafe()) << " as " << dst \
+      << " on line " << line;
 
 // Signed integer arithmetic.
 template <typename Dst>
@@ -190,52 +84,34 @@
     typename std::enable_if<numeric_limits<Dst>::is_integer &&
                                 numeric_limits<Dst>::is_signed,
                             int>::type = 0) {
-  using DstLimits = numeric_limits<Dst>;
-  TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::lowest()));
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
+  typedef numeric_limits<Dst> DstLimits;
+  TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()).Abs());
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
-  TEST_EXPECTED_VALUE(DstLimits::max(),
-                      MakeCheckedNum(-DstLimits::max()).Abs());
 
   TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + -1);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) +
-                        DstLimits::lowest());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+                        -DstLimits::max());
 
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) - 1);
-  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) - -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) - -1);
   TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
-                        DstLimits::lowest());
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) -
+                        -DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
                         DstLimits::max());
 
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * 2);
 
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) / -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) / -1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * -1);
-  TEST_EXPECTED_VALUE(DstLimits::max(),
-                      CheckedNumeric<Dst>(DstLimits::lowest() + 1) * Dst(-1));
-  TEST_EXPECTED_VALUE(DstLimits::max(),
-                      CheckedNumeric<Dst>(-1) * Dst(DstLimits::lowest() + 1));
-  TEST_EXPECTED_VALUE(DstLimits::lowest(),
-                      CheckedNumeric<Dst>(DstLimits::lowest()) * Dst(1));
-  TEST_EXPECTED_VALUE(DstLimits::lowest(),
-                      CheckedNumeric<Dst>(1) * Dst(DstLimits::lowest()));
-  TEST_EXPECTED_VALUE(DstLimits::lowest(),
-                      MakeCheckedNum(DstLimits::lowest()).UnsignedAbs());
-  TEST_EXPECTED_VALUE(DstLimits::max(),
-                      MakeCheckedNum(DstLimits::max()).UnsignedAbs());
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0).UnsignedAbs());
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).UnsignedAbs());
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).UnsignedAbs());
 
   // Modulus is legal only for integers.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
   TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
   TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-1) % -2);
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
   // Test all the different modulus combinations.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
@@ -243,30 +119,6 @@
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
   CheckedNumeric<Dst> checked_dst = 1;
   TEST_EXPECTED_VALUE(0, checked_dst %= 1);
-  // Test that div by 0 is avoided but returns invalid result.
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
-  // Test bit shifts.
-  volatile Dst negative_one = -1;
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
-                        << (IntegerBitsPlusSign<Dst>::value - 1));
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0)
-                        << IntegerBitsPlusSign<Dst>::value);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
-  TEST_EXPECTED_VALUE(
-      static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2),
-      CheckedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2));
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0)
-                             << (IntegerBitsPlusSign<Dst>::value - 1));
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
-  TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >>
-                        IntegerBitsPlusSign<Dst>::value);
-  TEST_EXPECTED_VALUE(
-      0, CheckedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1));
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
-
-  TestStrictPointerMath<Dst>();
 }
 
 // Unsigned integer arithmetic.
@@ -277,30 +129,24 @@
     typename std::enable_if<numeric_limits<Dst>::is_integer &&
                                 !numeric_limits<Dst>::is_signed,
                             int>::type = 0) {
-  using DstLimits = numeric_limits<Dst>;
-  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
-  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) - 1);
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
+  typedef numeric_limits<Dst> DstLimits;
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) * 2);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
-  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).UnsignedAbs());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
   TEST_EXPECTED_SUCCESS(
-      CheckedNumeric<typename std::make_signed<Dst>::type>(
-          std::numeric_limits<typename std::make_signed<Dst>::type>::lowest())
+      CheckedNumeric<typename SignedIntegerForSize<Dst>::type>(
+          std::numeric_limits<typename SignedIntegerForSize<Dst>::type>::min())
           .UnsignedAbs());
-  TEST_EXPECTED_VALUE(DstLimits::lowest(),
-                      MakeCheckedNum(DstLimits::lowest()).UnsignedAbs());
-  TEST_EXPECTED_VALUE(DstLimits::max(),
-                      MakeCheckedNum(DstLimits::max()).UnsignedAbs());
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0).UnsignedAbs());
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).UnsignedAbs());
 
   // Modulus is legal only for integers.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) % 2);
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
+  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
   // Test all the different modulus combinations.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
@@ -308,49 +154,6 @@
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
   CheckedNumeric<Dst> checked_dst = 1;
   TEST_EXPECTED_VALUE(0, checked_dst %= 1);
-  // Test that div by 0 is avoided but returns invalid result.
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
-                        << IntegerBitsPlusSign<Dst>::value);
-  // Test bit shifts.
-  volatile int negative_one = -1;
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
-                        << IntegerBitsPlusSign<Dst>::value);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0)
-                        << IntegerBitsPlusSign<Dst>::value);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
-  TEST_EXPECTED_VALUE(
-      static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1),
-      CheckedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1));
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
-  TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >>
-                        IntegerBitsPlusSign<Dst>::value);
-  TEST_EXPECTED_VALUE(
-      0, CheckedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1));
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) & 1);
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) & 0);
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) & 1);
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) & 0);
-  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
-                      MakeCheckedNum(DstLimits::max()) & -1);
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) | 1);
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) | 0);
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(0) | 1);
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) | 0);
-  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
-                      CheckedNumeric<Dst>(0) | static_cast<Dst>(-1));
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) ^ 1);
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) ^ 0);
-  TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(0) ^ 1);
-  TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) ^ 0);
-  TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
-                      CheckedNumeric<Dst>(0) ^ static_cast<Dst>(-1));
-  TEST_EXPECTED_VALUE(DstLimits::max(), ~CheckedNumeric<Dst>(0));
-
-  TestStrictPointerMath<Dst>();
 }
 
 // Floating point arithmetic.
@@ -359,31 +162,32 @@
     const char* dst,
     int line,
     typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
-  using DstLimits = numeric_limits<Dst>;
-  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
+  typedef numeric_limits<Dst> DstLimits;
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
 
-  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
 
-  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + -1);
   TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + 1);
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) +
-                        DstLimits::lowest());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+                        -DstLimits::max());
 
   TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
-                        DstLimits::lowest());
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) -
+                        -DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
                         DstLimits::max());
 
-  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) * 2);
 
   TEST_EXPECTED_VALUE(-0.5, CheckedNumeric<Dst>(-1.0) / 2);
+  EXPECT_EQ(static_cast<Dst>(1.0), CheckedNumeric<Dst>(1.0).ValueFloating());
 }
 
 // Generic arithmetic tests.
 template <typename Dst>
 static void TestArithmetic(const char* dst, int line) {
-  using DstLimits = numeric_limits<Dst>;
+  typedef numeric_limits<Dst> DstLimits;
 
   EXPECT_EQ(true, CheckedNumeric<Dst>().IsValid());
   EXPECT_EQ(false,
@@ -418,13 +222,11 @@
   TEST_EXPECTED_VALUE(1, checked_dst /= 1);
 
   // Generic negation.
-  if (DstLimits::is_signed) {
-    TEST_EXPECTED_VALUE(0, -CheckedNumeric<Dst>());
-    TEST_EXPECTED_VALUE(-1, -CheckedNumeric<Dst>(1));
-    TEST_EXPECTED_VALUE(1, -CheckedNumeric<Dst>(-1));
-    TEST_EXPECTED_VALUE(static_cast<Dst>(DstLimits::max() * -1),
-                        -CheckedNumeric<Dst>(DstLimits::max()));
-  }
+  TEST_EXPECTED_VALUE(0, -CheckedNumeric<Dst>());
+  TEST_EXPECTED_VALUE(-1, -CheckedNumeric<Dst>(1));
+  TEST_EXPECTED_VALUE(1, -CheckedNumeric<Dst>(-1));
+  TEST_EXPECTED_VALUE(static_cast<Dst>(DstLimits::max() * -1),
+                      -CheckedNumeric<Dst>(DstLimits::max()));
 
   // Generic absolute value.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>().Abs());
@@ -435,43 +237,32 @@
   // Generic addition.
   TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>() + 1));
   TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
-  if (numeric_limits<Dst>::is_signed)
-    TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
-  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) + 1);
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + 1);
   TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
                         DstLimits::max());
 
   // Generic subtraction.
+  TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(1) - 1));
+  TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
   TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) - 1);
-  if (numeric_limits<Dst>::is_signed) {
-    TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
-    TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
-  } else {
-    TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) - -1);
-  }
 
   // Generic multiplication.
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>() * 1));
   TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>(1) * 1));
+  TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) * 2));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * 0));
-  if (numeric_limits<Dst>::is_signed) {
-    TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
-    TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
-    TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) * 2));
-  } else {
-    TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) * -2);
-    TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
-                          CheckedNumeric<uintmax_t>(-2));
-  }
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
+  TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
   TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
                         DstLimits::max());
 
   // Generic division.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / 1);
-  TEST_EXPECTED_VALUE(DstLimits::lowest() / 2,
-                      CheckedNumeric<Dst>(DstLimits::lowest()) / 2);
+  TEST_EXPECTED_VALUE(DstLimits::min() / 2,
+                      CheckedNumeric<Dst>(DstLimits::min()) / 2);
   TEST_EXPECTED_VALUE(DstLimits::max() / 2,
                       CheckedNumeric<Dst>(DstLimits::max()) / 2);
 
@@ -513,114 +304,28 @@
 template <typename Dst, typename Src, NumericConversionType conversion>
 struct TestNumericConversion {};
 
-enum RangeConstraint {
-  RANGE_VALID = 0x0,      // Value can be represented by the destination type.
-  RANGE_UNDERFLOW = 0x1,  // Value would underflow.
-  RANGE_OVERFLOW = 0x2,   // Value would overflow.
-  RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW  // Invalid (i.e. NaN).
-};
-
-// These are some wrappers to make the tests a bit cleaner.
-constexpr RangeConstraint RangeCheckToEnum(const RangeCheck constraint) {
-  return static_cast<RangeConstraint>(
-      static_cast<int>(constraint.IsOverflowFlagSet()) << 1 |
-      static_cast<int>(constraint.IsUnderflowFlagSet()));
-}
-
 // EXPECT_EQ wrappers providing specific detail on test failures.
-#define TEST_EXPECTED_RANGE(expected, actual)                               \
-  EXPECT_EQ(expected,                                                       \
-            RangeCheckToEnum(                                               \
-                base::internal::DstRangeRelationToSrcRange<Dst>(actual)))   \
-      << "Conversion test: " << src << " value " << actual << " to " << dst \
-      << " on line " << line
-
-template <typename Dst, typename Src>
-void TestStrictComparison() {
-  using DstLimits = numeric_limits<Dst>;
-  using SrcLimits = numeric_limits<Src>;
-  static_assert(StrictNumeric<Src>(SrcLimits::lowest()) < DstLimits::max(), "");
-  static_assert(StrictNumeric<Src>(SrcLimits::lowest()) < SrcLimits::max(), "");
-  static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) >= DstLimits::max()),
-                "");
-  static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) >= SrcLimits::max()),
-                "");
-  static_assert(StrictNumeric<Src>(SrcLimits::lowest()) <= DstLimits::max(),
-                "");
-  static_assert(StrictNumeric<Src>(SrcLimits::lowest()) <= SrcLimits::max(),
-                "");
-  static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) > DstLimits::max()),
-                "");
-  static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) > SrcLimits::max()),
-                "");
-  static_assert(StrictNumeric<Src>(SrcLimits::max()) > DstLimits::lowest(), "");
-  static_assert(StrictNumeric<Src>(SrcLimits::max()) > SrcLimits::lowest(), "");
-  static_assert(!(StrictNumeric<Src>(SrcLimits::max()) <= DstLimits::lowest()),
-                "");
-  static_assert(!(StrictNumeric<Src>(SrcLimits::max()) <= SrcLimits::lowest()),
-                "");
-  static_assert(StrictNumeric<Src>(SrcLimits::max()) >= DstLimits::lowest(),
-                "");
-  static_assert(StrictNumeric<Src>(SrcLimits::max()) >= SrcLimits::lowest(),
-                "");
-  static_assert(!(StrictNumeric<Src>(SrcLimits::max()) < DstLimits::lowest()),
-                "");
-  static_assert(!(StrictNumeric<Src>(SrcLimits::max()) < SrcLimits::lowest()),
-                "");
-  static_assert(StrictNumeric<Src>(static_cast<Src>(1)) == static_cast<Dst>(1),
-                "");
-  static_assert(StrictNumeric<Src>(static_cast<Src>(1)) != static_cast<Dst>(0),
-                "");
-  static_assert(StrictNumeric<Src>(SrcLimits::max()) != static_cast<Dst>(0),
-                "");
-  static_assert(StrictNumeric<Src>(SrcLimits::max()) != DstLimits::lowest(),
-                "");
-  static_assert(
-      !(StrictNumeric<Src>(static_cast<Src>(1)) != static_cast<Dst>(1)), "");
-  static_assert(
-      !(StrictNumeric<Src>(static_cast<Src>(1)) == static_cast<Dst>(0)), "");
-
-  // Due to differences in float handling between compilers, these aren't
-  // compile-time constants everywhere. So, we use run-time tests.
-  EXPECT_EQ(
-      SrcLimits::max(),
-      MakeCheckedNum(SrcLimits::max()).Max(DstLimits::lowest()).ValueOrDie());
-  EXPECT_EQ(
-      DstLimits::max(),
-      MakeCheckedNum(SrcLimits::lowest()).Max(DstLimits::max()).ValueOrDie());
-  EXPECT_EQ(
-      DstLimits::lowest(),
-      MakeCheckedNum(SrcLimits::max()).Min(DstLimits::lowest()).ValueOrDie());
-  EXPECT_EQ(
-      SrcLimits::lowest(),
-      MakeCheckedNum(SrcLimits::lowest()).Min(DstLimits::max()).ValueOrDie());
-  EXPECT_EQ(SrcLimits::lowest(), CheckMin(MakeStrictNum(1), MakeCheckedNum(0),
-                                          DstLimits::max(), SrcLimits::lowest())
-                                     .ValueOrDie());
-  EXPECT_EQ(DstLimits::max(), CheckMax(MakeStrictNum(1), MakeCheckedNum(0),
-                                       DstLimits::max(), SrcLimits::lowest())
-                                  .ValueOrDie());
-}
+#define TEST_EXPECTED_RANGE(expected, actual)                                  \
+  EXPECT_EQ(expected, base::internal::DstRangeRelationToSrcRange<Dst>(actual)) \
+      << "Conversion test: " << src << " value " << actual << " to " << dst    \
+      << " on line " << line;
 
 template <typename Dst, typename Src>
 struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
   static void Test(const char *dst, const char *src, int line) {
-    using SrcLimits = numeric_limits<Src>;
-    using DstLimits = numeric_limits<Dst>;
-    // Integral to floating.
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+                   // Integral to floating.
     static_assert((DstLimits::is_iec559 && SrcLimits::is_integer) ||
-                      // Not floating to integral and...
-                      (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
-                       // Same sign, same numeric, source is narrower or same.
-                       ((SrcLimits::is_signed == DstLimits::is_signed &&
-                         MaxExponent<Dst>::value >= MaxExponent<Src>::value) ||
-                        // Or signed destination and source is smaller
-                        (DstLimits::is_signed &&
-                         MaxExponent<Dst>::value >= MaxExponent<Src>::value))),
+                  // Not floating to integral and...
+                  (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
+                   // Same sign, same numeric, source is narrower or same.
+                   ((SrcLimits::is_signed == DstLimits::is_signed &&
+                    sizeof(Dst) >= sizeof(Src)) ||
+                   // Or signed destination and source is smaller
+                    (DstLimits::is_signed && sizeof(Dst) > sizeof(Src)))),
                   "Comparison must be sign preserving and value preserving");
 
-    TestStrictComparison<Dst, Src>();
-
     const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
     TEST_EXPECTED_SUCCESS(checked_dst);
     if (MaxExponent<Dst>::value > MaxExponent<Src>::value) {
@@ -645,7 +350,7 @@
       TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
     } else if (numeric_limits<Src>::is_signed) {
       TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
-      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
+      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
     }
   }
 };
@@ -653,15 +358,14 @@
 template <typename Dst, typename Src>
 struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
   static void Test(const char *dst, const char *src, int line) {
-    using SrcLimits = numeric_limits<Src>;
-    using DstLimits = numeric_limits<Dst>;
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
     static_assert(SrcLimits::is_signed == DstLimits::is_signed,
                   "Destination and source sign must be the same");
-    static_assert(MaxExponent<Dst>::value <= MaxExponent<Src>::value,
+    static_assert(sizeof(Dst) < sizeof(Src) ||
+                   (DstLimits::is_integer && SrcLimits::is_iec559),
                   "Destination must be narrower than source");
 
-    TestStrictComparison<Dst, Src>();
-
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
@@ -685,15 +389,15 @@
         TEST_EXPECTED_RANGE(
             RANGE_VALID,
             static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
-        TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::lowest()));
+        TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
       }
     } else if (SrcLimits::is_signed) {
       TEST_EXPECTED_VALUE(-1, checked_dst - static_cast<Src>(1));
-      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
       TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
     } else {
       TEST_EXPECTED_FAILURE(checked_dst - static_cast<Src>(1));
-      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
+      TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
     }
   }
 };
@@ -701,21 +405,19 @@
 template <typename Dst, typename Src>
 struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL> {
   static void Test(const char *dst, const char *src, int line) {
-    using SrcLimits = numeric_limits<Src>;
-    using DstLimits = numeric_limits<Dst>;
-    static_assert(MaxExponent<Dst>::value >= MaxExponent<Src>::value,
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+    static_assert(sizeof(Dst) >= sizeof(Src),
                   "Destination must be equal or wider than source.");
     static_assert(SrcLimits::is_signed, "Source must be signed");
     static_assert(!DstLimits::is_signed, "Destination must be unsigned");
 
-    TestStrictComparison<Dst, Src>();
-
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
     TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
-    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::lowest());
+    TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
 
-    TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
+    TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
     TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
     TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
     TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
@@ -725,32 +427,24 @@
 template <typename Dst, typename Src>
 struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
   static void Test(const char *dst, const char *src, int line) {
-    using SrcLimits = numeric_limits<Src>;
-    using DstLimits = numeric_limits<Dst>;
-    static_assert(MaxExponent<Dst>::value < MaxExponent<Src>::value,
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+    static_assert((DstLimits::is_integer && SrcLimits::is_iec559) ||
+                   (sizeof(Dst) < sizeof(Src)),
                   "Destination must be narrower than source.");
     static_assert(SrcLimits::is_signed, "Source must be signed.");
     static_assert(!DstLimits::is_signed, "Destination must be unsigned.");
 
-    TestStrictComparison<Dst, Src>();
-
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
     TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
     TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
-    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::lowest());
+    TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
 
     TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
     TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
     TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
-
-    // Additional saturation tests.
-    EXPECT_EQ(DstLimits::max(), saturated_cast<Dst>(SrcLimits::max())) << src;
-    EXPECT_EQ(DstLimits::lowest(), saturated_cast<Dst>(SrcLimits::lowest()));
-
     if (SrcLimits::is_iec559) {
-      EXPECT_EQ(Dst(0), saturated_cast<Dst>(SrcLimits::quiet_NaN()));
-
       TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::max() * -1);
       TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
       TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
@@ -765,10 +459,10 @@
         TEST_EXPECTED_RANGE(
             RANGE_VALID,
             static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
-        TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::lowest()));
+        TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
       }
     } else {
-      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
+      TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
     }
   }
 };
@@ -776,27 +470,21 @@
 template <typename Dst, typename Src>
 struct TestNumericConversion<Dst, Src, UNSIGN_TO_SIGN_NARROW_OR_EQUAL> {
   static void Test(const char *dst, const char *src, int line) {
-    using SrcLimits = numeric_limits<Src>;
-    using DstLimits = numeric_limits<Dst>;
-    static_assert(MaxExponent<Dst>::value <= MaxExponent<Src>::value,
+    typedef numeric_limits<Src> SrcLimits;
+    typedef numeric_limits<Dst> DstLimits;
+    static_assert(sizeof(Dst) <= sizeof(Src),
                   "Destination must be narrower or equal to source.");
     static_assert(!SrcLimits::is_signed, "Source must be unsigned.");
     static_assert(DstLimits::is_signed, "Destination must be signed.");
 
-    TestStrictComparison<Dst, Src>();
-
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
     TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
-    TEST_EXPECTED_VALUE(SrcLimits::lowest(), checked_dst + SrcLimits::lowest());
+    TEST_EXPECTED_VALUE(SrcLimits::min(), checked_dst + SrcLimits::min());
 
-    TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
+    TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
     TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
     TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
-
-    // Additional saturation tests.
-    EXPECT_EQ(DstLimits::max(), saturated_cast<Dst>(SrcLimits::max()));
-    EXPECT_EQ(Dst(0), saturated_cast<Dst>(SrcLimits::lowest()));
   }
 };
 
@@ -895,43 +583,6 @@
   TEST_NUMERIC_CONVERSION(int, size_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
 }
 
-// A one-off test to ensure StrictNumeric won't resolve to an incorrect type.
-// If this fails we'll just get a compiler error on an ambiguous overload.
-int TestOverload(int) {  // Overload fails.
-  return 0;
-}
-uint8_t TestOverload(uint8_t) {  // Overload fails.
-  return 0;
-}
-size_t TestOverload(size_t) {  // Overload succeeds.
-  return 0;
-}
-
-static_assert(
-    std::is_same<decltype(TestOverload(StrictNumeric<int>())), int>::value,
-    "");
-static_assert(std::is_same<decltype(TestOverload(StrictNumeric<size_t>())),
-                           size_t>::value,
-              "");
-
-template <typename T>
-struct CastTest1 {
-  static constexpr T NaN() { return -1; }
-  static constexpr T max() { return numeric_limits<T>::max() - 1; }
-  static constexpr T Overflow() { return max(); }
-  static constexpr T lowest() { return numeric_limits<T>::lowest() + 1; }
-  static constexpr T Underflow() { return lowest(); }
-};
-
-template <typename T>
-struct CastTest2 {
-  static constexpr T NaN() { return 11; }
-  static constexpr T max() { return 10; }
-  static constexpr T Overflow() { return max(); }
-  static constexpr T lowest() { return 1; }
-  static constexpr T Underflow() { return lowest(); }
-};
-
 TEST(SafeNumerics, CastTests) {
 // MSVC catches and warns that we're forcing saturation in these tests.
 // Since that's intentional, we need to shut this warning off.
@@ -945,7 +596,7 @@
   double double_large = numeric_limits<double>::max();
   double double_infinity = numeric_limits<float>::infinity();
   double double_large_int = numeric_limits<int>::max();
-  double double_small_int = numeric_limits<int>::lowest();
+  double double_small_int = numeric_limits<int>::min();
 
   // Just test that the casts compile, since the other tests cover logic.
   EXPECT_EQ(0, checked_cast<int>(static_cast<size_t>(0)));
@@ -961,9 +612,9 @@
   EXPECT_FALSE(CheckedNumeric<unsigned>(StrictNumeric<int>(-1)).IsValid());
 
   EXPECT_TRUE(IsValueNegative(-1));
-  EXPECT_TRUE(IsValueNegative(numeric_limits<int>::lowest()));
-  EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::lowest()));
-  EXPECT_TRUE(IsValueNegative(numeric_limits<double>::lowest()));
+  EXPECT_TRUE(IsValueNegative(numeric_limits<int>::min()));
+  EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::min()));
+  EXPECT_TRUE(IsValueNegative(-numeric_limits<double>::max()));
   EXPECT_FALSE(IsValueNegative(0));
   EXPECT_FALSE(IsValueNegative(1));
   EXPECT_FALSE(IsValueNegative(0u));
@@ -990,83 +641,27 @@
   EXPECT_EQ(saturated_cast<int>(double_large), numeric_limits<int>::max());
   EXPECT_EQ(saturated_cast<float>(double_large), double_infinity);
   EXPECT_EQ(saturated_cast<float>(-double_large), -double_infinity);
-  EXPECT_EQ(numeric_limits<int>::lowest(),
-            saturated_cast<int>(double_small_int));
+  EXPECT_EQ(numeric_limits<int>::min(), saturated_cast<int>(double_small_int));
   EXPECT_EQ(numeric_limits<int>::max(), saturated_cast<int>(double_large_int));
 
-  // Test the saturated cast overrides.
-  using FloatLimits = numeric_limits<float>;
-  using IntLimits = numeric_limits<int>;
-  EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(FloatLimits::quiet_NaN())));
-  EXPECT_EQ(CastTest1<int>::max(),
-            (saturated_cast<int, CastTest1>(FloatLimits::infinity())));
-  EXPECT_EQ(CastTest1<int>::max(),
-            (saturated_cast<int, CastTest1>(FloatLimits::max())));
-  EXPECT_EQ(CastTest1<int>::max(),
-            (saturated_cast<int, CastTest1>(float(IntLimits::max()))));
-  EXPECT_EQ(CastTest1<int>::lowest(),
-            (saturated_cast<int, CastTest1>(-FloatLimits::infinity())));
-  EXPECT_EQ(CastTest1<int>::lowest(),
-            (saturated_cast<int, CastTest1>(FloatLimits::lowest())));
-  EXPECT_EQ(0, (saturated_cast<int, CastTest1>(0.0)));
-  EXPECT_EQ(1, (saturated_cast<int, CastTest1>(1.0)));
-  EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(-1.0)));
-  EXPECT_EQ(0, (saturated_cast<int, CastTest1>(0)));
-  EXPECT_EQ(1, (saturated_cast<int, CastTest1>(1)));
-  EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(-1)));
-  EXPECT_EQ(CastTest1<int>::lowest(),
-            (saturated_cast<int, CastTest1>(float(IntLimits::lowest()))));
-  EXPECT_EQ(11, (saturated_cast<int, CastTest2>(FloatLimits::quiet_NaN())));
-  EXPECT_EQ(10, (saturated_cast<int, CastTest2>(FloatLimits::infinity())));
-  EXPECT_EQ(10, (saturated_cast<int, CastTest2>(FloatLimits::max())));
-  EXPECT_EQ(1, (saturated_cast<int, CastTest2>(-FloatLimits::infinity())));
-  EXPECT_EQ(1, (saturated_cast<int, CastTest2>(FloatLimits::lowest())));
-  EXPECT_EQ(1, (saturated_cast<int, CastTest2>(0U)));
-
   float not_a_number = std::numeric_limits<float>::infinity() -
                        std::numeric_limits<float>::infinity();
   EXPECT_TRUE(std::isnan(not_a_number));
   EXPECT_EQ(0, saturated_cast<int>(not_a_number));
-
-  // Test the CheckedNumeric value extractions functions.
-  auto int8_min = MakeCheckedNum(numeric_limits<int8_t>::lowest());
-  auto int8_max = MakeCheckedNum(numeric_limits<int8_t>::max());
-  auto double_max = MakeCheckedNum(numeric_limits<double>::max());
-  static_assert(
-      std::is_same<int16_t,
-                   decltype(int8_min.ValueOrDie<int16_t>())::type>::value,
-      "ValueOrDie returning incorrect type.");
-  static_assert(
-      std::is_same<int16_t,
-                   decltype(int8_min.ValueOrDefault<int16_t>(0))::type>::value,
-      "ValueOrDefault returning incorrect type.");
-  EXPECT_FALSE(IsValidForType<uint8_t>(int8_min));
-  EXPECT_TRUE(IsValidForType<uint8_t>(int8_max));
-  EXPECT_EQ(static_cast<int>(numeric_limits<int8_t>::lowest()),
-            ValueOrDieForType<int>(int8_min));
-  EXPECT_TRUE(IsValidForType<uint32_t>(int8_max));
-  EXPECT_EQ(static_cast<int>(numeric_limits<int8_t>::max()),
-            ValueOrDieForType<int>(int8_max));
-  EXPECT_EQ(0, ValueOrDefaultForType<int>(double_max, 0));
-  uint8_t uint8_dest = 0;
-  int16_t int16_dest = 0;
-  double double_dest = 0;
-  EXPECT_TRUE(int8_max.AssignIfValid(&uint8_dest));
-  EXPECT_EQ(static_cast<uint8_t>(numeric_limits<int8_t>::max()), uint8_dest);
-  EXPECT_FALSE(int8_min.AssignIfValid(&uint8_dest));
-  EXPECT_TRUE(int8_max.AssignIfValid(&int16_dest));
-  EXPECT_EQ(static_cast<int16_t>(numeric_limits<int8_t>::max()), int16_dest);
-  EXPECT_TRUE(int8_min.AssignIfValid(&int16_dest));
-  EXPECT_EQ(static_cast<int16_t>(numeric_limits<int8_t>::lowest()), int16_dest);
-  EXPECT_FALSE(double_max.AssignIfValid(&uint8_dest));
-  EXPECT_FALSE(double_max.AssignIfValid(&int16_dest));
-  EXPECT_TRUE(double_max.AssignIfValid(&double_dest));
-  EXPECT_EQ(numeric_limits<double>::max(), double_dest);
-  EXPECT_EQ(1, checked_cast<int>(StrictNumeric<int>(1)));
-  EXPECT_EQ(1, saturated_cast<int>(StrictNumeric<int>(1)));
-  EXPECT_EQ(1, strict_cast<int>(StrictNumeric<int>(1)));
 }
 
+#if GTEST_HAS_DEATH_TEST
+
+TEST(SafeNumerics, SaturatedCastChecks) {
+  float not_a_number = std::numeric_limits<float>::infinity() -
+                       std::numeric_limits<float>::infinity();
+  EXPECT_TRUE(std::isnan(not_a_number));
+  EXPECT_DEATH((saturated_cast<int, base::SaturatedCastNaNBehaviorCheck>(
+      not_a_number)), "");
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
 TEST(SafeNumerics, IsValueInRangeForNumericType) {
   EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0));
   EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(1));
@@ -1077,9 +672,9 @@
   EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000000)));
   EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000001)));
   EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
-      std::numeric_limits<int32_t>::lowest()));
+      std::numeric_limits<int32_t>::min()));
   EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
-      std::numeric_limits<int64_t>::lowest()));
+      std::numeric_limits<int64_t>::min()));
 
   EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0));
   EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(1));
@@ -1093,13 +688,13 @@
   EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0xffffffff)));
   EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x100000000)));
   EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
-      std::numeric_limits<int32_t>::lowest()));
+      std::numeric_limits<int32_t>::min()));
   EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
-      static_cast<int64_t>(std::numeric_limits<int32_t>::lowest())));
+      static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
   EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
-      static_cast<int64_t>(std::numeric_limits<int32_t>::lowest()) - 1));
+      static_cast<int64_t>(std::numeric_limits<int32_t>::min()) - 1));
   EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
-      std::numeric_limits<int64_t>::lowest()));
+      std::numeric_limits<int64_t>::min()));
 
   EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0));
   EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(1));
@@ -1110,10 +705,10 @@
   EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000000)));
   EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000001)));
   EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
-      std::numeric_limits<int32_t>::lowest()));
+      std::numeric_limits<int32_t>::min()));
   EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(INT64_C(-1)));
   EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
-      std::numeric_limits<int64_t>::lowest()));
+      std::numeric_limits<int64_t>::min()));
 
   EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0));
   EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(1));
@@ -1135,11 +730,11 @@
   EXPECT_FALSE(
       IsValueInRangeForNumericType<int64_t>(UINT64_C(0xffffffffffffffff)));
   EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
-      std::numeric_limits<int32_t>::lowest()));
+      std::numeric_limits<int32_t>::min()));
   EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
-      static_cast<int64_t>(std::numeric_limits<int32_t>::lowest())));
+      static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
   EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
-      std::numeric_limits<int64_t>::lowest()));
+      std::numeric_limits<int64_t>::min()));
 }
 
 TEST(SafeNumerics, CompoundNumericOperations) {
@@ -1165,22 +760,3 @@
   too_large /= d;
   EXPECT_FALSE(too_large.IsValid());
 }
-
-TEST(SafeNumerics, VariadicNumericOperations) {
-  auto a = CheckAdd(1, 2UL, MakeCheckedNum(3LL), 4).ValueOrDie();
-  EXPECT_EQ(static_cast<decltype(a)::type>(10), a);
-  auto b = CheckSub(MakeCheckedNum(20.0), 2UL, 4).ValueOrDie();
-  EXPECT_EQ(static_cast<decltype(b)::type>(14.0), b);
-  auto c = CheckMul(20.0, MakeCheckedNum(1), 5, 3UL).ValueOrDie();
-  EXPECT_EQ(static_cast<decltype(c)::type>(300.0), c);
-  auto d = CheckDiv(20.0, 2.0, MakeCheckedNum(5LL), -4).ValueOrDie();
-  EXPECT_EQ(static_cast<decltype(d)::type>(-.5), d);
-  auto e = CheckMod(MakeCheckedNum(20), 3).ValueOrDie();
-  EXPECT_EQ(static_cast<decltype(e)::type>(2), e);
-  auto f = CheckLsh(1, MakeCheckedNum(2)).ValueOrDie();
-  EXPECT_EQ(static_cast<decltype(f)::type>(4), f);
-  auto g = CheckRsh(4, MakeCheckedNum(2)).ValueOrDie();
-  EXPECT_EQ(static_cast<decltype(g)::type>(1), g);
-  auto h = CheckRsh(CheckAdd(1, 1, 1, 1), CheckSub(4, 2)).ValueOrDie();
-  EXPECT_EQ(static_cast<decltype(h)::type>(1), h);
-}
diff --git a/base/observer_list.h b/base/observer_list.h
index 0572ba6..afe1f46 100644
--- a/base/observer_list.h
+++ b/base/observer_list.h
@@ -11,7 +11,6 @@
 #include <limits>
 #include <vector>
 
-#include "base/gtest_prod_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/weak_ptr.h"
@@ -47,14 +46,11 @@
 //     }
 //
 //     void NotifyFoo() {
-//       for (auto& observer : observer_list_)
-//         observer.OnFoo(this);
+//       FOR_EACH_OBSERVER(Observer, observer_list_, OnFoo(this));
 //     }
 //
 //     void NotifyBar(int x, int y) {
-//       for (FooList::iterator i = observer_list.begin(),
-//           e = observer_list.end(); i != e; ++i)
-//        i->OnBar(this, x, y);
+//       FOR_EACH_OBSERVER(Observer, observer_list_, OnBar(this, x, y));
 //     }
 //
 //    private:
@@ -84,66 +80,20 @@
     NOTIFY_EXISTING_ONLY
   };
 
-  // An iterator class that can be used to access the list of observers.
-  template <class ContainerType>
-  class Iter {
+  // An iterator class that can be used to access the list of observers.  See
+  // also the FOR_EACH_OBSERVER macro defined below.
+  class Iterator {
    public:
-    Iter();
-    explicit Iter(ContainerType* list);
-    ~Iter();
-
-    // A workaround for C2244. MSVC requires fully qualified type name for
-    // return type on a function definition to match a function declaration.
-    using ThisType =
-        typename ObserverListBase<ObserverType>::template Iter<ContainerType>;
-
-    bool operator==(const Iter& other) const;
-    bool operator!=(const Iter& other) const;
-    ThisType& operator++();
-    ObserverType* operator->() const;
-    ObserverType& operator*() const;
+    explicit Iterator(ObserverListBase<ObserverType>* list);
+    ~Iterator();
+    ObserverType* GetNext();
 
    private:
-    FRIEND_TEST_ALL_PREFIXES(ObserverListTest, BasicStdIterator);
-    FRIEND_TEST_ALL_PREFIXES(ObserverListTest, StdIteratorRemoveFront);
-
-    ObserverType* GetCurrent() const;
-    void EnsureValidIndex();
-
-    size_t clamped_max_index() const {
-      return std::min(max_index_, list_->observers_.size());
-    }
-
-    bool is_end() const { return !list_ || index_ == clamped_max_index(); }
-
     WeakPtr<ObserverListBase<ObserverType>> list_;
-    // When initially constructed and each time the iterator is incremented,
-    // |index_| is guaranteed to point to a non-null index if the iterator
-    // has not reached the end of the ObserverList.
     size_t index_;
     size_t max_index_;
   };
 
-  using Iterator = Iter<ObserverListBase<ObserverType>>;
-
-  using iterator = Iter<ObserverListBase<ObserverType>>;
-  iterator begin() {
-    // An optimization: do not involve weak pointers for empty list.
-    // Note: can't use ?: operator here due to some MSVC bug (unit tests fail)
-    if (observers_.empty())
-      return iterator();
-    return iterator(this);
-  }
-  iterator end() { return iterator(); }
-
-  using const_iterator = Iter<const ObserverListBase<ObserverType>>;
-  const_iterator begin() const {
-    if (observers_.empty())
-      return const_iterator();
-    return const_iterator(this);
-  }
-  const_iterator end() const { return const_iterator(); }
-
   ObserverListBase() : notify_depth_(0), type_(NOTIFY_ALL) {}
   explicit ObserverListBase(NotificationType type)
       : notify_depth_(0), type_(type) {}
@@ -174,99 +124,37 @@
   int notify_depth_;
   NotificationType type_;
 
-  template <class ContainerType>
-  friend class Iter;
+  friend class ObserverListBase::Iterator;
 
   DISALLOW_COPY_AND_ASSIGN(ObserverListBase);
 };
 
 template <class ObserverType>
-template <class ContainerType>
-ObserverListBase<ObserverType>::Iter<ContainerType>::Iter()
-    : index_(0), max_index_(0) {}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverListBase<ObserverType>::Iter<ContainerType>::Iter(ContainerType* list)
-    : list_(const_cast<ObserverListBase<ObserverType>*>(list)->AsWeakPtr()),
+ObserverListBase<ObserverType>::Iterator::Iterator(
+    ObserverListBase<ObserverType>* list)
+    : list_(list->AsWeakPtr()),
       index_(0),
       max_index_(list->type_ == NOTIFY_ALL ? std::numeric_limits<size_t>::max()
                                            : list->observers_.size()) {
-  EnsureValidIndex();
-  DCHECK(list_);
   ++list_->notify_depth_;
 }
 
 template <class ObserverType>
-template <class ContainerType>
-ObserverListBase<ObserverType>::Iter<ContainerType>::~Iter() {
-  if (list_ && --list_->notify_depth_ == 0)
+ObserverListBase<ObserverType>::Iterator::~Iterator() {
+  if (list_.get() && --list_->notify_depth_ == 0)
     list_->Compact();
 }
 
 template <class ObserverType>
-template <class ContainerType>
-bool ObserverListBase<ObserverType>::Iter<ContainerType>::operator==(
-    const Iter& other) const {
-  if (is_end() && other.is_end())
-    return true;
-  return list_.get() == other.list_.get() && index_ == other.index_;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-bool ObserverListBase<ObserverType>::Iter<ContainerType>::operator!=(
-    const Iter& other) const {
-  return !operator==(other);
-}
-
-template <class ObserverType>
-template <class ContainerType>
-typename ObserverListBase<ObserverType>::template Iter<ContainerType>&
-    ObserverListBase<ObserverType>::Iter<ContainerType>::operator++() {
-  if (list_) {
-    ++index_;
-    EnsureValidIndex();
-  }
-  return *this;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverType* ObserverListBase<ObserverType>::Iter<ContainerType>::operator->()
-    const {
-  ObserverType* current = GetCurrent();
-  DCHECK(current);
-  return current;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverType& ObserverListBase<ObserverType>::Iter<ContainerType>::operator*()
-    const {
-  ObserverType* current = GetCurrent();
-  DCHECK(current);
-  return *current;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-ObserverType* ObserverListBase<ObserverType>::Iter<ContainerType>::GetCurrent()
-    const {
-  if (!list_)
+ObserverType* ObserverListBase<ObserverType>::Iterator::GetNext() {
+  if (!list_.get())
     return nullptr;
-  return index_ < clamped_max_index() ? list_->observers_[index_] : nullptr;
-}
-
-template <class ObserverType>
-template <class ContainerType>
-void ObserverListBase<ObserverType>::Iter<ContainerType>::EnsureValidIndex() {
-  if (!list_)
-    return;
-
-  size_t max_index = clamped_max_index();
-  while (index_ < max_index && !list_->observers_[index_])
+  ListType& observers = list_->observers_;
+  // Advance if the current element is null
+  size_t max_index = std::min(max_index_, observers.size());
+  while (index_ < max_index && !observers[index_])
     ++index_;
+  return index_ < max_index ? observers[index_++] : nullptr;
 }
 
 template <class ObserverType>
@@ -317,8 +205,9 @@
 
 template <class ObserverType>
 void ObserverListBase<ObserverType>::Compact() {
-  observers_.erase(std::remove(observers_.begin(), observers_.end(), nullptr),
-                   observers_.end());
+  observers_.erase(
+      std::remove(observers_.begin(), observers_.end(), nullptr),
+      observers_.end());
 }
 
 template <class ObserverType, bool check_empty = false>
@@ -344,6 +233,17 @@
   }
 };
 
+#define FOR_EACH_OBSERVER(ObserverType, observer_list, func)             \
+  do {                                                                   \
+    if ((observer_list).might_have_observers()) {                        \
+      typename base::ObserverListBase<ObserverType>::Iterator            \
+          it_inside_observer_macro(&observer_list);                      \
+      ObserverType* obs;                                                 \
+      while ((obs = it_inside_observer_macro.GetNext()) != nullptr)      \
+        obs->func;                                                       \
+    }                                                                    \
+  } while (0)
+
 }  // namespace base
 
 #endif  // BASE_OBSERVER_LIST_H_
diff --git a/base/observer_list_threadsafe.h b/base/observer_list_threadsafe.h
index afb1010..fe78354 100644
--- a/base/observer_list_threadsafe.h
+++ b/base/observer_list_threadsafe.h
@@ -7,17 +7,17 @@
 
 #include <algorithm>
 #include <map>
-#include <memory>
 #include <tuple>
 
 #include "base/bind.h"
 #include "base/location.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
 #include "base/observer_list.h"
 #include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_task_runner_handle.h"
 
@@ -55,27 +55,56 @@
 ///////////////////////////////////////////////////////////////////////////////
 
 namespace base {
+
+// Forward declaration for ObserverListThreadSafeTraits.
+template <class ObserverType>
+class ObserverListThreadSafe;
+
 namespace internal {
 
-template <typename ObserverType, typename Method>
-struct Dispatcher;
-
-template <typename ObserverType, typename ReceiverType, typename... Params>
-struct Dispatcher<ObserverType, void(ReceiverType::*)(Params...)> {
-  static void Run(void(ReceiverType::* m)(Params...),
-                  Params... params, ObserverType* obj) {
-    (obj->*m)(std::forward<Params>(params)...);
+// An UnboundMethod is a wrapper for a method where the actual object is
+// provided at Run dispatch time.
+template <class T, class Method, class Params>
+class UnboundMethod {
+ public:
+  UnboundMethod(Method m, const Params& p) : m_(m), p_(p) {
+    static_assert((internal::ParamsUseScopedRefptrCorrectly<Params>::value),
+                  "bad unbound method params");
   }
+  void Run(T* obj) const {
+    DispatchToMethod(obj, m_, p_);
+  }
+ private:
+  Method m_;
+  Params p_;
 };
 
 }  // namespace internal
 
+// This class is used to work around VS2005 not accepting:
+//
+// friend class
+//     base::RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
+//
+// Instead of friending the class, we could friend the actual function
+// which calls delete.  However, this ends up being
+// RefCountedThreadSafe::DeleteInternal(), which is private.  So we
+// define our own templated traits class so we can friend it.
+template <class T>
+struct ObserverListThreadSafeTraits {
+  static void Destruct(const ObserverListThreadSafe<T>* x) {
+    delete x;
+  }
+};
+
 template <class ObserverType>
 class ObserverListThreadSafe
-    : public RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>> {
+    : public RefCountedThreadSafe<
+        ObserverListThreadSafe<ObserverType>,
+        ObserverListThreadSafeTraits<ObserverType>> {
  public:
-  using NotificationType =
-      typename ObserverList<ObserverType>::NotificationType;
+  typedef typename ObserverList<ObserverType>::NotificationType
+      NotificationType;
 
   ObserverListThreadSafe()
       : type_(ObserverListBase<ObserverType>::NOTIFY_ALL) {}
@@ -84,19 +113,17 @@
   // Add an observer to the list.  An observer should not be added to
   // the same list more than once.
   void AddObserver(ObserverType* obs) {
-    // If there is no ThreadTaskRunnerHandle, it is impossible to notify on it,
+    // If there is not a current MessageLoop, it is impossible to notify on it,
     // so do not add the observer.
-    if (!ThreadTaskRunnerHandle::IsSet())
+    if (!MessageLoop::current())
       return;
 
     ObserverList<ObserverType>* list = nullptr;
     PlatformThreadId thread_id = PlatformThread::CurrentId();
     {
       AutoLock lock(list_lock_);
-      if (observer_lists_.find(thread_id) == observer_lists_.end()) {
-        observer_lists_[thread_id] =
-            base::MakeUnique<ObserverListContext>(type_);
-      }
+      if (observer_lists_.find(thread_id) == observer_lists_.end())
+        observer_lists_[thread_id] = new ObserverListContext(type_);
       list = &(observer_lists_[thread_id]->list);
     }
     list->AddObserver(obs);
@@ -108,24 +135,32 @@
   // If the observer to be removed is in the list, RemoveObserver MUST
   // be called from the same thread which called AddObserver.
   void RemoveObserver(ObserverType* obs) {
+    ObserverListContext* context = nullptr;
+    ObserverList<ObserverType>* list = nullptr;
     PlatformThreadId thread_id = PlatformThread::CurrentId();
     {
       AutoLock lock(list_lock_);
-      auto it = observer_lists_.find(thread_id);
+      typename ObserversListMap::iterator it = observer_lists_.find(thread_id);
       if (it == observer_lists_.end()) {
         // This will happen if we try to remove an observer on a thread
         // we never added an observer for.
         return;
       }
-      ObserverList<ObserverType>& list = it->second->list;
+      context = it->second;
+      list = &context->list;
 
-      list.RemoveObserver(obs);
-
-      // If that was the last observer in the list, remove the ObserverList
-      // entirely.
-      if (list.size() == 0)
+      // If we're about to remove the last observer from the list,
+      // then we can remove this observer_list entirely.
+      if (list->HasObserver(obs) && list->size() == 1)
         observer_lists_.erase(it);
     }
+    list->RemoveObserver(obs);
+
+    // If RemoveObserver is called from a notification, the size will be
+    // nonzero.  Instead of deleting here, the NotifyWrapper will delete
+    // when it finishes iterating.
+    if (list->size() == 0)
+      delete context;
   }
 
   // Verifies that the list is currently empty (i.e. there are no observers).
@@ -139,25 +174,27 @@
   // Note, these calls are effectively asynchronous.  You cannot assume
   // that at the completion of the Notify call that all Observers have
   // been Notified.  The notification may still be pending delivery.
-  template <typename Method, typename... Params>
+  template <class Method, class... Params>
   void Notify(const tracked_objects::Location& from_here,
-              Method m, Params&&... params) {
-    Callback<void(ObserverType*)> method =
-        Bind(&internal::Dispatcher<ObserverType, Method>::Run,
-             m, std::forward<Params>(params)...);
+              Method m,
+              const Params&... params) {
+    internal::UnboundMethod<ObserverType, Method, std::tuple<Params...>> method(
+        m, std::make_tuple(params...));
 
     AutoLock lock(list_lock_);
     for (const auto& entry : observer_lists_) {
-      ObserverListContext* context = entry.second.get();
+      ObserverListContext* context = entry.second;
       context->task_runner->PostTask(
           from_here,
-          Bind(&ObserverListThreadSafe<ObserverType>::NotifyWrapper,
+          Bind(&ObserverListThreadSafe<ObserverType>::template NotifyWrapper<
+                   Method, std::tuple<Params...>>,
                this, context, method));
     }
   }
 
  private:
-  friend class RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
+  // See comment above ObserverListThreadSafeTraits' definition.
+  friend struct ObserverListThreadSafeTraits<ObserverType>;
 
   struct ObserverListContext {
     explicit ObserverListContext(NotificationType type)
@@ -171,28 +208,35 @@
   };
 
   ~ObserverListThreadSafe() {
+    STLDeleteValues(&observer_lists_);
   }
 
   // Wrapper which is called to fire the notifications for each thread's
   // ObserverList.  This function MUST be called on the thread which owns
   // the unsafe ObserverList.
-  void NotifyWrapper(ObserverListContext* context,
-                     const Callback<void(ObserverType*)>& method) {
+  template <class Method, class Params>
+  void NotifyWrapper(
+      ObserverListContext* context,
+      const internal::UnboundMethod<ObserverType, Method, Params>& method) {
     // Check that this list still needs notifications.
     {
       AutoLock lock(list_lock_);
-      auto it = observer_lists_.find(PlatformThread::CurrentId());
+      typename ObserversListMap::iterator it =
+          observer_lists_.find(PlatformThread::CurrentId());
 
       // The ObserverList could have been removed already.  In fact, it could
       // have been removed and then re-added!  If the master list's loop
       // does not match this one, then we do not need to finish this
       // notification.
-      if (it == observer_lists_.end() || it->second.get() != context)
+      if (it == observer_lists_.end() || it->second != context)
         return;
     }
 
-    for (auto& observer : context->list) {
-      method.Run(&observer);
+    {
+      typename ObserverList<ObserverType>::Iterator it(&context->list);
+      ObserverType* obs;
+      while ((obs = it.GetNext()) != nullptr)
+        method.Run(obs);
     }
 
     // If there are no more observers on the list, we can now delete it.
@@ -202,22 +246,23 @@
         // Remove |list| if it's not already removed.
         // This can happen if multiple observers got removed in a notification.
         // See http://crbug.com/55725.
-        auto it = observer_lists_.find(PlatformThread::CurrentId());
-        if (it != observer_lists_.end() && it->second.get() == context)
+        typename ObserversListMap::iterator it =
+            observer_lists_.find(PlatformThread::CurrentId());
+        if (it != observer_lists_.end() && it->second == context)
           observer_lists_.erase(it);
       }
+      delete context;
     }
   }
 
-  mutable Lock list_lock_;  // Protects the observer_lists_.
-
   // Key by PlatformThreadId because in tests, clients can attempt to remove
-  // observers without a SingleThreadTaskRunner. If this were keyed by
-  // SingleThreadTaskRunner, that operation would be silently ignored, leaving
-  // garbage in the ObserverList.
-  std::map<PlatformThreadId, std::unique_ptr<ObserverListContext>>
-      observer_lists_;
+  // observers without a MessageLoop. If this were keyed by MessageLoop, that
+  // operation would be silently ignored, leaving garbage in the ObserverList.
+  typedef std::map<PlatformThreadId, ObserverListContext*>
+      ObserversListMap;
 
+  mutable Lock list_lock_;  // Protects the observer_lists_.
+  ObserversListMap observer_lists_;
   const NotificationType type_;
 
   DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafe);
diff --git a/base/observer_list_unittest.cc b/base/observer_list_unittest.cc
index c5e556b..097a2ed 100644
--- a/base/observer_list_unittest.cc
+++ b/base/observer_list_unittest.cc
@@ -22,17 +22,13 @@
  public:
   virtual void Observe(int x) = 0;
   virtual ~Foo() {}
-  virtual int GetValue() const { return 0; }
 };
 
 class Adder : public Foo {
  public:
   explicit Adder(int scaler) : total(0), scaler_(scaler) {}
-  ~Adder() override {}
-
   void Observe(int x) override { total += x * scaler_; }
-  int GetValue() const override { return total; }
-
+  ~Adder() override {}
   int total;
 
  private:
@@ -41,28 +37,16 @@
 
 class Disrupter : public Foo {
  public:
-  Disrupter(ObserverList<Foo>* list, Foo* doomed, bool remove_self)
-      : list_(list), doomed_(doomed), remove_self_(remove_self) {}
   Disrupter(ObserverList<Foo>* list, Foo* doomed)
-      : Disrupter(list, doomed, false) {}
-  Disrupter(ObserverList<Foo>* list, bool remove_self)
-      : Disrupter(list, nullptr, remove_self) {}
-
-  ~Disrupter() override {}
-
-  void Observe(int x) override {
-    if (remove_self_)
-      list_->RemoveObserver(this);
-    if (doomed_)
-      list_->RemoveObserver(doomed_);
+      : list_(list),
+        doomed_(doomed) {
   }
-
-  void SetDoomed(Foo* doomed) { doomed_ = doomed; }
+  ~Disrupter() override {}
+  void Observe(int x) override { list_->RemoveObserver(doomed_); }
 
  private:
   ObserverList<Foo>* list_;
   Foo* doomed_;
-  bool remove_self_;
 };
 
 class ThreadSafeDisrupter : public Foo {
@@ -83,19 +67,21 @@
 class AddInObserve : public Foo {
  public:
   explicit AddInObserve(ObserverListType* observer_list)
-      : observer_list(observer_list), to_add_() {}
-
-  void SetToAdd(Foo* to_add) { to_add_ = to_add; }
+      : added(false),
+        observer_list(observer_list),
+        adder(1) {
+  }
 
   void Observe(int x) override {
-    if (to_add_) {
-      observer_list->AddObserver(to_add_);
-      to_add_ = nullptr;
+    if (!added) {
+      added = true;
+      observer_list->AddObserver(&adder);
     }
   }
 
+  bool added;
   ObserverListType* observer_list;
-  Foo* to_add_;
+  Adder adder;
 };
 
 
@@ -126,6 +112,8 @@
         FROM_HERE,
         base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
     RunLoop().Run();
+    //LOG(ERROR) << "Loop 0x" << std::hex << loop_ << " done. " <<
+    //    count_observes_ << ", " << count_addtask_;
     delete loop_;
     loop_ = reinterpret_cast<MessageLoop*>(0xdeadbeef);
     delete this;
@@ -188,8 +176,6 @@
   base::WeakPtrFactory<AddRemoveThread> weak_factory_;
 };
 
-}  // namespace
-
 TEST(ObserverListTest, BasicTest) {
   ObserverList<Foo> observer_list;
   Adder a(1), b(-1), c(1), d(-1), e(-1);
@@ -201,8 +187,7 @@
   EXPECT_TRUE(observer_list.HasObserver(&a));
   EXPECT_FALSE(observer_list.HasObserver(&c));
 
-  for (auto& observer : observer_list)
-    observer.Observe(10);
+  FOR_EACH_OBSERVER(Foo, observer_list, Observe(10));
 
   observer_list.AddObserver(&evil);
   observer_list.AddObserver(&c);
@@ -211,8 +196,7 @@
   // Removing an observer not in the list should do nothing.
   observer_list.RemoveObserver(&e);
 
-  for (auto& observer : observer_list)
-    observer.Observe(10);
+  FOR_EACH_OBSERVER(Foo, observer_list, Observe(10));
 
   EXPECT_EQ(20, a.total);
   EXPECT_EQ(-20, b.total);
@@ -221,52 +205,6 @@
   EXPECT_EQ(0, e.total);
 }
 
-TEST(ObserverListTest, DisruptSelf) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter evil(&observer_list, true);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-
-  for (auto& observer : observer_list)
-    observer.Observe(10);
-
-  observer_list.AddObserver(&evil);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  for (auto& observer : observer_list)
-    observer.Observe(10);
-
-  EXPECT_EQ(20, a.total);
-  EXPECT_EQ(-20, b.total);
-  EXPECT_EQ(10, c.total);
-  EXPECT_EQ(-10, d.total);
-}
-
-TEST(ObserverListTest, DisruptBefore) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter evil(&observer_list, &b);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&evil);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  for (auto& observer : observer_list)
-    observer.Observe(10);
-  for (auto& observer : observer_list)
-    observer.Observe(10);
-
-  EXPECT_EQ(20, a.total);
-  EXPECT_EQ(-10, b.total);
-  EXPECT_EQ(20, c.total);
-  EXPECT_EQ(-20, d.total);
-}
-
 TEST(ObserverListThreadSafeTest, BasicTest) {
   MessageLoop loop;
 
@@ -495,24 +433,20 @@
   ObserverList<Foo> observer_list(ObserverList<Foo>::NOTIFY_EXISTING_ONLY);
   Adder a(1);
   AddInObserve<ObserverList<Foo> > b(&observer_list);
-  Adder c(1);
-  b.SetToAdd(&c);
 
   observer_list.AddObserver(&a);
   observer_list.AddObserver(&b);
 
-  for (auto& observer : observer_list)
-    observer.Observe(1);
+  FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
 
-  EXPECT_FALSE(b.to_add_);
+  EXPECT_TRUE(b.added);
   // B's adder should not have been notified because it was added during
   // notification.
-  EXPECT_EQ(0, c.total);
+  EXPECT_EQ(0, b.adder.total);
 
   // Notify again to make sure b's adder is notified.
-  for (auto& observer : observer_list)
-    observer.Observe(1);
-  EXPECT_EQ(1, c.total);
+  FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+  EXPECT_EQ(1, b.adder.total);
 }
 
 // Same as above, but for ObserverListThreadSafe
@@ -522,8 +456,6 @@
       new ObserverListThreadSafe<Foo>(ObserverList<Foo>::NOTIFY_EXISTING_ONLY));
   Adder a(1);
   AddInObserve<ObserverListThreadSafe<Foo> > b(observer_list.get());
-  Adder c(1);
-  b.SetToAdd(&c);
 
   observer_list->AddObserver(&a);
   observer_list->AddObserver(&b);
@@ -531,15 +463,15 @@
   observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
   RunLoop().RunUntilIdle();
 
-  EXPECT_FALSE(b.to_add_);
+  EXPECT_TRUE(b.added);
   // B's adder should not have been notified because it was added during
   // notification.
-  EXPECT_EQ(0, c.total);
+  EXPECT_EQ(0, b.adder.total);
 
   // Notify again to make sure b's adder is notified.
   observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
   RunLoop().RunUntilIdle();
-  EXPECT_EQ(1, c.total);
+  EXPECT_EQ(1, b.adder.total);
 }
 
 class AddInClearObserve : public Foo {
@@ -569,8 +501,7 @@
 
   observer_list.AddObserver(&a);
 
-  for (auto& observer : observer_list)
-    observer.Observe(1);
+  FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
   EXPECT_TRUE(a.added());
   EXPECT_EQ(1, a.adder().total)
       << "Adder should observe once and have sum of 1.";
@@ -582,8 +513,7 @@
 
   observer_list.AddObserver(&a);
 
-  for (auto& observer : observer_list)
-    observer.Observe(1);
+  FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
   EXPECT_TRUE(a.added());
   EXPECT_EQ(0, a.adder().total)
       << "Adder should not observe, so sum should still be 0.";
@@ -606,330 +536,10 @@
   ListDestructor a(observer_list);
   observer_list->AddObserver(&a);
 
-  for (auto& observer : *observer_list)
-    observer.Observe(0);
+  FOR_EACH_OBSERVER(Foo, *observer_list, Observe(0));
   // If this test fails, there'll be Valgrind errors when this function goes out
   // of scope.
 }
 
-TEST(ObserverListTest, BasicStdIterator) {
-  using FooList = ObserverList<Foo>;
-  FooList observer_list;
-
-  // An optimization: begin() and end() do not involve weak pointers on
-  // empty list.
-  EXPECT_FALSE(observer_list.begin().list_);
-  EXPECT_FALSE(observer_list.end().list_);
-
-  // Iterate over empty list: no effect, no crash.
-  for (auto& i : observer_list)
-    i.Observe(10);
-
-  Adder a(1), b(-1), c(1), d(-1);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  for (FooList::iterator i = observer_list.begin(), e = observer_list.end();
-       i != e; ++i)
-    i->Observe(1);
-
-  EXPECT_EQ(1, a.total);
-  EXPECT_EQ(-1, b.total);
-  EXPECT_EQ(1, c.total);
-  EXPECT_EQ(-1, d.total);
-
-  // Check an iteration over a 'const view' for a given container.
-  const FooList& const_list = observer_list;
-  for (FooList::const_iterator i = const_list.begin(), e = const_list.end();
-       i != e; ++i) {
-    EXPECT_EQ(1, std::abs(i->GetValue()));
-  }
-
-  for (const auto& o : const_list)
-    EXPECT_EQ(1, std::abs(o.GetValue()));
-}
-
-TEST(ObserverListTest, StdIteratorRemoveItself) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter disrupter(&observer_list, true);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&disrupter);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  for (auto& o : observer_list)
-    o.Observe(1);
-
-  for (auto& o : observer_list)
-    o.Observe(10);
-
-  EXPECT_EQ(11, a.total);
-  EXPECT_EQ(-11, b.total);
-  EXPECT_EQ(11, c.total);
-  EXPECT_EQ(-11, d.total);
-}
-
-TEST(ObserverListTest, StdIteratorRemoveBefore) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter disrupter(&observer_list, &b);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&disrupter);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  for (auto& o : observer_list)
-    o.Observe(1);
-
-  for (auto& o : observer_list)
-    o.Observe(10);
-
-  EXPECT_EQ(11, a.total);
-  EXPECT_EQ(-1, b.total);
-  EXPECT_EQ(11, c.total);
-  EXPECT_EQ(-11, d.total);
-}
-
-TEST(ObserverListTest, StdIteratorRemoveAfter) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter disrupter(&observer_list, &c);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&disrupter);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  for (auto& o : observer_list)
-    o.Observe(1);
-
-  for (auto& o : observer_list)
-    o.Observe(10);
-
-  EXPECT_EQ(11, a.total);
-  EXPECT_EQ(-11, b.total);
-  EXPECT_EQ(0, c.total);
-  EXPECT_EQ(-11, d.total);
-}
-
-TEST(ObserverListTest, StdIteratorRemoveAfterFront) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter disrupter(&observer_list, &a);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&disrupter);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  for (auto& o : observer_list)
-    o.Observe(1);
-
-  for (auto& o : observer_list)
-    o.Observe(10);
-
-  EXPECT_EQ(1, a.total);
-  EXPECT_EQ(-11, b.total);
-  EXPECT_EQ(11, c.total);
-  EXPECT_EQ(-11, d.total);
-}
-
-TEST(ObserverListTest, StdIteratorRemoveBeforeBack) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter disrupter(&observer_list, &d);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&disrupter);
-  observer_list.AddObserver(&d);
-
-  for (auto& o : observer_list)
-    o.Observe(1);
-
-  for (auto& o : observer_list)
-    o.Observe(10);
-
-  EXPECT_EQ(11, a.total);
-  EXPECT_EQ(-11, b.total);
-  EXPECT_EQ(11, c.total);
-  EXPECT_EQ(0, d.total);
-}
-
-TEST(ObserverListTest, StdIteratorRemoveFront) {
-  using FooList = ObserverList<Foo>;
-  FooList observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter disrupter(&observer_list, true);
-
-  observer_list.AddObserver(&disrupter);
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  bool test_disruptor = true;
-  for (FooList::iterator i = observer_list.begin(), e = observer_list.end();
-       i != e; ++i) {
-    i->Observe(1);
-    // Check that second call to i->Observe() would crash here.
-    if (test_disruptor) {
-      EXPECT_FALSE(i.GetCurrent());
-      test_disruptor = false;
-    }
-  }
-
-  for (auto& o : observer_list)
-    o.Observe(10);
-
-  EXPECT_EQ(11, a.total);
-  EXPECT_EQ(-11, b.total);
-  EXPECT_EQ(11, c.total);
-  EXPECT_EQ(-11, d.total);
-}
-
-TEST(ObserverListTest, StdIteratorRemoveBack) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter disrupter(&observer_list, true);
-
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-  observer_list.AddObserver(&disrupter);
-
-  for (auto& o : observer_list)
-    o.Observe(1);
-
-  for (auto& o : observer_list)
-    o.Observe(10);
-
-  EXPECT_EQ(11, a.total);
-  EXPECT_EQ(-11, b.total);
-  EXPECT_EQ(11, c.total);
-  EXPECT_EQ(-11, d.total);
-}
-
-TEST(ObserverListTest, NestedLoop) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1), c(1), d(-1);
-  Disrupter disrupter(&observer_list, true);
-
-  observer_list.AddObserver(&disrupter);
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-  observer_list.AddObserver(&c);
-  observer_list.AddObserver(&d);
-
-  for (auto& o : observer_list) {
-    o.Observe(10);
-
-    for (auto& o : observer_list)
-      o.Observe(1);
-  }
-
-  EXPECT_EQ(15, a.total);
-  EXPECT_EQ(-15, b.total);
-  EXPECT_EQ(15, c.total);
-  EXPECT_EQ(-15, d.total);
-}
-
-TEST(ObserverListTest, NonCompactList) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1);
-
-  Disrupter disrupter1(&observer_list, true);
-  Disrupter disrupter2(&observer_list, true);
-
-  // Disrupt itself and another one.
-  disrupter1.SetDoomed(&disrupter2);
-
-  observer_list.AddObserver(&disrupter1);
-  observer_list.AddObserver(&disrupter2);
-  observer_list.AddObserver(&a);
-  observer_list.AddObserver(&b);
-
-  for (auto& o : observer_list) {
-    // Get the { nullptr, nullptr, &a, &b } non-compact list
-    // on the first inner pass.
-    o.Observe(10);
-
-    for (auto& o : observer_list)
-      o.Observe(1);
-  }
-
-  EXPECT_EQ(13, a.total);
-  EXPECT_EQ(-13, b.total);
-}
-
-TEST(ObserverListTest, BecomesEmptyThanNonEmpty) {
-  ObserverList<Foo> observer_list;
-  Adder a(1), b(-1);
-
-  Disrupter disrupter1(&observer_list, true);
-  Disrupter disrupter2(&observer_list, true);
-
-  // Disrupt itself and another one.
-  disrupter1.SetDoomed(&disrupter2);
-
-  observer_list.AddObserver(&disrupter1);
-  observer_list.AddObserver(&disrupter2);
-
-  bool add_observers = true;
-  for (auto& o : observer_list) {
-    // Get the { nullptr, nullptr } empty list on the first inner pass.
-    o.Observe(10);
-
-    for (auto& o : observer_list)
-      o.Observe(1);
-
-    if (add_observers) {
-      observer_list.AddObserver(&a);
-      observer_list.AddObserver(&b);
-      add_observers = false;
-    }
-  }
-
-  EXPECT_EQ(12, a.total);
-  EXPECT_EQ(-12, b.total);
-}
-
-TEST(ObserverListTest, AddObserverInTheLastObserve) {
-  using FooList = ObserverList<Foo>;
-  FooList observer_list;
-
-  AddInObserve<FooList> a(&observer_list);
-  Adder b(-1);
-
-  a.SetToAdd(&b);
-  observer_list.AddObserver(&a);
-
-  auto it = observer_list.begin();
-  while (it != observer_list.end()) {
-    auto& observer = *it;
-    // Intentionally increment the iterator before calling Observe(). The
-    // ObserverList starts with only one observer, and it == observer_list.end()
-    // should be true after the next line.
-    ++it;
-    // However, the first Observe() call will add a second observer: at this
-    // point, it != observer_list.end() should be true, and Observe() should be
-    // called on the newly added observer on the next iteration of the loop.
-    observer.Observe(10);
-  }
-
-  EXPECT_EQ(-10, b.total);
-}
-
+}  // namespace
 }  // namespace base
diff --git a/base/optional.h b/base/optional.h
index cf65ad7..b468964 100644
--- a/base/optional.h
+++ b/base/optional.h
@@ -8,6 +8,7 @@
 #include <type_traits>
 
 #include "base/logging.h"
+#include "base/memory/aligned_memory.h"
 #include "base/template_util.h"
 
 namespace base {
@@ -34,70 +35,28 @@
 
 template <typename T, bool = base::is_trivially_destructible<T>::value>
 struct OptionalStorage {
-  // Initializing |empty_| here instead of using default member initializing
-  // to avoid errors in g++ 4.8.
-  constexpr OptionalStorage() : empty_('\0') {}
-
-  constexpr explicit OptionalStorage(const T& value)
-      : is_null_(false), value_(value) {}
-
-  // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
-  explicit OptionalStorage(T&& value)
-      : is_null_(false), value_(std::move(value)) {}
-
-  // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
-  template <class... Args>
-  explicit OptionalStorage(base::in_place_t, Args&&... args)
-      : is_null_(false), value_(std::forward<Args>(args)...) {}
-
   // When T is not trivially destructible we must call its
   // destructor before deallocating its memory.
   ~OptionalStorage() {
     if (!is_null_)
-      value_.~T();
+      buffer_.template data_as<T>()->~T();
   }
 
   bool is_null_ = true;
-  union {
-    // |empty_| exists so that the union will always be initialized, even when
-    // it doesn't contain a value. Union members must be initialized for the
-    // constructor to be 'constexpr'.
-    char empty_;
-    T value_;
-  };
+  base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
 };
 
 template <typename T>
 struct OptionalStorage<T, true> {
-  // Initializing |empty_| here instead of using default member initializing
-  // to avoid errors in g++ 4.8.
-  constexpr OptionalStorage() : empty_('\0') {}
-
-  constexpr explicit OptionalStorage(const T& value)
-      : is_null_(false), value_(value) {}
-
-  // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
-  explicit OptionalStorage(T&& value)
-      : is_null_(false), value_(std::move(value)) {}
-
-  // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
-  template <class... Args>
-  explicit OptionalStorage(base::in_place_t, Args&&... args)
-      : is_null_(false), value_(std::forward<Args>(args)...) {}
-
-  // When T is trivially destructible (i.e. its destructor does nothing) there
-  // is no need to call it. Explicitly defaulting the destructor means it's not
-  // user-provided. Those two together make this destructor trivial.
+  // When T is trivially destructible (i.e. its destructor does nothing)
+  // there is no need to call it.
+  // Since |base::AlignedMemory| is just an array its destructor
+  // is trivial. Explicitly defaulting the destructor means it's not
+  // user-provided. All of this together make this destructor trivial.
   ~OptionalStorage() = default;
 
   bool is_null_ = true;
-  union {
-    // |empty_| exists so that the union will always be initialized, even when
-    // it doesn't contain a value. Union members must be initialized for the
-    // constructor to be 'constexpr'.
-    char empty_;
-    T value_;
-  };
+  base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
 };
 
 }  // namespace internal
@@ -120,9 +79,8 @@
  public:
   using value_type = T;
 
-  constexpr Optional() {}
-
-  constexpr Optional(base::nullopt_t) {}
+  constexpr Optional() = default;
+  Optional(base::nullopt_t) : Optional() {}
 
   Optional(const Optional& other) {
     if (!other.storage_.is_null_)
@@ -134,15 +92,14 @@
       Init(std::move(other.value()));
   }
 
-  constexpr Optional(const T& value) : storage_(value) {}
+  Optional(const T& value) { Init(value); }
 
-  // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
-  Optional(T&& value) : storage_(std::move(value)) {}
+  Optional(T&& value) { Init(std::move(value)); }
 
-  // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
   template <class... Args>
-  explicit Optional(base::in_place_t, Args&&... args)
-      : storage_(base::in_place, std::forward<Args>(args)...) {}
+  explicit Optional(base::in_place_t, Args&&... args) {
+    emplace(std::forward<Args>(args)...);
+  }
 
   ~Optional() = default;
 
@@ -206,32 +163,30 @@
 
   constexpr explicit operator bool() const { return !storage_.is_null_; }
 
-  constexpr bool has_value() const { return !storage_.is_null_; }
-
   // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
   // meant to be 'constexpr const'.
   T& value() & {
     DCHECK(!storage_.is_null_);
-    return storage_.value_;
+    return *storage_.buffer_.template data_as<T>();
   }
 
   // TODO(mlamouri): can't use 'constexpr' with DCHECK.
   const T& value() const& {
     DCHECK(!storage_.is_null_);
-    return storage_.value_;
+    return *storage_.buffer_.template data_as<T>();
   }
 
   // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
   // meant to be 'constexpr const'.
   T&& value() && {
     DCHECK(!storage_.is_null_);
-    return std::move(storage_.value_);
+    return std::move(*storage_.buffer_.template data_as<T>());
   }
 
   // TODO(mlamouri): can't use 'constexpr' with DCHECK.
   const T&& value() const&& {
     DCHECK(!storage_.is_null_);
-    return std::move(storage_.value_);
+    return std::move(*storage_.buffer_.template data_as<T>());
   }
 
   template <class U>
@@ -262,10 +217,10 @@
 
     if (storage_.is_null_ != other.storage_.is_null_) {
       if (storage_.is_null_) {
-        Init(std::move(other.storage_.value_));
+        Init(std::move(*other.storage_.buffer_.template data_as<T>()));
         other.FreeIfNeeded();
       } else {
-        other.Init(std::move(storage_.value_));
+        other.Init(std::move(*storage_.buffer_.template data_as<T>()));
         FreeIfNeeded();
       }
       return;
@@ -276,10 +231,6 @@
     swap(**this, *other);
   }
 
-  void reset() {
-    FreeIfNeeded();
-  }
-
   template <class... Args>
   void emplace(Args&&... args) {
     FreeIfNeeded();
@@ -289,20 +240,20 @@
  private:
   void Init(const T& value) {
     DCHECK(storage_.is_null_);
-    new (&storage_.value_) T(value);
+    new (storage_.buffer_.void_data()) T(value);
     storage_.is_null_ = false;
   }
 
   void Init(T&& value) {
     DCHECK(storage_.is_null_);
-    new (&storage_.value_) T(std::move(value));
+    new (storage_.buffer_.void_data()) T(std::move(value));
     storage_.is_null_ = false;
   }
 
   template <class... Args>
   void Init(Args&&... args) {
     DCHECK(storage_.is_null_);
-    new (&storage_.value_) T(std::forward<Args>(args)...);
+    new (storage_.buffer_.void_data()) T(std::forward<Args>(args)...);
     storage_.is_null_ = false;
   }
 
@@ -310,20 +261,20 @@
     if (storage_.is_null_)
       Init(value);
     else
-      storage_.value_ = value;
+      *storage_.buffer_.template data_as<T>() = value;
   }
 
   void InitOrAssign(T&& value) {
     if (storage_.is_null_)
       Init(std::move(value));
     else
-      storage_.value_ = std::move(value);
+      *storage_.buffer_.template data_as<T>() = std::move(value);
   }
 
   void FreeIfNeeded() {
     if (storage_.is_null_)
       return;
-    storage_.value_.~T();
+    storage_.buffer_.template data_as<T>()->~T();
     storage_.is_null_ = true;
   }
 
diff --git a/base/optional_unittest.cc b/base/optional_unittest.cc
index 83025e8..d6bf263 100644
--- a/base/optional_unittest.cc
+++ b/base/optional_unittest.cc
@@ -98,7 +98,7 @@
 
 TEST(OptionalTest, DefaultConstructor) {
   {
-    constexpr Optional<float> o;
+    Optional<float> o;
     EXPECT_FALSE(o);
   }
 
@@ -144,28 +144,21 @@
 
 TEST(OptionalTest, ValueConstructor) {
   {
-    constexpr float value = 0.1f;
-    constexpr Optional<float> o(value);
-
+    Optional<float> o(0.1f);
     EXPECT_TRUE(o);
-    EXPECT_EQ(value, o.value());
+    EXPECT_EQ(o.value(), 0.1f);
   }
 
   {
-    std::string value("foo");
-    Optional<std::string> o(value);
-
+    Optional<std::string> o("foo");
     EXPECT_TRUE(o);
-    EXPECT_EQ(value, o.value());
+    EXPECT_EQ(o.value(), "foo");
   }
 
   {
-    TestObject value(3, 0.1);
-    Optional<TestObject> o(value);
-
-    EXPECT_TRUE(o);
-    EXPECT_EQ(TestObject::State::COPY_CONSTRUCTED, o->state());
-    EXPECT_EQ(value, o.value());
+    Optional<TestObject> o(TestObject(3, 0.1));
+    EXPECT_TRUE(!!o);
+    EXPECT_TRUE(o.value() == TestObject(3, 0.1));
   }
 }
 
@@ -205,28 +198,35 @@
 
 TEST(OptionalTest, MoveValueConstructor) {
   {
-    float value = 0.1f;
-    Optional<float> o(std::move(value));
+    Optional<float> first(0.1f);
+    Optional<float> second(std::move(first.value()));
 
-    EXPECT_TRUE(o);
-    EXPECT_EQ(0.1f, o.value());
+    EXPECT_TRUE(second);
+    EXPECT_EQ(second.value(), 0.1f);
+
+    EXPECT_TRUE(first);
   }
 
   {
-    std::string value("foo");
-    Optional<std::string> o(std::move(value));
+    Optional<std::string> first("foo");
+    Optional<std::string> second(std::move(first.value()));
 
-    EXPECT_TRUE(o);
-    EXPECT_EQ("foo", o.value());
+    EXPECT_TRUE(second);
+    EXPECT_EQ("foo", second.value());
+
+    EXPECT_TRUE(first);
   }
 
   {
-    TestObject value(3, 0.1);
-    Optional<TestObject> o(std::move(value));
+    Optional<TestObject> first(TestObject(3, 0.1));
+    Optional<TestObject> second(std::move(first.value()));
 
-    EXPECT_TRUE(o);
-    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, o->state());
-    EXPECT_EQ(TestObject(3, 0.1), o.value());
+    EXPECT_TRUE(!!second);
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
+    EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+
+    EXPECT_TRUE(!!first);
+    EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
   }
 }
 
@@ -251,7 +251,7 @@
 }
 
 TEST(OptionalTest, NulloptConstructor) {
-  constexpr Optional<int> a(base::nullopt);
+  Optional<int> a = base::nullopt;
   EXPECT_FALSE(a);
 }
 
@@ -1298,49 +1298,4 @@
   EXPECT_NE(setOptInt.end(), setOptInt.find(3));
 }
 
-TEST(OptionalTest, HasValue) {
-  Optional<int> a;
-  EXPECT_FALSE(a.has_value());
-
-  a = 42;
-  EXPECT_TRUE(a.has_value());
-
-  a = nullopt;
-  EXPECT_FALSE(a.has_value());
-
-  a = 0;
-  EXPECT_TRUE(a.has_value());
-
-  a = Optional<int>();
-  EXPECT_FALSE(a.has_value());
-}
-
-TEST(OptionalTest, Reset_int) {
-  Optional<int> a(0);
-  EXPECT_TRUE(a.has_value());
-  EXPECT_EQ(0, a.value());
-
-  a.reset();
-  EXPECT_FALSE(a.has_value());
-  EXPECT_EQ(-1, a.value_or(-1));
-}
-
-TEST(OptionalTest, Reset_Object) {
-  Optional<TestObject> a(TestObject(0, 0.1));
-  EXPECT_TRUE(a.has_value());
-  EXPECT_EQ(TestObject(0, 0.1), a.value());
-
-  a.reset();
-  EXPECT_FALSE(a.has_value());
-  EXPECT_EQ(TestObject(42, 0.0), a.value_or(TestObject(42, 0.0)));
-}
-
-TEST(OptionalTest, Reset_NoOp) {
-  Optional<int> a;
-  EXPECT_FALSE(a.has_value());
-
-  a.reset();
-  EXPECT_FALSE(a.has_value());
-}
-
 }  // namespace base
diff --git a/base/pending_task.cc b/base/pending_task.cc
index b2f95b4..73834bd 100644
--- a/base/pending_task.cc
+++ b/base/pending_task.cc
@@ -4,17 +4,22 @@
 
 #include "base/pending_task.h"
 
-#include "base/message_loop/message_loop.h"
 #include "base/tracked_objects.h"
 
 namespace base {
 
 PendingTask::PendingTask(const tracked_objects::Location& posted_from,
-                         OnceClosure task)
-    : PendingTask(posted_from, std::move(task), TimeTicks(), true) {}
+                         base::Closure task)
+    : base::TrackingInfo(posted_from, TimeTicks()),
+      task(std::move(task)),
+      posted_from(posted_from),
+      sequence_num(0),
+      nestable(true),
+      is_high_res(false) {
+}
 
 PendingTask::PendingTask(const tracked_objects::Location& posted_from,
-                         OnceClosure task,
+                         base::Closure task,
                          TimeTicks delayed_run_time,
                          bool nestable)
     : base::TrackingInfo(posted_from, delayed_run_time),
@@ -23,17 +28,6 @@
       sequence_num(0),
       nestable(nestable),
       is_high_res(false) {
-  const PendingTask* parent_task =
-      MessageLoop::current() ? MessageLoop::current()->current_pending_task_
-                             : nullptr;
-  if (parent_task) {
-    task_backtrace[0] = parent_task->posted_from.program_counter();
-    std::copy(parent_task->task_backtrace.begin(),
-              parent_task->task_backtrace.end() - 1,
-              task_backtrace.begin() + 1);
-  } else {
-    task_backtrace.fill(nullptr);
-  }
 }
 
 PendingTask::PendingTask(PendingTask&& other) = default;
diff --git a/base/pending_task.h b/base/pending_task.h
index 7f3fccd..5761653 100644
--- a/base/pending_task.h
+++ b/base/pending_task.h
@@ -5,7 +5,6 @@
 #ifndef BASE_PENDING_TASK_H_
 #define BASE_PENDING_TASK_H_
 
-#include <array>
 #include <queue>
 
 #include "base/base_export.h"
@@ -19,9 +18,10 @@
 // Contains data about a pending task. Stored in TaskQueue and DelayedTaskQueue
 // for use by classes that queue and execute tasks.
 struct BASE_EXPORT PendingTask : public TrackingInfo {
-  PendingTask(const tracked_objects::Location& posted_from, OnceClosure task);
   PendingTask(const tracked_objects::Location& posted_from,
-              OnceClosure task,
+              Closure task);
+  PendingTask(const tracked_objects::Location& posted_from,
+              Closure task,
               TimeTicks delayed_run_time,
               bool nestable);
   PendingTask(PendingTask&& other);
@@ -33,14 +33,11 @@
   bool operator<(const PendingTask& other) const;
 
   // The task to run.
-  OnceClosure task;
+  Closure task;
 
   // The site this PendingTask was posted from.
   tracked_objects::Location posted_from;
 
-  // Task backtrace.
-  std::array<const void*, 4> task_backtrace;
-
   // Secondary sort key for run time.
   int sequence_num;
 
diff --git a/base/pickle.cc b/base/pickle.cc
index 0079b39..4ef167b 100644
--- a/base/pickle.cc
+++ b/base/pickle.cc
@@ -233,6 +233,7 @@
 
 void PickleSizer::AddAttachment() {
   // From IPC::Message::WriteAttachment
+  AddBool();
   AddInt();
 }
 
diff --git a/base/posix/global_descriptors.cc b/base/posix/global_descriptors.cc
index 8da808e..6c18783 100644
--- a/base/posix/global_descriptors.cc
+++ b/base/posix/global_descriptors.cc
@@ -47,22 +47,6 @@
   return -1;
 }
 
-base::ScopedFD GlobalDescriptors::TakeFD(
-    Key key,
-    base::MemoryMappedFile::Region* region) {
-  base::ScopedFD fd;
-  for (Mapping::iterator i = descriptors_.begin(); i != descriptors_.end();
-       ++i) {
-    if (i->key == key) {
-      *region = i->region;
-      fd.reset(i->fd);
-      descriptors_.erase(i);
-      break;
-    }
-  }
-  return fd;
-}
-
 void GlobalDescriptors::Set(Key key, int fd) {
   Set(key, fd, base::MemoryMappedFile::Region::kWholeFile);
 }
diff --git a/base/posix/global_descriptors.h b/base/posix/global_descriptors.h
index 9d68761..edb299d 100644
--- a/base/posix/global_descriptors.h
+++ b/base/posix/global_descriptors.h
@@ -13,7 +13,6 @@
 #include <stdint.h>
 
 #include "base/files/memory_mapped_file.h"
-#include "base/files/scoped_file.h"
 #include "base/memory/singleton.h"
 
 namespace base {
@@ -35,10 +34,6 @@
 // It maps from an abstract key to a descriptor. If independent modules each
 // need to define keys, then values should be chosen randomly so as not to
 // collide.
-//
-// Note that this class is deprecated and passing file descriptor should ideally
-// be done through the command line and using FileDescriptorStore.
-// See https://crbugs.com/detail?id=692619
 class BASE_EXPORT GlobalDescriptors {
  public:
   typedef uint32_t Key;
@@ -57,7 +52,18 @@
 
   // Often we want a canonical descriptor for a given Key. In this case, we add
   // the following constant to the key value:
+#if !defined(OS_ANDROID)
   static const int kBaseDescriptor = 3;  // 0, 1, 2 are already taken.
+#else
+  // 3 used by __android_log_write().
+  // 4 used by... something important on Android M.
+  // 5 used by... something important on Android L... on low-end devices.
+  // TODO(amistry): An Android, this mechanism is only used for tests since the
+  // content child launcher spawns a process by creating a new Activity using
+  // the Android APIs. For tests, come up with a way that doesn't require using
+  // a pre-defined fd.
+  static const int kBaseDescriptor = 6;
+#endif
 
   // Return the singleton instance of GlobalDescriptors.
   static GlobalDescriptors* GetInstance();
@@ -68,11 +74,6 @@
   // Get a descriptor given a key. Returns -1 on error.
   int MaybeGet(Key key) const;
 
-  // Returns a descriptor given a key and removes it from this class mappings.
-  // Also populates |region|.
-  // It is a fatal error if the key is not known.
-  base::ScopedFD TakeFD(Key key, base::MemoryMappedFile::Region* region);
-
   // Get a region given a key. It is a fatal error if the key is not known.
   base::MemoryMappedFile::Region GetRegion(Key key) const;
 
diff --git a/base/post_task_and_reply_with_result_internal.h b/base/post_task_and_reply_with_result_internal.h
deleted file mode 100644
index 1456129..0000000
--- a/base/post_task_and_reply_with_result_internal.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
-#define BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
-
-#include <utility>
-
-#include "base/callback.h"
-
-namespace base {
-
-namespace internal {
-
-// Adapts a function that produces a result via a return value to
-// one that returns via an output parameter.
-template <typename ReturnType>
-void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
-                          ReturnType* result) {
-  *result = func.Run();
-}
-
-// Adapts a T* result to a callblack that expects a T.
-template <typename TaskReturnType, typename ReplyArgType>
-void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
-                  TaskReturnType* result) {
-  callback.Run(std::move(*result));
-}
-
-}  // namespace internal
-
-}  // namespace base
-
-#endif  // BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
diff --git a/base/power_monitor/power_monitor_device_source.h b/base/power_monitor/power_monitor_device_source.h
index 4ecd2d9..2dabac8 100644
--- a/base/power_monitor/power_monitor_device_source.h
+++ b/base/power_monitor/power_monitor_device_source.h
@@ -7,6 +7,8 @@
 
 #include "base/base_export.h"
 #include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/observer_list_threadsafe.h"
 #include "base/power_monitor/power_monitor_source.h"
 #include "base/power_monitor/power_observer.h"
 #include "build/build_config.h"
@@ -14,8 +16,17 @@
 #if defined(OS_WIN)
 #include <windows.h>
 
+// Windows HiRes timers drain the battery faster so we need to know the battery
+// status.  This isn't true for other platforms.
+#define ENABLE_BATTERY_MONITORING 1
+#else
+#undef ENABLE_BATTERY_MONITORING
 #endif  // !OS_WIN
 
+#if defined(ENABLE_BATTERY_MONITORING)
+#include "base/timer/timer.h"
+#endif  // defined(ENABLE_BATTERY_MONITORING)
+
 #if defined(OS_IOS)
 #include <objc/runtime.h>
 #endif  // OS_IOS
@@ -82,11 +93,19 @@
   // false otherwise.
   bool IsOnBatteryPowerImpl() override;
 
+  // Checks the battery status and notifies observers if the battery
+  // status has changed.
+  void BatteryCheck();
+
 #if defined(OS_IOS)
   // Holds pointers to system event notification observers.
   std::vector<id> notification_observers_;
 #endif
 
+#if defined(ENABLE_BATTERY_MONITORING)
+  base::OneShotTimer delayed_battery_check_;
+#endif
+
 #if defined(OS_WIN)
   PowerMessageWindow power_message_window_;
 #endif
diff --git a/base/power_monitor/power_monitor_source.h b/base/power_monitor/power_monitor_source.h
index b69cbf8..e63f4f8 100644
--- a/base/power_monitor/power_monitor_source.h
+++ b/base/power_monitor/power_monitor_source.h
@@ -49,14 +49,9 @@
   // false otherwise.
   virtual bool IsOnBatteryPowerImpl() = 0;
 
-  // Sets the initial state for |on_battery_power_|, which defaults to false
-  // since not all implementations can provide the value at construction. May
-  // only be called before a base::PowerMonitor has been created.
-  void SetInitialOnBatteryPowerState(bool on_battery_power);
-
  private:
-  bool on_battery_power_ = false;
-  bool suspended_ = false;
+  bool on_battery_power_;
+  bool suspended_;
 
   // This lock guards access to on_battery_power_, to ensure that
   // IsOnBatteryPower can be called from any thread.
diff --git a/base/process/internal_linux.cc b/base/process/internal_linux.cc
index c782004..d286f4e 100644
--- a/base/process/internal_linux.cc
+++ b/base/process/internal_linux.cc
@@ -133,10 +133,9 @@
   return StringToSizeT(proc_stats[field_num], &value) ? value : 0;
 }
 
-int64_t ReadStatFileAndGetFieldAsInt64(const FilePath& stat_file,
-                                       ProcStatsFields field_num) {
+int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) {
   std::string stats_data;
-  if (!ReadProcFile(stat_file, &stats_data))
+  if (!ReadProcStats(pid, &stats_data))
     return 0;
   std::vector<std::string> proc_stats;
   if (!ParseProcStats(stats_data, &proc_stats))
@@ -144,16 +143,6 @@
   return GetProcStatsFieldAsInt64(proc_stats, field_num);
 }
 
-int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num) {
-  FilePath stat_file = internal::GetProcPidDir(pid).Append(kStatFile);
-  return ReadStatFileAndGetFieldAsInt64(stat_file, field_num);
-}
-
-int64_t ReadProcSelfStatsAndGetFieldAsInt64(ProcStatsFields field_num) {
-  FilePath stat_file = FilePath(kProcDir).Append("self").Append(kStatFile);
-  return ReadStatFileAndGetFieldAsInt64(stat_file, field_num);
-}
-
 size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
                                        ProcStatsFields field_num) {
   std::string stats_data;
@@ -181,32 +170,6 @@
   return Time::FromTimeT(btime);
 }
 
-TimeDelta GetUserCpuTimeSinceBoot() {
-  FilePath path("/proc/stat");
-  std::string contents;
-  if (!ReadProcFile(path, &contents))
-    return TimeDelta();
-
-  ProcStatMap proc_stat;
-  ParseProcStat(contents, &proc_stat);
-  ProcStatMap::const_iterator cpu_it = proc_stat.find("cpu");
-  if (cpu_it == proc_stat.end())
-    return TimeDelta();
-
-  std::vector<std::string> cpu = SplitString(
-      cpu_it->second, kWhitespaceASCII, TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
-
-  if (cpu.size() < 2 || cpu[0] != "cpu")
-    return TimeDelta();
-
-  uint64_t user;
-  uint64_t nice;
-  if (!StringToUint64(cpu[0], &user) || !StringToUint64(cpu[1], &nice))
-    return TimeDelta();
-
-  return ClockTicksToTimeDelta(user + nice);
-}
-
 TimeDelta ClockTicksToTimeDelta(int clock_ticks) {
   // This queries the /proc-specific scaling factor which is
   // conceptually the system hertz.  To dump this value on another
diff --git a/base/process/internal_linux.h b/base/process/internal_linux.h
index 99d0fd5..ba793f7 100644
--- a/base/process/internal_linux.h
+++ b/base/process/internal_linux.h
@@ -72,12 +72,9 @@
 size_t GetProcStatsFieldAsSizeT(const std::vector<std::string>& proc_stats,
                                 ProcStatsFields field_num);
 
-// Convenience wrappers around GetProcStatsFieldAsInt64(), ParseProcStats() and
+// Convenience wrapper around GetProcStatsFieldAsInt64(), ParseProcStats() and
 // ReadProcStats(). See GetProcStatsFieldAsInt64() for details.
-int64_t ReadStatsFilendGetFieldAsInt64(const FilePath& stat_file,
-                                       ProcStatsFields field_num);
 int64_t ReadProcStatsAndGetFieldAsInt64(pid_t pid, ProcStatsFields field_num);
-int64_t ReadProcSelfStatsAndGetFieldAsInt64(ProcStatsFields field_num);
 
 // Same as ReadProcStatsAndGetFieldAsInt64() but for size_t values.
 size_t ReadProcStatsAndGetFieldAsSizeT(pid_t pid,
@@ -86,9 +83,6 @@
 // Returns the time that the OS started. Clock ticks are relative to this.
 Time GetBootTime();
 
-// Returns the amount of time spent in user space since boot across all CPUs.
-TimeDelta GetUserCpuTimeSinceBoot();
-
 // Converts Linux clock ticks to a wall time delta.
 TimeDelta ClockTicksToTimeDelta(int clock_ticks);
 
diff --git a/base/process/kill.h b/base/process/kill.h
index 6d410e0..c664f33 100644
--- a/base/process/kill.h
+++ b/base/process/kill.h
@@ -18,16 +18,6 @@
 
 class ProcessFilter;
 
-#if defined(OS_WIN)
-namespace win {
-
-// See definition in sandbox/win/src/sandbox_types.h
-const DWORD kSandboxFatalMemoryExceeded = 7012;
-
-}  // namespace win
-
-#endif  // OS_WIN
-
 // Return status values from GetTerminationStatus.  Don't use these as
 // exit code arguments to KillProcess*(), use platform/application
 // specific values instead.
@@ -49,7 +39,6 @@
   TERMINATION_STATUS_OOM_PROTECTED,        // child was protected from oom kill
 #endif
   TERMINATION_STATUS_LAUNCH_FAILED,        // child process never launched
-  TERMINATION_STATUS_OOM,                  // Process died due to oom
   TERMINATION_STATUS_MAX_ENUM
 };
 
diff --git a/base/process/kill_posix.cc b/base/process/kill_posix.cc
index 4dc60ef..85470e0 100644
--- a/base/process/kill_posix.cc
+++ b/base/process/kill_posix.cc
@@ -52,8 +52,6 @@
       case SIGFPE:
       case SIGILL:
       case SIGSEGV:
-      case SIGTRAP:
-      case SIGSYS:
         return TERMINATION_STATUS_PROCESS_CRASHED;
       case SIGKILL:
 #if defined(OS_CHROMEOS)
diff --git a/base/process/launch.cc b/base/process/launch.cc
index c03e1a7..3ca5155 100644
--- a/base/process/launch.cc
+++ b/base/process/launch.cc
@@ -7,11 +7,43 @@
 
 namespace base {
 
-LaunchOptions::LaunchOptions() = default;
+LaunchOptions::LaunchOptions()
+    : wait(false),
+#if defined(OS_WIN)
+      start_hidden(false),
+      handles_to_inherit(NULL),
+      inherit_handles(false),
+      as_user(NULL),
+      empty_desktop_name(false),
+      job_handle(NULL),
+      stdin_handle(NULL),
+      stdout_handle(NULL),
+      stderr_handle(NULL),
+      force_breakaway_from_job_(false)
+#else
+      clear_environ(false),
+      fds_to_remap(NULL),
+      maximize_rlimits(NULL),
+      new_process_group(false)
+#if defined(OS_LINUX)
+      , clone_flags(0)
+      , allow_new_privs(false)
+      , kill_on_parent_death(false)
+#endif  // OS_LINUX
+#if defined(OS_POSIX)
+      , pre_exec_delegate(NULL)
+#endif  // OS_POSIX
+#if defined(OS_CHROMEOS)
+      , ctrl_terminal_fd(-1)
+#endif  // OS_CHROMEOS
+#endif  // !defined(OS_WIN)
+    {
+}
 
 LaunchOptions::LaunchOptions(const LaunchOptions& other) = default;
 
-LaunchOptions::~LaunchOptions() = default;
+LaunchOptions::~LaunchOptions() {
+}
 
 LaunchOptions LaunchOptionsForTest() {
   LaunchOptions options;
diff --git a/base/process/launch.h b/base/process/launch.h
index be8f6e7..b8c0259 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -63,17 +63,17 @@
   ~LaunchOptions();
 
   // If true, wait for the process to complete.
-  bool wait = false;
+  bool wait;
 
   // If not empty, change to this directory before executing the new process.
   base::FilePath current_directory;
 
 #if defined(OS_WIN)
-  bool start_hidden = false;
+  bool start_hidden;
 
   // If non-null, inherit exactly the list of handles in this vector (these
   // handles must be inheritable).
-  HandlesToInheritVector* handles_to_inherit = nullptr;
+  HandlesToInheritVector* handles_to_inherit;
 
   // If true, the new process inherits handles from the parent. In production
   // code this flag should be used only when running short-lived, trusted
@@ -81,7 +81,7 @@
   // leak to the child process, causing errors such as open socket hangs.
   // Note: If |handles_to_inherit| is non-null, this flag is ignored and only
   // those handles will be inherited.
-  bool inherit_handles = false;
+  bool inherit_handles;
 
   // If non-null, runs as if the user represented by the token had launched it.
   // Whether the application is visible on the interactive desktop depends on
@@ -90,29 +90,29 @@
   // To avoid hard to diagnose problems, when specified this loads the
   // environment variables associated with the user and if this operation fails
   // the entire call fails as well.
-  UserTokenHandle as_user = nullptr;
+  UserTokenHandle as_user;
 
   // If true, use an empty string for the desktop name.
-  bool empty_desktop_name = false;
+  bool empty_desktop_name;
 
   // If non-null, launches the application in that job object. The process will
   // be terminated immediately and LaunchProcess() will fail if assignment to
   // the job object fails.
-  HANDLE job_handle = nullptr;
+  HANDLE job_handle;
 
   // Handles for the redirection of stdin, stdout and stderr. The handles must
   // be inheritable. Caller should either set all three of them or none (i.e.
   // there is no way to redirect stderr without redirecting stdin). The
   // |inherit_handles| flag must be set to true when redirecting stdio stream.
-  HANDLE stdin_handle = nullptr;
-  HANDLE stdout_handle = nullptr;
-  HANDLE stderr_handle = nullptr;
+  HANDLE stdin_handle;
+  HANDLE stdout_handle;
+  HANDLE stderr_handle;
 
   // If set to true, ensures that the child process is launched with the
   // CREATE_BREAKAWAY_FROM_JOB flag which allows it to breakout of the parent
   // job if any.
-  bool force_breakaway_from_job_ = false;
-#else  // !defined(OS_WIN)
+  bool force_breakaway_from_job_;
+#else
   // Set/unset environment variables. These are applied on top of the parent
   // process environment.  Empty (the default) means to inherit the same
   // environment. See AlterEnvironment().
@@ -120,58 +120,53 @@
 
   // Clear the environment for the new process before processing changes from
   // |environ|.
-  bool clear_environ = false;
+  bool clear_environ;
 
   // If non-null, remap file descriptors according to the mapping of
   // src fd->dest fd to propagate FDs into the child process.
   // This pointer is owned by the caller and must live through the
   // call to LaunchProcess().
-  const FileHandleMappingVector* fds_to_remap = nullptr;
+  const FileHandleMappingVector* fds_to_remap;
 
   // Each element is an RLIMIT_* constant that should be raised to its
   // rlim_max.  This pointer is owned by the caller and must live through
   // the call to LaunchProcess().
-  const std::vector<int>* maximize_rlimits = nullptr;
+  const std::vector<int>* maximize_rlimits;
 
   // If true, start the process in a new process group, instead of
   // inheriting the parent's process group.  The pgid of the child process
   // will be the same as its pid.
-  bool new_process_group = false;
+  bool new_process_group;
 
 #if defined(OS_LINUX)
   // If non-zero, start the process using clone(), using flags as provided.
   // Unlike in clone, clone_flags may not contain a custom termination signal
   // that is sent to the parent when the child dies. The termination signal will
   // always be set to SIGCHLD.
-  int clone_flags = 0;
+  int clone_flags;
 
   // By default, child processes will have the PR_SET_NO_NEW_PRIVS bit set. If
   // true, then this bit will not be set in the new child process.
-  bool allow_new_privs = false;
+  bool allow_new_privs;
 
   // Sets parent process death signal to SIGKILL.
-  bool kill_on_parent_death = false;
+  bool kill_on_parent_death;
 #endif  // defined(OS_LINUX)
 
 #if defined(OS_POSIX)
-  // If not empty, launch the specified executable instead of
-  // cmdline.GetProgram(). This is useful when it is necessary to pass a custom
-  // argv[0].
-  base::FilePath real_path;
-
   // If non-null, a delegate to be run immediately prior to executing the new
   // program in the child process.
   //
   // WARNING: If LaunchProcess is called in the presence of multiple threads,
   // code running in this delegate essentially needs to be async-signal safe
   // (see man 7 signal for a list of allowed functions).
-  PreExecDelegate* pre_exec_delegate = nullptr;
+  PreExecDelegate* pre_exec_delegate;
 #endif  // defined(OS_POSIX)
 
 #if defined(OS_CHROMEOS)
   // If non-negative, the specified file descriptor will be set as the launched
   // process' controlling terminal.
-  int ctrl_terminal_fd = -1;
+  int ctrl_terminal_fd;
 #endif  // defined(OS_CHROMEOS)
 #endif  // !defined(OS_WIN)
 };
@@ -275,12 +270,6 @@
 BASE_EXPORT void RaiseProcessToHighPriority();
 
 #if defined(OS_MACOSX)
-// An implementation of LaunchProcess() that uses posix_spawn() instead of
-// fork()+exec(). This does not support the |pre_exec_delegate| and
-// |current_directory| options.
-Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
-                                const LaunchOptions& options);
-
 // Restore the default exception handler, setting it to Apple Crash Reporter
 // (ReportCrash).  When forking and execing a new process, the child will
 // inherit the parent's exception ports, which may be set to the Breakpad
diff --git a/base/process/launch_mac.cc b/base/process/launch_mac.cc
index 3732bc1..5895eae 100644
--- a/base/process/launch_mac.cc
+++ b/base/process/launch_mac.cc
@@ -4,75 +4,13 @@
 
 #include "base/process/launch.h"
 
-#include <crt_externs.h>
 #include <mach/mach.h>
-#include <spawn.h>
-#include <string.h>
-#include <sys/wait.h>
+#include <servers/bootstrap.h>
 
 #include "base/logging.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/threading/thread_restrictions.h"
 
 namespace base {
 
-namespace {
-
-// DPSXCHECK is a Debug Posix Spawn Check macro. The posix_spawn* family of
-// functions return an errno value, as opposed to setting errno directly. This
-// macro emulates a DPCHECK().
-#define DPSXCHECK(expr)                                              \
-  do {                                                               \
-    int rv = (expr);                                                 \
-    DCHECK_EQ(rv, 0) << #expr << ": -" << rv << " " << strerror(rv); \
-  } while (0)
-
-class PosixSpawnAttr {
- public:
-  PosixSpawnAttr() { DPSXCHECK(posix_spawnattr_init(&attr_)); }
-
-  ~PosixSpawnAttr() { DPSXCHECK(posix_spawnattr_destroy(&attr_)); }
-
-  posix_spawnattr_t* get() { return &attr_; }
-
- private:
-  posix_spawnattr_t attr_;
-};
-
-class PosixSpawnFileActions {
- public:
-  PosixSpawnFileActions() {
-    DPSXCHECK(posix_spawn_file_actions_init(&file_actions_));
-  }
-
-  ~PosixSpawnFileActions() {
-    DPSXCHECK(posix_spawn_file_actions_destroy(&file_actions_));
-  }
-
-  void Open(int filedes, const char* path, int mode) {
-    DPSXCHECK(posix_spawn_file_actions_addopen(&file_actions_, filedes, path,
-                                               mode, 0));
-  }
-
-  void Dup2(int filedes, int newfiledes) {
-    DPSXCHECK(
-        posix_spawn_file_actions_adddup2(&file_actions_, filedes, newfiledes));
-  }
-
-  void Inherit(int filedes) {
-    DPSXCHECK(posix_spawn_file_actions_addinherit_np(&file_actions_, filedes));
-  }
-
-  const posix_spawn_file_actions_t* get() const { return &file_actions_; }
-
- private:
-  posix_spawn_file_actions_t file_actions_;
-
-  DISALLOW_COPY_AND_ASSIGN(PosixSpawnFileActions);
-};
-
-}  // namespace
-
 void RestoreDefaultExceptionHandler() {
   // This function is tailored to remove the Breakpad exception handler.
   // exception_mask matches s_exception_mask in
@@ -90,93 +28,4 @@
                            EXCEPTION_DEFAULT, THREAD_STATE_NONE);
 }
 
-Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
-                                const LaunchOptions& options) {
-  DCHECK(!options.pre_exec_delegate)
-      << "LaunchProcessPosixSpawn does not support PreExecDelegate";
-  DCHECK(options.current_directory.empty())
-      << "LaunchProcessPosixSpawn does not support current_directory";
-
-  PosixSpawnAttr attr;
-
-  short flags = POSIX_SPAWN_CLOEXEC_DEFAULT;
-  if (options.new_process_group) {
-    flags |= POSIX_SPAWN_SETPGROUP;
-    DPSXCHECK(posix_spawnattr_setpgroup(attr.get(), 0));
-  }
-  DPSXCHECK(posix_spawnattr_setflags(attr.get(), flags));
-
-  PosixSpawnFileActions file_actions;
-
-  // Process file descriptors for the child. By default, LaunchProcess will
-  // open stdin to /dev/null and inherit stdout and stderr.
-  bool inherit_stdout = true, inherit_stderr = true;
-  bool null_stdin = true;
-  if (options.fds_to_remap) {
-    for (const auto& dup2_pair : *options.fds_to_remap) {
-      if (dup2_pair.second == STDIN_FILENO) {
-        null_stdin = false;
-      } else if (dup2_pair.second == STDOUT_FILENO) {
-        inherit_stdout = false;
-      } else if (dup2_pair.second == STDERR_FILENO) {
-        inherit_stderr = false;
-      }
-
-      if (dup2_pair.first == dup2_pair.second) {
-        file_actions.Inherit(dup2_pair.second);
-      } else {
-        file_actions.Dup2(dup2_pair.first, dup2_pair.second);
-      }
-    }
-  }
-
-  if (null_stdin) {
-    file_actions.Open(STDIN_FILENO, "/dev/null", O_RDONLY);
-  }
-  if (inherit_stdout) {
-    file_actions.Inherit(STDOUT_FILENO);
-  }
-  if (inherit_stderr) {
-    file_actions.Inherit(STDERR_FILENO);
-  }
-
-  std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
-  for (size_t i = 0; i < argv.size(); i++) {
-    argv_cstr[i] = const_cast<char*>(argv[i].c_str());
-  }
-  argv_cstr[argv.size()] = nullptr;
-
-  std::unique_ptr<char* []> owned_environ;
-  char** new_environ = options.clear_environ ? nullptr : *_NSGetEnviron();
-  if (!options.environ.empty()) {
-    owned_environ = AlterEnvironment(new_environ, options.environ);
-    new_environ = owned_environ.get();
-  }
-
-  const char* executable_path = !options.real_path.empty()
-                                    ? options.real_path.value().c_str()
-                                    : argv_cstr[0];
-
-  // Use posix_spawnp as some callers expect to have PATH consulted.
-  pid_t pid;
-  int rv = posix_spawnp(&pid, executable_path, file_actions.get(), attr.get(),
-                        &argv_cstr[0], new_environ);
-
-  if (rv != 0) {
-    DLOG(ERROR) << "posix_spawnp(" << executable_path << "): -" << rv << " "
-                << strerror(rv);
-    return Process();
-  }
-
-  if (options.wait) {
-    // While this isn't strictly disk IO, waiting for another process to
-    // finish is the sort of thing ThreadRestrictions is trying to prevent.
-    base::ThreadRestrictions::AssertIOAllowed();
-    pid_t ret = HANDLE_EINTR(waitpid(pid, nullptr, 0));
-    DPCHECK(ret > 0);
-  }
-
-  return Process(pid);
-}
-
 }  // namespace base
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index 19effa2..4fb1018 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -60,8 +60,6 @@
 #if defined(OS_MACOSX)
 #include <crt_externs.h>
 #include <sys/event.h>
-
-#include "base/feature_list.h"
 #else
 extern char** environ;
 #endif
@@ -72,11 +70,6 @@
 
 namespace {
 
-#if defined(OS_MACOSX)
-const Feature kMacLaunchProcessPosixSpawn{"MacLaunchProcessPosixSpawn",
-                                          FEATURE_ENABLED_BY_DEFAULT};
-#endif
-
 // Get the process's "environment" (i.e. the thing that setenv/getenv
 // work with).
 char** GetEnvironment() {
@@ -303,15 +296,6 @@
 
 Process LaunchProcess(const std::vector<std::string>& argv,
                       const LaunchOptions& options) {
-#if defined(OS_MACOSX)
-  if (FeatureList::IsEnabled(kMacLaunchProcessPosixSpawn)) {
-    // TODO(rsesek): Do this unconditionally. There is one user for each of
-    // these two options. https://crbug.com/179923.
-    if (!options.pre_exec_delegate && options.current_directory.empty())
-      return LaunchProcessPosixSpawn(argv, options);
-  }
-#endif
-
   size_t fd_shuffle_size = 0;
   if (options.fds_to_remap) {
     fd_shuffle_size = options.fds_to_remap->size();
@@ -508,10 +492,7 @@
       options.pre_exec_delegate->RunAsyncSafe();
     }
 
-    const char* executable_path = !options.real_path.empty() ?
-        options.real_path.value().c_str() : argv_cstr[0];
-
-    execvp(executable_path, argv_cstr.get());
+    execvp(argv_cstr[0], argv_cstr.get());
 
     RAW_LOG(ERROR, "LaunchProcess: failed to execvp:");
     RAW_LOG(ERROR, argv_cstr[0]);
diff --git a/base/process/memory.cc b/base/process/memory.cc
deleted file mode 100644
index 6349c08..0000000
--- a/base/process/memory.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/debug/alias.h"
-#include "base/logging.h"
-#include "base/process/memory.h"
-#include "build/build_config.h"
-
-namespace base {
-
-// Defined in memory_win.cc for Windows.
-#if !defined(OS_WIN)
-
-namespace {
-
-// Breakpad server classifies base::`anonymous namespace'::OnNoMemory as
-// out-of-memory crash.
-NOINLINE void OnNoMemory(size_t size) {
-  size_t tmp_size = size;
-  base::debug::Alias(&tmp_size);
-  LOG(FATAL) << "Out of memory. size=" << tmp_size;
-}
-
-}  // namespace
-
-void TerminateBecauseOutOfMemory(size_t size) {
-  OnNoMemory(size);
-}
-
-#endif
-
-// Defined in memory_mac.mm for Mac.
-#if !defined(OS_MACOSX)
-
-bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
-  const size_t alloc_size = num_items * size;
-
-  // Overflow check
-  if (size && ((alloc_size / size) != num_items)) {
-    *result = NULL;
-    return false;
-  }
-
-  if (!UncheckedMalloc(alloc_size, result))
-    return false;
-
-  memset(*result, 0, alloc_size);
-  return true;
-}
-
-#endif
-
-}  // namespace base
diff --git a/base/process/memory.h b/base/process/memory.h
deleted file mode 100644
index 77911cf..0000000
--- a/base/process/memory.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PROCESS_MEMORY_H_
-#define BASE_PROCESS_MEMORY_H_
-
-#include <stddef.h>
-
-#include "base/base_export.h"
-#include "base/process/process_handle.h"
-#include "build/build_config.h"
-
-#ifdef PVALLOC_AVAILABLE
-// Build config explicitly tells us whether or not pvalloc is available.
-#elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
-#define PVALLOC_AVAILABLE 1
-#else
-#define PVALLOC_AVAILABLE 0
-#endif
-
-namespace base {
-
-// Enables 'terminate on heap corruption' flag. Helps protect against heap
-// overflow. Has no effect if the OS doesn't provide the necessary facility.
-BASE_EXPORT void EnableTerminationOnHeapCorruption();
-
-// Turns on process termination if memory runs out.
-BASE_EXPORT void EnableTerminationOnOutOfMemory();
-
-// Terminates process. Should be called only for out of memory errors.
-// Crash reporting classifies such crashes as OOM.
-BASE_EXPORT void TerminateBecauseOutOfMemory(size_t size);
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-BASE_EXPORT extern size_t g_oom_size;
-
-// The maximum allowed value for the OOM score.
-const int kMaxOomScore = 1000;
-
-// This adjusts /proc/<pid>/oom_score_adj so the Linux OOM killer will
-// prefer to kill certain process types over others. The range for the
-// adjustment is [-1000, 1000], with [0, 1000] being user accessible.
-// If the Linux system doesn't support the newer oom_score_adj range
-// of [0, 1000], then we revert to using the older oom_adj, and
-// translate the given value into [0, 15].  Some aliasing of values
-// may occur in that case, of course.
-BASE_EXPORT bool AdjustOOMScore(ProcessId process, int score);
-#endif
-
-#if defined(OS_WIN)
-namespace win {
-
-// Custom Windows exception code chosen to indicate an out of memory error.
-// See https://msdn.microsoft.com/en-us/library/het71c37.aspx.
-// "To make sure that you do not define a code that conflicts with an existing
-// exception code" ... "The resulting error code should therefore have the
-// highest four bits set to hexadecimal E."
-// 0xe0000008 was chosen arbitrarily, as 0x00000008 is ERROR_NOT_ENOUGH_MEMORY.
-const DWORD kOomExceptionCode = 0xe0000008;
-
-}  // namespace win
-#endif
-
-// Special allocator functions for callers that want to check for OOM.
-// These will not abort if the allocation fails even if
-// EnableTerminationOnOutOfMemory has been called.
-// This can be useful for huge and/or unpredictable size memory allocations.
-// Please only use this if you really handle the case when the allocation
-// fails. Doing otherwise would risk security.
-// These functions may still crash on OOM when running under memory tools,
-// specifically ASan and other sanitizers.
-// Return value tells whether the allocation succeeded. If it fails |result| is
-// set to NULL, otherwise it holds the memory address.
-BASE_EXPORT WARN_UNUSED_RESULT bool UncheckedMalloc(size_t size,
-                                                    void** result);
-BASE_EXPORT WARN_UNUSED_RESULT bool UncheckedCalloc(size_t num_items,
-                                                    size_t size,
-                                                    void** result);
-
-}  // namespace base
-
-#endif  // BASE_PROCESS_MEMORY_H_
diff --git a/base/process/memory_linux.cc b/base/process/memory_linux.cc
deleted file mode 100644
index 985bc54..0000000
--- a/base/process/memory_linux.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/process/memory.h"
-
-#include <stddef.h>
-
-#include <new>
-
-#include "base/allocator/allocator_shim.h"
-#include "base/allocator/features.h"
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
-#include "base/logging.h"
-#include "base/process/internal_linux.h"
-#include "base/strings/string_number_conversions.h"
-#include "build/build_config.h"
-
-#if defined(USE_TCMALLOC)
-#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"
-#endif
-
-namespace base {
-
-size_t g_oom_size = 0U;
-
-namespace {
-
-void OnNoMemorySize(size_t size) {
-  g_oom_size = size;
-
-  if (size != 0)
-    LOG(FATAL) << "Out of memory, size = " << size;
-  LOG(FATAL) << "Out of memory.";
-}
-
-void OnNoMemory() {
-  OnNoMemorySize(0);
-}
-
-}  // namespace
-
-// TODO(primiano): Once the unified shim is on by default (crbug.com/550886)
-// get rid of the code in this entire #if section. The whole termination-on-OOM
-// logic is implemented in the shim.
-#if !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
-    !defined(THREAD_SANITIZER) && !defined(LEAK_SANITIZER) &&    \
-    !BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
-
-#if defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
-
-extern "C" {
-void* __libc_malloc(size_t size);
-void* __libc_realloc(void* ptr, size_t size);
-void* __libc_calloc(size_t nmemb, size_t size);
-void* __libc_valloc(size_t size);
-#if PVALLOC_AVAILABLE == 1
-void* __libc_pvalloc(size_t size);
-#endif
-void* __libc_memalign(size_t alignment, size_t size);
-
-// Overriding the system memory allocation functions:
-//
-// For security reasons, we want malloc failures to be fatal. Too much code
-// doesn't check for a NULL return value from malloc and unconditionally uses
-// the resulting pointer. If the first offset that they try to access is
-// attacker controlled, then the attacker can direct the code to access any
-// part of memory.
-//
-// Thus, we define all the standard malloc functions here and mark them as
-// visibility 'default'. This means that they replace the malloc functions for
-// all Chromium code and also for all code in shared libraries. There are tests
-// for this in process_util_unittest.cc.
-//
-// If we are using tcmalloc, then the problem is moot since tcmalloc handles
-// this for us. Thus this code is in a !defined(USE_TCMALLOC) block.
-//
-// If we are testing the binary with AddressSanitizer, we should not
-// redefine malloc and let AddressSanitizer do it instead.
-//
-// We call the real libc functions in this code by using __libc_malloc etc.
-// Previously we tried using dlsym(RTLD_NEXT, ...) but that failed depending on
-// the link order. Since ld.so needs calloc during symbol resolution, it
-// defines its own versions of several of these functions in dl-minimal.c.
-// Depending on the runtime library order, dlsym ended up giving us those
-// functions and bad things happened. See crbug.com/31809
-//
-// This means that any code which calls __libc_* gets the raw libc versions of
-// these functions.
-
-#define DIE_ON_OOM_1(function_name) \
-  void* function_name(size_t) __attribute__ ((visibility("default"))); \
-  \
-  void* function_name(size_t size) { \
-    void* ret = __libc_##function_name(size); \
-    if (ret == NULL && size != 0) \
-      OnNoMemorySize(size); \
-    return ret; \
-  }
-
-#define DIE_ON_OOM_2(function_name, arg1_type) \
-  void* function_name(arg1_type, size_t) \
-      __attribute__ ((visibility("default"))); \
-  \
-  void* function_name(arg1_type arg1, size_t size) { \
-    void* ret = __libc_##function_name(arg1, size); \
-    if (ret == NULL && size != 0) \
-      OnNoMemorySize(size); \
-    return ret; \
-  }
-
-DIE_ON_OOM_1(malloc)
-DIE_ON_OOM_1(valloc)
-#if PVALLOC_AVAILABLE == 1
-DIE_ON_OOM_1(pvalloc)
-#endif
-
-DIE_ON_OOM_2(calloc, size_t)
-DIE_ON_OOM_2(realloc, void*)
-DIE_ON_OOM_2(memalign, size_t)
-
-// posix_memalign has a unique signature and doesn't have a __libc_ variant.
-int posix_memalign(void** ptr, size_t alignment, size_t size)
-    __attribute__ ((visibility("default")));
-
-int posix_memalign(void** ptr, size_t alignment, size_t size) {
-  // This will use the safe version of memalign, above.
-  *ptr = memalign(alignment, size);
-  return 0;
-}
-
-}  // extern C
-
-#else
-
-// TODO(mostynb@opera.com): dlsym dance
-
-#endif  // LIBC_GLIBC && !USE_TCMALLOC
-
-#endif  // !*_SANITIZER
-
-void EnableTerminationOnHeapCorruption() {
-  // On Linux, there nothing to do AFAIK.
-}
-
-void EnableTerminationOnOutOfMemory() {
-  // Set the new-out of memory handler.
-  std::set_new_handler(&OnNoMemory);
-  // If we're using glibc's allocator, the above functions will override
-  // malloc and friends and make them die on out of memory.
-
-#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
-  allocator::SetCallNewHandlerOnMallocFailure(true);
-#elif defined(USE_TCMALLOC)
-  // For tcmalloc, we need to tell it to behave like new.
-  tc_set_new_mode(1);
-#endif
-}
-
-// NOTE: This is not the only version of this function in the source:
-// the setuid sandbox (in process_util_linux.c, in the sandbox source)
-// also has its own C version.
-bool AdjustOOMScore(ProcessId process, int score) {
-  if (score < 0 || score > kMaxOomScore)
-    return false;
-
-  FilePath oom_path(internal::GetProcPidDir(process));
-
-  // Attempt to write the newer oom_score_adj file first.
-  FilePath oom_file = oom_path.AppendASCII("oom_score_adj");
-  if (PathExists(oom_file)) {
-    std::string score_str = IntToString(score);
-    DVLOG(1) << "Adjusting oom_score_adj of " << process << " to "
-             << score_str;
-    int score_len = static_cast<int>(score_str.length());
-    return (score_len == WriteFile(oom_file, score_str.c_str(), score_len));
-  }
-
-  // If the oom_score_adj file doesn't exist, then we write the old
-  // style file and translate the oom_adj score to the range 0-15.
-  oom_file = oom_path.AppendASCII("oom_adj");
-  if (PathExists(oom_file)) {
-    // Max score for the old oom_adj range.  Used for conversion of new
-    // values to old values.
-    const int kMaxOldOomScore = 15;
-
-    int converted_score = score * kMaxOldOomScore / kMaxOomScore;
-    std::string score_str = IntToString(converted_score);
-    DVLOG(1) << "Adjusting oom_adj of " << process << " to " << score_str;
-    int score_len = static_cast<int>(score_str.length());
-    return (score_len == WriteFile(oom_file, score_str.c_str(), score_len));
-  }
-
-  return false;
-}
-
-bool UncheckedMalloc(size_t size, void** result) {
-#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
-  *result = allocator::UncheckedAlloc(size);
-#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR) || \
-    (!defined(LIBC_GLIBC) && !defined(USE_TCMALLOC))
-  *result = malloc(size);
-#elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
-  *result = __libc_malloc(size);
-#elif defined(USE_TCMALLOC)
-  *result = tc_malloc_skip_new_handler(size);
-#endif
-  return *result != NULL;
-}
-
-}  // namespace base
diff --git a/base/process/memory_stubs.cc b/base/process/memory_stubs.cc
deleted file mode 100644
index 67deb4f..0000000
--- a/base/process/memory_stubs.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/process/memory.h"
-
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "base/compiler_specific.h"
-
-namespace base {
-
-void EnableTerminationOnOutOfMemory() {
-}
-
-void EnableTerminationOnHeapCorruption() {
-}
-
-bool AdjustOOMScore(ProcessId process, int score) {
-  ALLOW_UNUSED_PARAM(process);
-  ALLOW_UNUSED_PARAM(score);
-  return false;
-}
-
-void TerminateBecauseOutOfMemory(size_t size) {
-  ALLOW_UNUSED_PARAM(size);
-  abort();
-}
-
-// UncheckedMalloc and Calloc exist so that platforms making use of
-// EnableTerminationOnOutOfMemory have a way to allocate memory without
-// crashing. This _stubs.cc file is for platforms that do not support
-// EnableTerminationOnOutOfMemory (note the empty implementation above). As
-// such, these two Unchecked.alloc functions need only trivially pass-through to
-// their respective stdlib function since those functions will return null on a
-// failure to allocate.
-
-bool UncheckedMalloc(size_t size, void** result) {
-  *result = malloc(size);
-  return *result != nullptr;
-}
-
-bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
-  *result = calloc(num_items, size);
-  return *result != nullptr;
-}
-
-}  // namespace base
diff --git a/base/process/port_provider_mac.cc b/base/process/port_provider_mac.cc
index 23d214c..ac13949 100644
--- a/base/process/port_provider_mac.cc
+++ b/base/process/port_provider_mac.cc
@@ -21,8 +21,7 @@
 
 void PortProvider::NotifyObservers(ProcessHandle process) {
   base::AutoLock l(lock_);
-  for (auto& observer : observer_list_)
-    observer.OnReceivedTaskPort(process);
+  FOR_EACH_OBSERVER(Observer, observer_list_, OnReceivedTaskPort(process));
 }
 
 }  // namespace base
diff --git a/base/process/process.h b/base/process/process.h
index fc2add2..70c8260 100644
--- a/base/process/process.h
+++ b/base/process/process.h
@@ -15,17 +15,8 @@
 #include "base/win/scoped_handle.h"
 #endif
 
-#if defined(OS_MACOSX)
-#include "base/feature_list.h"
-#include "base/process/port_provider_mac.h"
-#endif
-
 namespace base {
 
-#if defined(OS_MACOSX)
-extern const Feature kMacAllowBackgroundingProcesses;
-#endif
-
 // Provides a move-only encapsulation of a process.
 //
 // This object is not tied to the lifetime of the underlying process: the
@@ -76,9 +67,6 @@
   // Returns true if processes can be backgrounded.
   static bool CanBackgroundProcesses();
 
-  // Terminates the current process immediately with |exit_code|.
-  static void TerminateCurrentProcessImmediately(int exit_code);
-
   // Returns true if this objects represents a valid process.
   bool IsValid() const;
 
@@ -111,35 +99,13 @@
   // any process.
   // NOTE: |exit_code| is optional, nullptr can be passed if the exit code is
   // not required.
-  bool WaitForExit(int* exit_code) const;
+  bool WaitForExit(int* exit_code);
 
   // Same as WaitForExit() but only waits for up to |timeout|.
   // NOTE: |exit_code| is optional, nullptr can be passed if the exit code
   // is not required.
-  bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const;
+  bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code);
 
-#if defined(OS_MACOSX)
-  // The Mac needs a Mach port in order to manipulate a process's priority,
-  // and there's no good way to get that from base given the pid. These Mac
-  // variants of the IsProcessBackgrounded and SetProcessBackgrounded API take
-  // a port provider for this reason. See crbug.com/460102
-  //
-  // A process is backgrounded when its task priority is
-  // |TASK_BACKGROUND_APPLICATION|.
-  //
-  // Returns true if the port_provider can locate a task port for the process
-  // and it is backgrounded. If port_provider is null, returns false.
-  bool IsProcessBackgrounded(PortProvider* port_provider) const;
-
-  // Set the process as backgrounded. If value is
-  // true, the priority of the associated task will be set to
-  // TASK_BACKGROUND_APPLICATION. If value is false, the
-  // priority of the process will be set to TASK_FOREGROUND_APPLICATION.
-  //
-  // Returns true if the priority was changed, false otherwise. If
-  // |port_provider| is null, this is a no-op and it returns false.
-  bool SetProcessBackgrounded(PortProvider* port_provider, bool value);
-#else
   // A process is backgrounded when it's priority is lower than normal.
   // Return true if this process is backgrounded, false otherwise.
   bool IsProcessBackgrounded() const;
@@ -149,7 +115,7 @@
   // will be made "normal" - equivalent to default process priority.
   // Returns true if the priority was changed, false otherwise.
   bool SetProcessBackgrounded(bool value);
-#endif  // defined(OS_MACOSX)
+
   // Returns an integer representing the priority of a process. The meaning
   // of this value is OS dependent.
   int GetPriority() const;
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
index a38930a..0b38726 100644
--- a/base/process/process_metrics.cc
+++ b/base/process/process_metrics.cc
@@ -46,7 +46,7 @@
   return std::move(res);
 }
 
-std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateCurrentProcessMetrics() {
+ProcessMetrics* ProcessMetrics::CreateCurrentProcessMetrics() {
 #if !defined(OS_MACOSX) || defined(OS_IOS)
   return CreateProcessMetrics(base::GetCurrentProcessHandle());
 #else
@@ -84,9 +84,8 @@
   last_idle_wakeups_time_ = time;
   last_absolute_idle_wakeups_ = absolute_idle_wakeups;
 
+  // Round to average wakeups per second.
   int64_t wakeups_delta_for_ms = wakeups_delta * Time::kMicrosecondsPerSecond;
-  // Round the result up by adding 1/2 (the second term resolves to 1/2 without
-  // dropping down into floating point).
   return (wakeups_delta_for_ms + time_delta / 2) / time_delta;
 }
 #else
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 71d6042..57cb3ab 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -11,7 +11,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <memory>
 #include <string>
 
 #include "base/base_export.h"
@@ -104,22 +103,22 @@
   ~ProcessMetrics();
 
   // Creates a ProcessMetrics for the specified process.
+  // The caller owns the returned object.
 #if !defined(OS_MACOSX) || defined(OS_IOS)
-  static std::unique_ptr<ProcessMetrics> CreateProcessMetrics(
-      ProcessHandle process);
+  static ProcessMetrics* CreateProcessMetrics(ProcessHandle process);
 #else
 
   // The port provider needs to outlive the ProcessMetrics object returned by
   // this function. If NULL is passed as provider, the returned object
   // only returns valid metrics if |process| is the current process.
-  static std::unique_ptr<ProcessMetrics> CreateProcessMetrics(
-      ProcessHandle process,
-      PortProvider* port_provider);
+  static ProcessMetrics* CreateProcessMetrics(ProcessHandle process,
+                                              PortProvider* port_provider);
 #endif  // !defined(OS_MACOSX) || defined(OS_IOS)
 
   // Creates a ProcessMetrics for the current process. This a cross-platform
   // convenience wrapper for CreateProcessMetrics().
-  static std::unique_ptr<ProcessMetrics> CreateCurrentProcessMetrics();
+  // The caller owns the returned object.
+  static ProcessMetrics* CreateCurrentProcessMetrics();
 
   // Returns the current space allocated for the pagefile, in bytes (these pages
   // may or may not be in memory).  On Linux, this returns the total virtual
@@ -136,7 +135,8 @@
   // memory currently allocated to a process that cannot be shared. Returns
   // false on platform specific error conditions.  Note: |private_bytes|
   // returns 0 on unsupported OSes: prior to XP SP2.
-  bool GetMemoryBytes(size_t* private_bytes, size_t* shared_bytes) const;
+  bool GetMemoryBytes(size_t* private_bytes,
+                      size_t* shared_bytes);
   // Fills a CommittedKBytes with both resident and paged
   // memory usage as per definition of CommittedBytes.
   void GetCommittedKBytes(CommittedKBytes* usage) const;
@@ -144,9 +144,6 @@
   // usage in bytes, as per definition of WorkingSetBytes. Note that this
   // function is somewhat expensive on Windows (a few ms per process).
   bool GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const;
-  // Computes pss (proportional set size) of a process. Note that this
-  // function is somewhat expensive on Windows (a few ms per process).
-  bool GetProportionalSetSizeBytes(uint64_t* pss_bytes) const;
 
 #if defined(OS_MACOSX)
   // Fills both CommitedKBytes and WorkingSetKBytes in a single operation. This
@@ -154,10 +151,6 @@
   // system call.
   bool GetCommittedAndWorkingSetKBytes(CommittedKBytes* usage,
                                        WorkingSetKBytes* ws_usage) const;
-  // Returns private, shared, and total resident bytes.
-  bool GetMemoryBytes(size_t* private_bytes,
-                      size_t* shared_bytes,
-                      size_t* resident_bytes) const;
 #endif
 
   // Returns the CPU usage in percent since the last time this method or
@@ -302,9 +295,9 @@
   int dirty;
 
   // vmstats data.
-  unsigned long pswpin;
-  unsigned long pswpout;
-  unsigned long pgmajfault;
+  int pswpin;
+  int pswpout;
+  int pgmajfault;
 #endif  // defined(OS_ANDROID) || defined(OS_LINUX)
 
 #if defined(OS_CHROMEOS)
@@ -381,9 +374,6 @@
 // Retrieves data from /proc/diskstats about system-wide disk I/O.
 // Fills in the provided |diskinfo| structure. Returns true on success.
 BASE_EXPORT bool GetSystemDiskInfo(SystemDiskInfo* diskinfo);
-
-// Returns the amount of time spent in user space since boot across all CPUs.
-BASE_EXPORT TimeDelta GetUserCpuTimeSinceBoot();
 #endif  // defined(OS_LINUX) || defined(OS_ANDROID)
 
 #if defined(OS_CHROMEOS)
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index 5d542cc..3d27656 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -17,7 +17,6 @@
 #include "base/files/dir_reader_posix.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
 #include "base/process/internal_linux.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_split.h"
@@ -73,8 +72,8 @@
     const std::string& key = pairs[i].first;
     const std::string& value_str = pairs[i].second;
     if (key == field) {
-      std::vector<StringPiece> split_value_str =
-          SplitStringPiece(value_str, " ", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+      std::vector<StringPiece> split_value_str = SplitStringPiece(
+          value_str, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
       if (split_value_str.size() != 2 || split_value_str[1] != "kB") {
         NOTREACHED();
         return 0;
@@ -164,9 +163,8 @@
 }  // namespace
 
 // static
-std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
-    ProcessHandle process) {
-  return WrapUnique(new ProcessMetrics(process));
+ProcessMetrics* ProcessMetrics::CreateProcessMetrics(ProcessHandle process) {
+  return new ProcessMetrics(process);
 }
 
 // On linux, we return vsize.
@@ -192,7 +190,7 @@
 }
 
 bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
-                                    size_t* shared_bytes) const {
+                                    size_t* shared_bytes) {
   WorkingSetKBytes ws_usage;
   if (!GetWorkingSetKBytes(&ws_usage))
     return false;
@@ -356,7 +354,8 @@
   }
 
   std::vector<std::string> totmaps_fields = SplitString(
-      totmaps_data, kWhitespaceASCII, KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
+      totmaps_data, base::kWhitespaceASCII, base::KEEP_WHITESPACE,
+      base::SPLIT_WANT_NONEMPTY);
 
   DCHECK_EQ("Pss:", totmaps_fields[kPssIndex-1]);
   DCHECK_EQ("Private_Clean:", totmaps_fields[kPrivate_CleanIndex - 1]);
@@ -407,8 +406,8 @@
       return false;
   }
 
-  std::vector<StringPiece> statm_vec =
-      SplitStringPiece(statm, " ", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  std::vector<StringPiece> statm_vec = SplitStringPiece(
+      statm, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   if (statm_vec.size() != 7)
     return false;  // Not the format we expect.
 
@@ -687,16 +686,12 @@
     if (tokens.size() != 2)
       continue;
 
-    uint64_t val;
-    if (!StringToUint64(tokens[1], &val))
-      continue;
-
     if (tokens[0] == "pswpin") {
-      meminfo->pswpin = val;
+      StringToInt(tokens[1], &meminfo->pswpin);
     } else if (tokens[0] == "pswpout") {
-      meminfo->pswpout = val;
+      StringToInt(tokens[1], &meminfo->pswpout);
     } else if (tokens[0] == "pgmajfault") {
-      meminfo->pgmajfault = val;
+      StringToInt(tokens[1], &meminfo->pgmajfault);
     }
   }
 
@@ -912,10 +907,6 @@
   return true;
 }
 
-TimeDelta GetUserCpuTimeSinceBoot() {
-  return internal::GetUserCpuTimeSinceBoot();
-}
-
 #if defined(OS_CHROMEOS)
 std::unique_ptr<Value> SwapInfo::ToValue() const {
   std::unique_ptr<DictionaryValue> res(new DictionaryValue());
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
index 51f5fd4..8b5d564 100644
--- a/base/process/process_metrics_mac.cc
+++ b/base/process/process_metrics_mac.cc
@@ -15,7 +15,6 @@
 #include "base/logging.h"
 #include "base/mac/mach_logging.h"
 #include "base/mac/scoped_mach_port.h"
-#include "base/memory/ptr_util.h"
 #include "base/sys_info.h"
 
 #if !defined(TASK_POWER_INFO)
@@ -80,7 +79,10 @@
 
 }  // namespace
 
-SystemMemoryInfoKB::SystemMemoryInfoKB() : total(0), free(0) {}
+SystemMemoryInfoKB::SystemMemoryInfoKB() {
+  total = 0;
+  free = 0;
+}
 
 SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
     default;
@@ -92,10 +94,10 @@
 // otherwise return 0.
 
 // static
-std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
+ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
     ProcessHandle process,
     PortProvider* port_provider) {
-  return WrapUnique(new ProcessMetrics(process, port_provider));
+  return new ProcessMetrics(process, port_provider);
 }
 
 size_t ProcessMetrics::GetPagefileUsage() const {
@@ -110,12 +112,10 @@
 }
 
 size_t ProcessMetrics::GetWorkingSetSize() const {
-  size_t private_bytes = 0;
-  size_t shared_bytes = 0;
-  size_t resident_bytes = 0;
-  if (!GetMemoryBytes(&private_bytes, &shared_bytes, &resident_bytes))
+  task_basic_info_64 task_info_data;
+  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
     return 0;
-  return resident_bytes;
+  return task_info_data.resident_size;
 }
 
 size_t ProcessMetrics::GetPeakWorkingSetSize() const {
@@ -126,7 +126,7 @@
 // private_bytes is the size of private resident memory.
 // shared_bytes is the size of shared resident memory.
 bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
-                                    size_t* shared_bytes) const {
+                                    size_t* shared_bytes) {
   size_t private_pages_count = 0;
   size_t shared_pages_count = 0;
 
@@ -145,7 +145,7 @@
 
   // The same region can be referenced multiple times. To avoid double counting
   // we need to keep track of which regions we've already counted.
-  hash_set<int> seen_objects;
+  base::hash_set<int> seen_objects;
 
   // We iterate through each VM region in the task's address map. For shared
   // memory we add up all the pages that are marked as shared. Like libtop we
@@ -191,7 +191,6 @@
       info.share_mode = SM_PRIVATE;
 
     switch (info.share_mode) {
-      case SM_LARGE_PAGE:
       case SM_PRIVATE:
         private_pages_count += info.private_pages_resident;
         private_pages_count += info.shared_pages_resident;
@@ -200,9 +199,6 @@
         private_pages_count += info.private_pages_resident;
         // Fall through
       case SM_SHARED:
-      case SM_PRIVATE_ALIASED:
-      case SM_TRUESHARED:
-      case SM_SHARED_ALIASED:
         if (seen_objects.count(info.obj_id) == 0) {
           // Only count the first reference to this region.
           seen_objects.insert(info.obj_id);
@@ -252,15 +248,6 @@
   return true;
 }
 
-bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
-                                    size_t* shared_bytes,
-                                    size_t* resident_bytes) const {
-  if (!GetMemoryBytes(private_bytes, shared_bytes))
-    return false;
-  *resident_bytes = *private_bytes + *shared_bytes;
-  return true;
-}
-
 #define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
   (r)->tv_sec = (a)->seconds;             \
   (r)->tv_usec = (a)->microseconds;       \
@@ -339,17 +326,6 @@
     // where TASK_POWER_INFO isn't supported yet.
     return 0;
   }
-
-  // The task_power_info struct contains two wakeup counters:
-  // task_interrupt_wakeups and task_platform_idle_wakeups.
-  // task_interrupt_wakeups is the total number of wakeups generated by the
-  // process, and is the number that Activity Monitor reports.
-  // task_platform_idle_wakeups is a subset of task_interrupt_wakeups that
-  // tallies the number of times the processor was taken out of its low-power
-  // idle state to handle a wakeup. task_platform_idle_wakeups therefore result
-  // in a greater power increase than the other interrupts which occur while the
-  // CPU is already working, and reducing them has a greater overall impact on
-  // power usage. See the powermetrics man page for more info.
   return CalculateIdleWakeupsPerSecond(
       power_info_data.task_platform_idle_wakeups);
 }
diff --git a/base/process/process_metrics_posix.cc b/base/process/process_metrics_posix.cc
index 13acf2e..fad581e 100644
--- a/base/process/process_metrics_posix.cc
+++ b/base/process/process_metrics_posix.cc
@@ -33,8 +33,6 @@
 static const rlim_t kSystemDefaultMaxFds = 8192;
 #elif defined(OS_FREEBSD)
 static const rlim_t kSystemDefaultMaxFds = 8192;
-#elif defined(OS_NETBSD)
-static const rlim_t kSystemDefaultMaxFds = 1024;
 #elif defined(OS_OPENBSD)
 static const rlim_t kSystemDefaultMaxFds = 256;
 #elif defined(OS_ANDROID)
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index b0bd7ea..94a2ffe 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -286,13 +286,13 @@
     "pgrefill_high 0\n"
     "pgrefill_movable 0\n";
   EXPECT_TRUE(ParseProcVmstat(valid_input1, &meminfo));
-  EXPECT_EQ(179LU, meminfo.pswpin);
-  EXPECT_EQ(406LU, meminfo.pswpout);
-  EXPECT_EQ(487192LU, meminfo.pgmajfault);
+  EXPECT_EQ(meminfo.pswpin, 179);
+  EXPECT_EQ(meminfo.pswpout, 406);
+  EXPECT_EQ(meminfo.pgmajfault, 487192);
   EXPECT_TRUE(ParseProcVmstat(valid_input2, &meminfo));
-  EXPECT_EQ(12LU, meminfo.pswpin);
-  EXPECT_EQ(901LU, meminfo.pswpout);
-  EXPECT_EQ(2023LU, meminfo.pgmajfault);
+  EXPECT_EQ(meminfo.pswpin, 12);
+  EXPECT_EQ(meminfo.pswpout, 901);
+  EXPECT_EQ(meminfo.pgmajfault, 2023);
 }
 #endif  // defined(OS_LINUX) || defined(OS_ANDROID)
 
@@ -485,13 +485,10 @@
 
 }  // namespace
 
-// Arc++ note: don't compile as SpawnMultiProcessTestChild brings in a lot of
-// extra dependency.
-#if !defined(OS_ANDROID) && !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
 TEST(ProcessMetricsTest, GetOpenFdCount) {
   ScopedTempDir temp_dir;
   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
-  const FilePath temp_path = temp_dir.GetPath();
+  const FilePath temp_path = temp_dir.path();
   CommandLine child_command_line(GetMultiProcessTestChildBaseCommandLine());
   child_command_line.AppendSwitchPath(kTempDirFlag, temp_path);
   Process child = SpawnMultiProcessTestChild(
@@ -516,8 +513,6 @@
   EXPECT_EQ(0, open_fds);
   ASSERT_TRUE(child.Terminate(0, true));
 }
-#endif  // !defined(__ANDROID__)
-
 #endif  // defined(OS_LINUX)
 
 }  // namespace debug
diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc
index a1d84e9..ba9b544 100644
--- a/base/process/process_posix.cc
+++ b/base/process/process_posix.cc
@@ -9,7 +9,6 @@
 #include <sys/resource.h>
 #include <sys/wait.h>
 
-#include "base/debug/activity_tracker.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
 #include "base/posix/eintr_wrapper.h"
@@ -267,17 +266,12 @@
   return Process(handle);
 }
 
-#if !defined(OS_LINUX) && !defined(OS_MACOSX)
+#if !defined(OS_LINUX)
 // static
 bool Process::CanBackgroundProcesses() {
   return false;
 }
-#endif  // !defined(OS_LINUX) && !defined(OS_MACOSX)
-
-// static
-void Process::TerminateCurrentProcessImmediately(int exit_code) {
-  _exit(exit_code);
-}
+#endif  // !defined(OS_LINUX)
 
 bool Process::IsValid() const {
   return process_ != kNullProcessHandle;
@@ -320,12 +314,6 @@
   if (result && wait) {
     int tries = 60;
 
-    if (RunningOnValgrind()) {
-      // Wait for some extra time when running under Valgrind since the child
-      // processes may take some time doing leak checking.
-      tries *= 2;
-    }
-
     unsigned sleep_ms = 4;
 
     // The process may not end immediately due to pending I/O
@@ -365,18 +353,15 @@
 }
 #endif  // !defined(OS_NACL_NONSFI)
 
-bool Process::WaitForExit(int* exit_code) const {
+bool Process::WaitForExit(int* exit_code) {
   return WaitForExitWithTimeout(TimeDelta::Max(), exit_code);
 }
 
-bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
-  // Record the event that this thread is blocking upon (for hang diagnosis).
-  base::debug::ScopedProcessWaitActivity process_activity(this);
-
+bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) {
   return WaitForExitWithTimeoutImpl(Handle(), exit_code, timeout);
 }
 
-#if !defined(OS_LINUX) && !defined(OS_MACOSX)
+#if !defined(OS_LINUX)
 bool Process::IsProcessBackgrounded() const {
   // See SetProcessBackgrounded().
   DCHECK(IsValid());
@@ -384,13 +369,13 @@
 }
 
 bool Process::SetProcessBackgrounded(bool /*value*/) {
-  // Not implemented for POSIX systems other than Linux and Mac. With POSIX, if
-  // we were to lower the process priority we wouldn't be able to raise it back
-  // to its initial priority.
+  // Not implemented for POSIX systems other than Linux. With POSIX, if we were
+  // to lower the process priority we wouldn't be able to raise it back to its
+  // initial priority.
   NOTIMPLEMENTED();
   return false;
 }
-#endif  // !defined(OS_LINUX) && !defined(OS_MACOSX)
+#endif  // !defined(OS_LINUX)
 
 int Process::GetPriority() const {
   DCHECK(IsValid());
diff --git a/base/profiler/scoped_profile.h b/base/profiler/scoped_profile.h
index 4df6a1b..657150a 100644
--- a/base/profiler/scoped_profile.h
+++ b/base/profiler/scoped_profile.h
@@ -16,38 +16,28 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/profiler/tracked_time.h"
-#include "base/trace_event/heap_profiler.h"
 #include "base/tracked_objects.h"
 
-// Two level indirection is required for correct macro substitution.
-#define PASTE_COUNTER_ON_NAME2(name, counter) name##counter
-#define PASTE_COUNTER_ON_NAME(name, counter) \
-  PASTE_COUNTER_ON_NAME2(name, counter)
+#define PASTE_LINE_NUMBER_ON_NAME(name, line) name##line
 
-#define COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING \
-  PASTE_COUNTER_ON_NAME(some_profiler_variable_, __COUNTER__)
+#define LINE_BASED_VARIABLE_NAME_FOR_PROFILING                                 \
+    PASTE_LINE_NUMBER_ON_NAME(some_profiler_variable_, __LINE__)
 
 // Defines the containing scope as a profiled region. This allows developers to
 // profile their code and see results on their about:profiler page, as well as
-// on the UMA dashboard and heap profiler.
-#define TRACK_RUN_IN_THIS_SCOPED_REGION(dispatch_function_name)               \
-  const ::tracked_objects::Location& location =                               \
-      FROM_HERE_WITH_EXPLICIT_FUNCTION(#dispatch_function_name);              \
-  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION                               \
-  COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING(location.file_name());            \
-  ::tracked_objects::ScopedProfile COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING( \
-      location, ::tracked_objects::ScopedProfile::ENABLED)
+// on the UMA dashboard.
+#define TRACK_RUN_IN_THIS_SCOPED_REGION(dispatch_function_name)            \
+  ::tracked_objects::ScopedProfile LINE_BASED_VARIABLE_NAME_FOR_PROFILING( \
+      FROM_HERE_WITH_EXPLICIT_FUNCTION(#dispatch_function_name),           \
+      ::tracked_objects::ScopedProfile::ENABLED)
 
 // Same as TRACK_RUN_IN_THIS_SCOPED_REGION except that there's an extra param
 // which is concatenated with the function name for better filtering.
-#define TRACK_SCOPED_REGION(category_name, dispatch_function_name)            \
-  const ::tracked_objects::Location& location =                               \
-      FROM_HERE_WITH_EXPLICIT_FUNCTION("[" category_name                      \
-                                       "]" dispatch_function_name);           \
-  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION                               \
-  COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING(location.file_name());            \
-  ::tracked_objects::ScopedProfile COUNTER_BASED_VARIABLE_NAME_FOR_PROFILING( \
-      location, ::tracked_objects::ScopedProfile::ENABLED)
+#define TRACK_SCOPED_REGION(category_name, dispatch_function_name)         \
+  ::tracked_objects::ScopedProfile LINE_BASED_VARIABLE_NAME_FOR_PROFILING( \
+      FROM_HERE_WITH_EXPLICIT_FUNCTION(                                    \
+          "[" category_name "]" dispatch_function_name),                   \
+      ::tracked_objects::ScopedProfile::ENABLED)
 
 namespace tracked_objects {
 class Births;
diff --git a/base/rand_util_posix.cc b/base/rand_util_posix.cc
index 469f7af..6a6e05a 100644
--- a/base/rand_util_posix.cc
+++ b/base/rand_util_posix.cc
@@ -13,7 +13,6 @@
 #include "base/files/file_util.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/posix/eintr_wrapper.h"
 
 namespace {
 
@@ -23,7 +22,7 @@
 // we can use LazyInstance to handle opening it on the first access.
 class URandomFd {
  public:
-  URandomFd() : fd_(HANDLE_EINTR(open("/dev/urandom", O_RDONLY | O_CLOEXEC))) {
+  URandomFd() : fd_(open("/dev/urandom", O_RDONLY)) {
     DCHECK_GE(fd_, 0) << "Cannot open /dev/urandom: " << errno;
   }
 
diff --git a/base/run_loop.cc b/base/run_loop.cc
index 4c19d35..a2322f8 100644
--- a/base/run_loop.cc
+++ b/base/run_loop.cc
@@ -19,14 +19,12 @@
       running_(false),
       quit_when_idle_received_(false),
       weak_factory_(this) {
-  DCHECK(loop_);
 }
 
 RunLoop::~RunLoop() {
 }
 
 void RunLoop::Run() {
-  DCHECK(thread_checker_.CalledOnValidThread());
   if (!BeforeRun())
     return;
 
@@ -46,7 +44,6 @@
 }
 
 void RunLoop::Quit() {
-  DCHECK(thread_checker_.CalledOnValidThread());
   quit_called_ = true;
   if (running_ && loop_->run_loop_ == this) {
     // This is the inner-most RunLoop, so quit now.
@@ -55,7 +52,6 @@
 }
 
 void RunLoop::QuitWhenIdle() {
-  DCHECK(thread_checker_.CalledOnValidThread());
   quit_when_idle_received_ = true;
 }
 
diff --git a/base/run_loop.h b/base/run_loop.h
index 077d097..635018f 100644
--- a/base/run_loop.h
+++ b/base/run_loop.h
@@ -10,7 +10,6 @@
 #include "base/macros.h"
 #include "base/memory/weak_ptr.h"
 #include "base/message_loop/message_loop.h"
-#include "base/threading/thread_checker.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -106,8 +105,6 @@
   // that we should quit Run once it becomes idle.
   bool quit_when_idle_received_;
 
-  base::ThreadChecker thread_checker_;
-
   // WeakPtrFactory for QuitClosure safety.
   base::WeakPtrFactory<RunLoop> weak_factory_;
 
diff --git a/base/scoped_generic.h b/base/scoped_generic.h
index c2d51cf..84de6b7 100644
--- a/base/scoped_generic.h
+++ b/base/scoped_generic.h
@@ -14,7 +14,7 @@
 
 namespace base {
 
-// This class acts like unique_ptr with a custom deleter (although is slightly
+// This class acts like ScopedPtr with a custom deleter (although is slightly
 // less fancy in some of the more escoteric respects) except that it keeps a
 // copy of the object rather than a pointer, and we require that the contained
 // object has some kind of "invalid" value.
@@ -22,12 +22,12 @@
 // Defining a scoper based on this class allows you to get a scoper for
 // non-pointer types without having to write custom code for set, reset, and
 // move, etc. and get almost identical semantics that people are used to from
-// unique_ptr.
+// scoped_ptr.
 //
 // It is intended that you will typedef this class with an appropriate deleter
 // to implement clean up tasks for objects that act like pointers from a
 // resource management standpoint but aren't, such as file descriptors and
-// various types of operating system handles. Using unique_ptr for these
+// various types of operating system handles. Using scoped_ptr for these
 // things requires that you keep a pointer to the handle valid for the lifetime
 // of the scoper (which is easy to mess up).
 //
@@ -97,7 +97,7 @@
   }
 
   // Frees the currently owned object, if any. Then takes ownership of a new
-  // object, if given. Self-resets are not allowd as on unique_ptr. See
+  // object, if given. Self-resets are not allowd as on scoped_ptr. See
   // http://crbug.com/162971
   void reset(const element_type& value = traits_type::InvalidValue()) {
     if (data_.generic != traits_type::InvalidValue() && data_.generic == value)
diff --git a/base/scoped_observer.h b/base/scoped_observer.h
index 7f1d6fb..13d7ca8 100644
--- a/base/scoped_observer.h
+++ b/base/scoped_observer.h
@@ -47,7 +47,7 @@
   }
 
   bool IsObserving(Source* source) const {
-    return base::ContainsValue(sources_, source);
+    return ContainsValue(sources_, source);
   }
 
   bool IsObservingSources() const { return !sources_.empty(); }
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index 519c997..af9d2bf 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -87,30 +87,31 @@
   }
 }
 
-#if defined(OS_IOS) || defined(ADDRESS_SANITIZER) || \
-    defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER)
+#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_LINUX)
 #define MAYBE_NewOverflow DISABLED_NewOverflow
 #else
 #define MAYBE_NewOverflow NewOverflow
 #endif
 // Test array[TooBig][X] and array[X][TooBig] allocations for int overflows.
 // IOS doesn't honor nothrow, so disable the test there.
-// Disabled under XSan because asan aborts when new returns nullptr,
-// https://bugs.chromium.org/p/chromium/issues/detail?id=690271#c15
+// Crashes on Windows Dbg builds, disable there as well.
+// Disabled on Linux because failing Linux Valgrind bot, and Valgrind exclusions
+// are not currently read. See http://crbug.com/582398
 TEST(SecurityTest, MAYBE_NewOverflow) {
   const size_t kArraySize = 4096;
   // We want something "dynamic" here, so that the compiler doesn't
   // immediately reject crazy arrays.
   const size_t kDynamicArraySize = HideValueFromCompiler(kArraySize);
-  const size_t kMaxSizeT = std::numeric_limits<size_t>::max();
+  // numeric_limits are still not constexpr until we switch to C++11, so we
+  // use an ugly cast.
+  const size_t kMaxSizeT = ~static_cast<size_t>(0);
+  ASSERT_EQ(numeric_limits<size_t>::max(), kMaxSizeT);
   const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
   const size_t kDynamicArraySize2 = HideValueFromCompiler(kArraySize2);
   {
     std::unique_ptr<char[][kArraySize]> array_pointer(
         new (nothrow) char[kDynamicArraySize2][kArraySize]);
-    // Prevent clang from optimizing away the whole test.
-    char* volatile p = reinterpret_cast<char*>(array_pointer.get());
-    OverflowTestsSoftExpectTrue(!p);
+    OverflowTestsSoftExpectTrue(!array_pointer);
   }
   // On windows, the compiler prevents static array sizes of more than
   // 0x7fffffff (error C2148).
@@ -120,9 +121,7 @@
   {
     std::unique_ptr<char[][kArraySize2]> array_pointer(
         new (nothrow) char[kDynamicArraySize][kArraySize2]);
-    // Prevent clang from optimizing away the whole test.
-    char* volatile p = reinterpret_cast<char*>(array_pointer.get());
-    OverflowTestsSoftExpectTrue(!p);
+    OverflowTestsSoftExpectTrue(!array_pointer);
   }
 #endif  // !defined(OS_WIN) || !defined(ARCH_CPU_64_BITS)
 }
diff --git a/base/sequence_checker.h b/base/sequence_checker.h
index 4716318..ad01828 100644
--- a/base/sequence_checker.h
+++ b/base/sequence_checker.h
@@ -5,6 +5,13 @@
 #ifndef BASE_SEQUENCE_CHECKER_H_
 #define BASE_SEQUENCE_CHECKER_H_
 
+// See comments for the similar block in thread_checker.h.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_SEQUENCE_CHECKER 1
+#else
+#define ENABLE_SEQUENCE_CHECKER 0
+#endif
+
 #include "base/sequence_checker_impl.h"
 
 namespace base {
@@ -15,22 +22,23 @@
 // the right version for your build configuration.
 class SequenceCheckerDoNothing {
  public:
-  bool CalledOnValidSequence() const { return true; }
+  bool CalledOnValidSequencedThread() const {
+    return true;
+  }
 
   void DetachFromSequence() {}
 };
 
-// SequenceChecker is a helper class to verify that calls to some methods of a
-// class are sequenced. Calls are sequenced when they are issued:
-// - From tasks posted to SequencedTaskRunners or SingleThreadTaskRunners bound
-//   to the same sequence, or,
-// - From a single thread outside of any task.
+// SequenceChecker is a helper class used to help verify that some
+// methods of a class are called in sequence -- that is, called from
+// the same SequencedTaskRunner. It is a generalization of
+// ThreadChecker; see comments in sequence_checker_impl.h for details.
 //
 // Example:
 // class MyClass {
 //  public:
 //   void Foo() {
-//     DCHECK(sequence_checker_.CalledOnValidSequence());
+//     DCHECK(sequence_checker_.CalledOnValidSequencedThread());
 //     ... (do stuff) ...
 //   }
 //
@@ -38,14 +46,16 @@
 //   SequenceChecker sequence_checker_;
 // }
 //
-// In Release mode, CalledOnValidSequence() will always return true.
-#if DCHECK_IS_ON()
+// In Release mode, CalledOnValidSequencedThread() will always return true.
+#if ENABLE_SEQUENCE_CHECKER
 class SequenceChecker : public SequenceCheckerImpl {
 };
 #else
 class SequenceChecker : public SequenceCheckerDoNothing {
 };
-#endif  // DCHECK_IS_ON()
+#endif  // ENABLE_SEQUENCE_CHECKER
+
+#undef ENABLE_SEQUENCE_CHECKER
 
 }  // namespace base
 
diff --git a/base/sequence_checker_impl.cc b/base/sequence_checker_impl.cc
index df2a8cb..e95b8ee 100644
--- a/base/sequence_checker_impl.cc
+++ b/base/sequence_checker_impl.cc
@@ -4,66 +4,43 @@
 
 #include "base/sequence_checker_impl.h"
 
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/sequence_token.h"
-#include "base/threading/sequenced_worker_pool.h"
-#include "base/threading/thread_checker_impl.h"
-
 namespace base {
 
-class SequenceCheckerImpl::Core {
- public:
-  Core()
-      : sequence_token_(SequenceToken::GetForCurrentThread()),
-        sequenced_worker_pool_token_(
-            SequencedWorkerPool::GetSequenceTokenForCurrentThread()) {
-    // SequencedWorkerPool doesn't use SequenceToken and code outside of
-    // SequenceWorkerPool doesn't set a SequencedWorkerPool token.
-    DCHECK(!sequence_token_.IsValid() ||
-           !sequenced_worker_pool_token_.IsValid());
-  }
-
-  ~Core() = default;
-
-  bool CalledOnValidThread() const {
-    if (sequence_token_.IsValid())
-      return sequence_token_ == SequenceToken::GetForCurrentThread();
-
-    if (sequenced_worker_pool_token_.IsValid()) {
-      return sequenced_worker_pool_token_.Equals(
-          SequencedWorkerPool::GetSequenceTokenForCurrentThread());
-    }
-
-    // SequenceChecker behaves as a ThreadChecker when it is not bound to a
-    // valid sequence token.
-    return thread_checker_.CalledOnValidThread();
-  }
-
- private:
-  SequenceToken sequence_token_;
-
-  // TODO(gab): Remove this when SequencedWorkerPool is deprecated in favor of
-  // TaskScheduler. crbug.com/622400
-  SequencedWorkerPool::SequenceToken sequenced_worker_pool_token_;
-
-  // Used when |sequenced_worker_pool_token_| and |sequence_token_| are invalid.
-  ThreadCheckerImpl thread_checker_;
-};
-
-SequenceCheckerImpl::SequenceCheckerImpl() : core_(MakeUnique<Core>()) {}
-SequenceCheckerImpl::~SequenceCheckerImpl() = default;
-
-bool SequenceCheckerImpl::CalledOnValidSequence() const {
+SequenceCheckerImpl::SequenceCheckerImpl()
+    : sequence_token_assigned_(false) {
   AutoLock auto_lock(lock_);
-  if (!core_)
-    core_ = MakeUnique<Core>();
-  return core_->CalledOnValidThread();
+  EnsureSequenceTokenAssigned();
+}
+
+SequenceCheckerImpl::~SequenceCheckerImpl() {}
+
+bool SequenceCheckerImpl::CalledOnValidSequencedThread() const {
+  AutoLock auto_lock(lock_);
+  EnsureSequenceTokenAssigned();
+
+  // If this thread is not associated with a SequencedWorkerPool,
+  // SequenceChecker behaves as a ThreadChecker. See header for details.
+  if (!sequence_token_.IsValid())
+    return thread_checker_.CalledOnValidThread();
+
+  return sequence_token_.Equals(
+      SequencedWorkerPool::GetSequenceTokenForCurrentThread());
 }
 
 void SequenceCheckerImpl::DetachFromSequence() {
   AutoLock auto_lock(lock_);
-  core_.reset();
+  thread_checker_.DetachFromThread();
+  sequence_token_assigned_ = false;
+  sequence_token_ = SequencedWorkerPool::SequenceToken();
+}
+
+void SequenceCheckerImpl::EnsureSequenceTokenAssigned() const {
+  lock_.AssertAcquired();
+  if (sequence_token_assigned_)
+    return;
+
+  sequence_token_assigned_ = true;
+  sequence_token_ = SequencedWorkerPool::GetSequenceTokenForCurrentThread();
 }
 
 }  // namespace base
diff --git a/base/sequence_checker_impl.h b/base/sequence_checker_impl.h
index a54c388..e3c5fed 100644
--- a/base/sequence_checker_impl.h
+++ b/base/sequence_checker_impl.h
@@ -5,40 +5,44 @@
 #ifndef BASE_SEQUENCE_CHECKER_IMPL_H_
 #define BASE_SEQUENCE_CHECKER_IMPL_H_
 
-#include <memory>
-
 #include "base/base_export.h"
-#include "base/compiler_specific.h"
 #include "base/macros.h"
 #include "base/synchronization/lock.h"
+#include "base/threading/sequenced_worker_pool.h"
+#include "base/threading/thread_checker_impl.h"
 
 namespace base {
 
-// Real implementation of SequenceChecker for use in debug mode or for temporary
-// use in release mode (e.g. to CHECK on a threading issue seen only in the
-// wild).
-//
-// Note: You should almost always use the SequenceChecker class to get the right
-// version for your build configuration.
+// SequenceCheckerImpl is used to help verify that some methods of a
+// class are called in sequence -- that is, called from the same
+// SequencedTaskRunner. It is a generalization of ThreadChecker; in
+// particular, it behaves exactly like ThreadChecker if constructed
+// on a thread that is not part of a SequencedWorkerPool.
 class BASE_EXPORT SequenceCheckerImpl {
  public:
   SequenceCheckerImpl();
   ~SequenceCheckerImpl();
 
-  // Returns true if called in sequence with previous calls to this method and
-  // the constructor.
-  bool CalledOnValidSequence() const WARN_UNUSED_RESULT;
+  // Returns whether the we are being called on the same sequence token
+  // as previous calls. If there is no associated sequence, then returns
+  // whether we are being called on the underlying ThreadChecker's thread.
+  bool CalledOnValidSequencedThread() const;
 
-  // Unbinds the checker from the currently associated sequence. The checker
-  // will be re-bound on the next call to CalledOnValidSequence().
+  // Unbinds the checker from the currently associated sequence. The
+  // checker will be re-bound on the next call to CalledOnValidSequence().
   void DetachFromSequence();
 
  private:
-  class Core;
+  void EnsureSequenceTokenAssigned() const;
 
   // Guards all variables below.
   mutable Lock lock_;
-  mutable std::unique_ptr<Core> core_;
+
+  // Used if |sequence_token_| is not valid.
+  ThreadCheckerImpl thread_checker_;
+  mutable bool sequence_token_assigned_;
+
+  mutable SequencedWorkerPool::SequenceToken sequence_token_;
 
   DISALLOW_COPY_AND_ASSIGN(SequenceCheckerImpl);
 };
diff --git a/base/sequence_checker_unittest.cc b/base/sequence_checker_unittest.cc
index 86e9298..196bb1c 100644
--- a/base/sequence_checker_unittest.cc
+++ b/base/sequence_checker_unittest.cc
@@ -2,257 +2,334 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/sequence_checker.h"
+
 #include <stddef.h>
 
 #include <memory>
-#include <string>
+#include <utility>
 
 #include "base/bind.h"
 #include "base/bind_helpers.h"
-#include "base/callback_forward.h"
+#include "base/location.h"
+#include "base/logging.h"
 #include "base/macros.h"
-#include "base/message_loop/message_loop.h"
-#include "base/sequence_checker_impl.h"
-#include "base/sequence_token.h"
+#include "base/memory/ref_counted.h"
 #include "base/single_thread_task_runner.h"
 #include "base/test/sequenced_worker_pool_owner.h"
-#include "base/threading/simple_thread.h"
+#include "base/threading/thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
+// Duplicated from base/sequence_checker.h so that we can be good citizens
+// there and undef the macro.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_SEQUENCE_CHECKER 1
+#else
+#define ENABLE_SEQUENCE_CHECKER 0
+#endif
+
 namespace base {
 
 namespace {
 
-constexpr size_t kNumWorkerThreads = 3;
+const size_t kNumWorkerThreads = 3;
 
-// Runs a callback on another thread.
-class RunCallbackThread : public SimpleThread {
+// Simple class to exercise the basics of SequenceChecker.
+// DoStuff should verify that it's called on a valid sequenced thread.
+// SequenceCheckedObject can be destroyed on any thread (like WeakPtr).
+class SequenceCheckedObject {
  public:
-  explicit RunCallbackThread(const Closure& callback)
-      : SimpleThread("RunCallbackThread"), callback_(callback) {
-    Start();
-    Join();
+  SequenceCheckedObject() {}
+  ~SequenceCheckedObject() {}
+
+  // Verifies that it was called on the same thread as the constructor.
+  void DoStuff() {
+    DCHECK(sequence_checker_.CalledOnValidSequencedThread());
+  }
+
+  void DetachFromSequence() {
+    sequence_checker_.DetachFromSequence();
   }
 
  private:
-  // SimpleThread:
-  void Run() override { callback_.Run(); }
+  SequenceChecker sequence_checker_;
 
-  const Closure callback_;
-
-  DISALLOW_COPY_AND_ASSIGN(RunCallbackThread);
+  DISALLOW_COPY_AND_ASSIGN(SequenceCheckedObject);
 };
 
 class SequenceCheckerTest : public testing::Test {
+ public:
+  SequenceCheckerTest() : other_thread_("sequence_checker_test_other_thread") {}
+
+  void SetUp() override {
+    other_thread_.Start();
+    ResetPool();
+  }
+
+  void TearDown() override {
+    other_thread_.Stop();
+  }
+
  protected:
-  SequenceCheckerTest() : pool_owner_(kNumWorkerThreads, "test") {}
+  base::Thread* other_thread() { return &other_thread_; }
 
-  void PostToSequencedWorkerPool(const Closure& callback,
-                                 const std::string& token_name) {
-    pool_owner_.pool()->PostNamedSequencedWorkerTask(token_name, FROM_HERE,
-                                                     callback);
+  const scoped_refptr<SequencedWorkerPool>& pool() {
+    return pool_owner_->pool();
   }
 
-  void FlushSequencedWorkerPoolForTesting() {
-    pool_owner_.pool()->FlushForTesting();
+  void PostDoStuffToWorkerPool(SequenceCheckedObject* sequence_checked_object,
+                               const std::string& token_name) {
+    pool()->PostNamedSequencedWorkerTask(
+        token_name,
+        FROM_HERE,
+        base::Bind(&SequenceCheckedObject::DoStuff,
+                   base::Unretained(sequence_checked_object)));
   }
 
+  void PostDoStuffToOtherThread(
+      SequenceCheckedObject* sequence_checked_object) {
+    other_thread()->task_runner()->PostTask(
+        FROM_HERE, base::Bind(&SequenceCheckedObject::DoStuff,
+                              base::Unretained(sequence_checked_object)));
+  }
+
+  void PostDeleteToOtherThread(
+      std::unique_ptr<SequenceCheckedObject> sequence_checked_object) {
+    other_thread()->message_loop()->task_runner()->DeleteSoon(
+        FROM_HERE, sequence_checked_object.release());
+  }
+
+  // Destroys the SequencedWorkerPool instance, blocking until it is fully shut
+  // down, and creates a new instance.
+  void ResetPool() {
+    pool_owner_.reset(new SequencedWorkerPoolOwner(kNumWorkerThreads, "test"));
+  }
+
+  void MethodOnDifferentThreadDeathTest();
+  void DetachThenCallFromDifferentThreadDeathTest();
+  void DifferentSequenceTokensDeathTest();
+  void WorkerPoolAndSimpleThreadDeathTest();
+  void TwoDifferentWorkerPoolsDeathTest();
+
  private:
   MessageLoop message_loop_;  // Needed by SequencedWorkerPool to function.
-  SequencedWorkerPoolOwner pool_owner_;
-
-  DISALLOW_COPY_AND_ASSIGN(SequenceCheckerTest);
+  base::Thread other_thread_;
+  std::unique_ptr<SequencedWorkerPoolOwner> pool_owner_;
 };
 
-void ExpectCalledOnValidSequence(SequenceCheckerImpl* sequence_checker) {
-  ASSERT_TRUE(sequence_checker);
+TEST_F(SequenceCheckerTest, CallsAllowedOnSameThread) {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
 
-  // This should bind |sequence_checker| to the current sequence if it wasn't
-  // already bound to a sequence.
-  EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+  // Verify that DoStuff doesn't assert.
+  sequence_checked_object->DoStuff();
 
-  // Since |sequence_checker| is now bound to the current sequence, another call
-  // to CalledOnValidSequence() should return true.
-  EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
+  // Verify that the destructor doesn't assert.
+  sequence_checked_object.reset();
 }
 
-void ExpectCalledOnValidSequenceWithSequenceToken(
-    SequenceCheckerImpl* sequence_checker,
-    SequenceToken sequence_token) {
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(sequence_token);
-  ExpectCalledOnValidSequence(sequence_checker);
-}
+TEST_F(SequenceCheckerTest, DestructorAllowedOnDifferentThread) {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
 
-void ExpectNotCalledOnValidSequence(SequenceCheckerImpl* sequence_checker) {
-  ASSERT_TRUE(sequence_checker);
-  EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
-}
-
-}  // namespace
-
-TEST_F(SequenceCheckerTest, CallsAllowedOnSameThreadNoSequenceToken) {
-  SequenceCheckerImpl sequence_checker;
-  EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
-}
-
-TEST_F(SequenceCheckerTest, CallsAllowedOnSameThreadSameSequenceToken) {
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-  SequenceCheckerImpl sequence_checker;
-  EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
-}
-
-TEST_F(SequenceCheckerTest, CallsDisallowedOnDifferentThreadsNoSequenceToken) {
-  SequenceCheckerImpl sequence_checker;
-  RunCallbackThread thread(
-      Bind(&ExpectNotCalledOnValidSequence, Unretained(&sequence_checker)));
-}
-
-TEST_F(SequenceCheckerTest, CallsAllowedOnDifferentThreadsSameSequenceToken) {
-  const SequenceToken sequence_token(SequenceToken::Create());
-
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(sequence_token);
-  SequenceCheckerImpl sequence_checker;
-  EXPECT_TRUE(sequence_checker.CalledOnValidSequence());
-
-  RunCallbackThread thread(Bind(&ExpectCalledOnValidSequenceWithSequenceToken,
-                                Unretained(&sequence_checker), sequence_token));
-}
-
-TEST_F(SequenceCheckerTest, CallsDisallowedOnSameThreadDifferentSequenceToken) {
-  std::unique_ptr<SequenceCheckerImpl> sequence_checker;
-
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    sequence_checker.reset(new SequenceCheckerImpl);
-  }
-
-  {
-    // Different SequenceToken.
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
-  }
-
-  // No SequenceToken.
-  EXPECT_FALSE(sequence_checker->CalledOnValidSequence());
+  // Verify the destructor doesn't assert when called on a different thread.
+  PostDeleteToOtherThread(std::move(sequence_checked_object));
+  other_thread()->Stop();
 }
 
 TEST_F(SequenceCheckerTest, DetachFromSequence) {
-  std::unique_ptr<SequenceCheckerImpl> sequence_checker;
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
 
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    sequence_checker.reset(new SequenceCheckerImpl);
-  }
+  // Verify that DoStuff doesn't assert when called on a different thread after
+  // a call to DetachFromSequence.
+  sequence_checked_object->DetachFromSequence();
 
-  sequence_checker->DetachFromSequence();
-
-  {
-    // Verify that CalledOnValidSequence() returns true when called with
-    // a different sequence token after a call to DetachFromSequence().
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    EXPECT_TRUE(sequence_checker->CalledOnValidSequence());
-  }
+  PostDoStuffToOtherThread(sequence_checked_object.get());
+  other_thread()->Stop();
 }
 
-TEST_F(SequenceCheckerTest, DetachFromSequenceNoSequenceToken) {
-  SequenceCheckerImpl sequence_checker;
-  sequence_checker.DetachFromSequence();
+TEST_F(SequenceCheckerTest, SameSequenceTokenValid) {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
 
-  // Verify that CalledOnValidSequence() returns true when called on a
-  // different thread after a call to DetachFromSequence().
-  RunCallbackThread thread(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)));
+  sequence_checked_object->DetachFromSequence();
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  pool()->FlushForTesting();
 
-  EXPECT_FALSE(sequence_checker.CalledOnValidSequence());
+  PostDeleteToOtherThread(std::move(sequence_checked_object));
+  other_thread()->Stop();
 }
 
-TEST_F(SequenceCheckerTest, SequencedWorkerPool_SameSequenceTokenValid) {
-  SequenceCheckerImpl sequence_checker;
-  sequence_checker.DetachFromSequence();
+TEST_F(SequenceCheckerTest, DetachSequenceTokenValid) {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
 
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  FlushSequencedWorkerPoolForTesting();
+  sequence_checked_object->DetachFromSequence();
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  pool()->FlushForTesting();
+
+  sequence_checked_object->DetachFromSequence();
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
+  pool()->FlushForTesting();
+
+  PostDeleteToOtherThread(std::move(sequence_checked_object));
+  other_thread()->Stop();
 }
 
-TEST_F(SequenceCheckerTest, SequencedWorkerPool_DetachSequenceTokenValid) {
-  SequenceCheckerImpl sequence_checker;
-  sequence_checker.DetachFromSequence();
+#if GTEST_HAS_DEATH_TEST || !ENABLE_SEQUENCE_CHECKER
 
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  FlushSequencedWorkerPoolForTesting();
+void SequenceCheckerTest::MethodOnDifferentThreadDeathTest() {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
 
-  sequence_checker.DetachFromSequence();
-
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "B");
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "B");
-  FlushSequencedWorkerPoolForTesting();
+  // DoStuff should assert in debug builds only when called on a
+  // different thread.
+  PostDoStuffToOtherThread(sequence_checked_object.get());
+  other_thread()->Stop();
 }
 
-TEST_F(SequenceCheckerTest,
-       SequencedWorkerPool_DifferentSequenceTokensInvalid) {
-  SequenceCheckerImpl sequence_checker;
-  sequence_checker.DetachFromSequence();
+#if ENABLE_SEQUENCE_CHECKER
+TEST_F(SequenceCheckerTest, MethodNotAllowedOnDifferentThreadDeathTestInDebug) {
+  // The default style "fast" does not support multi-threaded tests.
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+  ASSERT_DEATH({
+    MethodOnDifferentThreadDeathTest();
+  }, "");
+}
+#else
+TEST_F(SequenceCheckerTest, MethodAllowedOnDifferentThreadDeathTestInRelease) {
+  MethodOnDifferentThreadDeathTest();
+}
+#endif  // ENABLE_SEQUENCE_CHECKER
 
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  FlushSequencedWorkerPoolForTesting();
+void SequenceCheckerTest::DetachThenCallFromDifferentThreadDeathTest() {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
 
-  PostToSequencedWorkerPool(
-      Bind(&ExpectNotCalledOnValidSequence, Unretained(&sequence_checker)),
-      "B");
-  PostToSequencedWorkerPool(
-      Bind(&ExpectNotCalledOnValidSequence, Unretained(&sequence_checker)),
-      "B");
-  FlushSequencedWorkerPoolForTesting();
+  // DoStuff doesn't assert when called on a different thread
+  // after a call to DetachFromSequence.
+  sequence_checked_object->DetachFromSequence();
+  PostDoStuffToOtherThread(sequence_checked_object.get());
+  other_thread()->Stop();
+
+  // DoStuff should assert in debug builds only after moving to
+  // another thread.
+  sequence_checked_object->DoStuff();
 }
 
-TEST_F(SequenceCheckerTest,
-       SequencedWorkerPool_WorkerPoolAndSimpleThreadInvalid) {
-  SequenceCheckerImpl sequence_checker;
-  sequence_checker.DetachFromSequence();
+#if ENABLE_SEQUENCE_CHECKER
+TEST_F(SequenceCheckerTest, DetachFromSequenceDeathTestInDebug) {
+  // The default style "fast" does not support multi-threaded tests.
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+  ASSERT_DEATH({
+    DetachThenCallFromDifferentThreadDeathTest();
+  }, "");
+}
+#else
+TEST_F(SequenceCheckerTest, DetachFromThreadDeathTestInRelease) {
+  DetachThenCallFromDifferentThreadDeathTest();
+}
+#endif  // ENABLE_SEQUENCE_CHECKER
 
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  FlushSequencedWorkerPoolForTesting();
+void SequenceCheckerTest::DifferentSequenceTokensDeathTest() {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
 
-  EXPECT_FALSE(sequence_checker.CalledOnValidSequence());
+  sequence_checked_object->DetachFromSequence();
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "B");
+  pool()->FlushForTesting();
+
+  PostDeleteToOtherThread(std::move(sequence_checked_object));
+  other_thread()->Stop();
 }
 
-TEST_F(SequenceCheckerTest,
-       SequencedWorkerPool_TwoDifferentWorkerPoolsInvalid) {
-  SequenceCheckerImpl sequence_checker;
-  sequence_checker.DetachFromSequence();
+#if ENABLE_SEQUENCE_CHECKER
+TEST_F(SequenceCheckerTest, DifferentSequenceTokensDeathTestInDebug) {
+  // The default style "fast" does not support multi-threaded tests.
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+  ASSERT_DEATH({
+    DifferentSequenceTokensDeathTest();
+  }, "");
+}
+#else
+TEST_F(SequenceCheckerTest, DifferentSequenceTokensDeathTestInRelease) {
+  DifferentSequenceTokensDeathTest();
+}
+#endif  // ENABLE_SEQUENCE_CHECKER
 
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  PostToSequencedWorkerPool(
-      Bind(&ExpectCalledOnValidSequence, Unretained(&sequence_checker)), "A");
-  FlushSequencedWorkerPoolForTesting();
+void SequenceCheckerTest::WorkerPoolAndSimpleThreadDeathTest() {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
+
+  sequence_checked_object->DetachFromSequence();
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  pool()->FlushForTesting();
+
+  PostDoStuffToOtherThread(sequence_checked_object.get());
+  other_thread()->Stop();
+}
+
+#if ENABLE_SEQUENCE_CHECKER
+TEST_F(SequenceCheckerTest, WorkerPoolAndSimpleThreadDeathTestInDebug) {
+  // The default style "fast" does not support multi-threaded tests.
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+  ASSERT_DEATH({
+    WorkerPoolAndSimpleThreadDeathTest();
+  }, "");
+}
+#else
+TEST_F(SequenceCheckerTest, WorkerPoolAndSimpleThreadDeathTestInRelease) {
+  WorkerPoolAndSimpleThreadDeathTest();
+}
+#endif  // ENABLE_SEQUENCE_CHECKER
+
+void SequenceCheckerTest::TwoDifferentWorkerPoolsDeathTest() {
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
+      new SequenceCheckedObject);
+
+  sequence_checked_object->DetachFromSequence();
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  PostDoStuffToWorkerPool(sequence_checked_object.get(), "A");
+  pool()->FlushForTesting();
 
   SequencedWorkerPoolOwner second_pool_owner(kNumWorkerThreads, "test2");
   second_pool_owner.pool()->PostNamedSequencedWorkerTask(
-      "A", FROM_HERE, base::Bind(&ExpectNotCalledOnValidSequence,
-                                 base::Unretained(&sequence_checker)));
+      "A",
+      FROM_HERE,
+      base::Bind(&SequenceCheckedObject::DoStuff,
+                 base::Unretained(sequence_checked_object.get())));
   second_pool_owner.pool()->FlushForTesting();
 }
 
+#if ENABLE_SEQUENCE_CHECKER
+TEST_F(SequenceCheckerTest, TwoDifferentWorkerPoolsDeathTestInDebug) {
+  // The default style "fast" does not support multi-threaded tests.
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+  ASSERT_DEATH({
+    TwoDifferentWorkerPoolsDeathTest();
+  }, "");
+}
+#else
+TEST_F(SequenceCheckerTest, TwoDifferentWorkerPoolsDeathTestInRelease) {
+  TwoDifferentWorkerPoolsDeathTest();
+}
+#endif  // ENABLE_SEQUENCE_CHECKER
+
+#endif  // GTEST_HAS_DEATH_TEST || !ENABLE_SEQUENCE_CHECKER
+
+}  // namespace
+
 }  // namespace base
+
+// Just in case we ever get lumped together with other compilation units.
+#undef ENABLE_SEQUENCE_CHECKER
diff --git a/base/sequence_token.cc b/base/sequence_token.cc
deleted file mode 100644
index 264e3b6..0000000
--- a/base/sequence_token.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/sequence_token.h"
-
-#include "base/atomic_sequence_num.h"
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/threading/thread_local.h"
-
-namespace base {
-
-namespace {
-
-base::StaticAtomicSequenceNumber g_sequence_token_generator;
-
-base::StaticAtomicSequenceNumber g_task_token_generator;
-
-LazyInstance<ThreadLocalPointer<const SequenceToken>>::Leaky
-    tls_current_sequence_token = LAZY_INSTANCE_INITIALIZER;
-
-LazyInstance<ThreadLocalPointer<const TaskToken>>::Leaky
-    tls_current_task_token = LAZY_INSTANCE_INITIALIZER;
-
-}  // namespace
-
-bool SequenceToken::operator==(const SequenceToken& other) const {
-  return token_ == other.token_ && IsValid();
-}
-
-bool SequenceToken::operator!=(const SequenceToken& other) const {
-  return !(*this == other);
-}
-
-bool SequenceToken::IsValid() const {
-  return token_ != kInvalidSequenceToken;
-}
-
-int SequenceToken::ToInternalValue() const {
-  return token_;
-}
-
-SequenceToken SequenceToken::Create() {
-  return SequenceToken(g_sequence_token_generator.GetNext());
-}
-
-SequenceToken SequenceToken::GetForCurrentThread() {
-  const SequenceToken* current_sequence_token =
-      tls_current_sequence_token.Get().Get();
-  return current_sequence_token ? *current_sequence_token : SequenceToken();
-}
-
-bool TaskToken::operator==(const TaskToken& other) const {
-  return token_ == other.token_ && IsValid();
-}
-
-bool TaskToken::operator!=(const TaskToken& other) const {
-  return !(*this == other);
-}
-
-bool TaskToken::IsValid() const {
-  return token_ != kInvalidTaskToken;
-}
-
-TaskToken TaskToken::Create() {
-  return TaskToken(g_task_token_generator.GetNext());
-}
-
-TaskToken TaskToken::GetForCurrentThread() {
-  const TaskToken* current_task_token = tls_current_task_token.Get().Get();
-  return current_task_token ? *current_task_token : TaskToken();
-}
-
-ScopedSetSequenceTokenForCurrentThread::ScopedSetSequenceTokenForCurrentThread(
-    const SequenceToken& sequence_token)
-    : sequence_token_(sequence_token), task_token_(TaskToken::Create()) {
-  DCHECK(!tls_current_sequence_token.Get().Get());
-  DCHECK(!tls_current_task_token.Get().Get());
-  tls_current_sequence_token.Get().Set(&sequence_token_);
-  tls_current_task_token.Get().Set(&task_token_);
-}
-
-ScopedSetSequenceTokenForCurrentThread::
-    ~ScopedSetSequenceTokenForCurrentThread() {
-  DCHECK_EQ(tls_current_sequence_token.Get().Get(), &sequence_token_);
-  DCHECK_EQ(tls_current_task_token.Get().Get(), &task_token_);
-  tls_current_sequence_token.Get().Set(nullptr);
-  tls_current_task_token.Get().Set(nullptr);
-}
-
-}  // namespace base
diff --git a/base/sequence_token.h b/base/sequence_token.h
deleted file mode 100644
index 6e7d191..0000000
--- a/base/sequence_token.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_SEQUENCE_TOKEN_H_
-#define BASE_SEQUENCE_TOKEN_H_
-
-#include "base/base_export.h"
-#include "base/macros.h"
-
-namespace base {
-
-// A token that identifies a series of sequenced tasks (i.e. tasks that run one
-// at a time in posting order).
-class BASE_EXPORT SequenceToken {
- public:
-  // Instantiates an invalid SequenceToken.
-  SequenceToken() = default;
-
-  // Explicitly allow copy.
-  SequenceToken(const SequenceToken& other) = default;
-  SequenceToken& operator=(const SequenceToken& other) = default;
-
-  // An invalid SequenceToken is not equal to any other SequenceToken, including
-  // other invalid SequenceTokens.
-  bool operator==(const SequenceToken& other) const;
-  bool operator!=(const SequenceToken& other) const;
-
-  // Returns true if this is a valid SequenceToken.
-  bool IsValid() const;
-
-  // Returns the integer uniquely representing this SequenceToken. This method
-  // should only be used for tracing and debugging.
-  int ToInternalValue() const;
-
-  // Returns a valid SequenceToken which isn't equal to any previously returned
-  // SequenceToken.
-  static SequenceToken Create();
-
-  // Returns the SequenceToken associated with the task running on the current
-  // thread, as determined by the active ScopedSetSequenceTokenForCurrentThread
-  // if any.
-  static SequenceToken GetForCurrentThread();
-
- private:
-  explicit SequenceToken(int token) : token_(token) {}
-
-  static constexpr int kInvalidSequenceToken = -1;
-  int token_ = kInvalidSequenceToken;
-};
-
-// A token that identifies a task.
-//
-// This is used by ThreadCheckerImpl to determine whether calls to
-// CalledOnValidThread() come from the same task and hence are deterministically
-// single-threaded (vs. calls coming from different sequenced or parallel tasks,
-// which may or may not run on the same thread).
-class BASE_EXPORT TaskToken {
- public:
-  // Instantiates an invalid TaskToken.
-  TaskToken() = default;
-
-  // Explicitly allow copy.
-  TaskToken(const TaskToken& other) = default;
-  TaskToken& operator=(const TaskToken& other) = default;
-
-  // An invalid TaskToken is not equal to any other TaskToken, including
-  // other invalid TaskTokens.
-  bool operator==(const TaskToken& other) const;
-  bool operator!=(const TaskToken& other) const;
-
-  // Returns true if this is a valid TaskToken.
-  bool IsValid() const;
-
-  // In the scope of a ScopedSetSequenceTokenForCurrentThread, returns a valid
-  // TaskToken which isn't equal to any TaskToken returned in the scope of a
-  // different ScopedSetSequenceTokenForCurrentThread. Otherwise, returns an
-  // invalid TaskToken.
-  static TaskToken GetForCurrentThread();
-
- private:
-  friend class ScopedSetSequenceTokenForCurrentThread;
-
-  explicit TaskToken(int token) : token_(token) {}
-
-  // Returns a valid TaskToken which isn't equal to any previously returned
-  // TaskToken. This is private as it only meant to be instantiated by
-  // ScopedSetSequenceTokenForCurrentThread.
-  static TaskToken Create();
-
-  static constexpr int kInvalidTaskToken = -1;
-  int token_ = kInvalidTaskToken;
-};
-
-// Instantiate this in the scope where a single task runs.
-class BASE_EXPORT ScopedSetSequenceTokenForCurrentThread {
- public:
-  // Throughout the lifetime of the constructed object,
-  // SequenceToken::GetForCurrentThread() will return |sequence_token| and
-  // TaskToken::GetForCurrentThread() will return a TaskToken which is not equal
-  // to any TaskToken returned in the scope of another
-  // ScopedSetSequenceTokenForCurrentThread.
-  ScopedSetSequenceTokenForCurrentThread(const SequenceToken& sequence_token);
-  ~ScopedSetSequenceTokenForCurrentThread();
-
- private:
-  const SequenceToken sequence_token_;
-  const TaskToken task_token_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedSetSequenceTokenForCurrentThread);
-};
-
-}  // namespace base
-
-#endif  // BASE_SEQUENCE_TOKEN_H_
diff --git a/base/sequence_token_unittest.cc b/base/sequence_token_unittest.cc
deleted file mode 100644
index b0e69de..0000000
--- a/base/sequence_token_unittest.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/sequence_token.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-TEST(SequenceTokenTest, IsValid) {
-  EXPECT_FALSE(SequenceToken().IsValid());
-  EXPECT_TRUE(SequenceToken::Create().IsValid());
-}
-
-TEST(SequenceTokenTest, OperatorEquals) {
-  SequenceToken invalid_a;
-  SequenceToken invalid_b;
-  const SequenceToken valid_a = SequenceToken::Create();
-  const SequenceToken valid_b = SequenceToken::Create();
-
-  EXPECT_FALSE(invalid_a == invalid_a);
-  EXPECT_FALSE(invalid_a == invalid_b);
-  EXPECT_FALSE(invalid_a == valid_a);
-  EXPECT_FALSE(invalid_a == valid_b);
-
-  EXPECT_FALSE(valid_a == invalid_a);
-  EXPECT_FALSE(valid_a == invalid_b);
-  EXPECT_EQ(valid_a, valid_a);
-  EXPECT_FALSE(valid_a == valid_b);
-}
-
-TEST(SequenceTokenTest, OperatorNotEquals) {
-  SequenceToken invalid_a;
-  SequenceToken invalid_b;
-  const SequenceToken valid_a = SequenceToken::Create();
-  const SequenceToken valid_b = SequenceToken::Create();
-
-  EXPECT_NE(invalid_a, invalid_a);
-  EXPECT_NE(invalid_a, invalid_b);
-  EXPECT_NE(invalid_a, valid_a);
-  EXPECT_NE(invalid_a, valid_b);
-
-  EXPECT_NE(valid_a, invalid_a);
-  EXPECT_NE(valid_a, invalid_b);
-  EXPECT_FALSE(valid_a != valid_a);
-  EXPECT_NE(valid_a, valid_b);
-}
-
-TEST(SequenceTokenTest, GetForCurrentThread) {
-  const SequenceToken token = SequenceToken::Create();
-
-  EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
-
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(token);
-    EXPECT_TRUE(SequenceToken::GetForCurrentThread().IsValid());
-    EXPECT_EQ(token, SequenceToken::GetForCurrentThread());
-  }
-
-  EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
-}
-
-TEST(SequenceTokenTest, ToInternalValue) {
-  const SequenceToken token1 = SequenceToken::Create();
-  const SequenceToken token2 = SequenceToken::Create();
-
-  // Confirm that internal values are unique.
-  EXPECT_NE(token1.ToInternalValue(), token2.ToInternalValue());
-}
-
-// Expect a default-constructed TaskToken to be invalid and not equal to
-// another invalid TaskToken.
-TEST(TaskTokenTest, InvalidDefaultConstructed) {
-  EXPECT_FALSE(TaskToken().IsValid());
-  EXPECT_NE(TaskToken(), TaskToken());
-}
-
-// Expect a TaskToken returned by TaskToken::GetForCurrentThread() outside the
-// scope of a ScopedSetSequenceTokenForCurrentThread to be invalid.
-TEST(TaskTokenTest, InvalidOutsideScope) {
-  EXPECT_FALSE(TaskToken::GetForCurrentThread().IsValid());
-}
-
-// Expect an invalid TaskToken not to be equal with a valid TaskToken.
-TEST(TaskTokenTest, ValidNotEqualsInvalid) {
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-  TaskToken valid = TaskToken::GetForCurrentThread();
-  TaskToken invalid;
-  EXPECT_NE(valid, invalid);
-}
-
-// Expect TaskTokens returned by TaskToken::GetForCurrentThread() in the scope
-// of the same ScopedSetSequenceTokenForCurrentThread instance to be
-// valid and equal with each other.
-TEST(TaskTokenTest, EqualInSameScope) {
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-
-  const TaskToken token_a = TaskToken::GetForCurrentThread();
-  const TaskToken token_b = TaskToken::GetForCurrentThread();
-
-  EXPECT_TRUE(token_a.IsValid());
-  EXPECT_TRUE(token_b.IsValid());
-  EXPECT_EQ(token_a, token_b);
-}
-
-// Expect TaskTokens returned by TaskToken::GetForCurrentThread() in the scope
-// of different ScopedSetSequenceTokenForCurrentThread instances to be
-// valid but not equal to each other.
-TEST(TaskTokenTest, NotEqualInDifferentScopes) {
-  TaskToken token_a;
-  TaskToken token_b;
-
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    token_a = TaskToken::GetForCurrentThread();
-  }
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    token_b = TaskToken::GetForCurrentThread();
-  }
-
-  EXPECT_TRUE(token_a.IsValid());
-  EXPECT_TRUE(token_b.IsValid());
-  EXPECT_NE(token_a, token_b);
-}
-
-}  // namespace base
diff --git a/base/sequenced_task_runner.cc b/base/sequenced_task_runner.cc
index dc11ebc..00d4048 100644
--- a/base/sequenced_task_runner.cc
+++ b/base/sequenced_task_runner.cc
@@ -14,24 +14,18 @@
   return PostNonNestableDelayedTask(from_here, task, base::TimeDelta());
 }
 
-bool SequencedTaskRunner::DeleteOrReleaseSoonInternal(
+bool SequencedTaskRunner::DeleteSoonInternal(
     const tracked_objects::Location& from_here,
-    void (*deleter)(const void*),
+    void(*deleter)(const void*),
     const void* object) {
   return PostNonNestableTask(from_here, Bind(deleter, object));
 }
 
-OnTaskRunnerDeleter::OnTaskRunnerDeleter(
-    scoped_refptr<SequencedTaskRunner> task_runner)
-    : task_runner_(std::move(task_runner)) {
+bool SequencedTaskRunner::ReleaseSoonInternal(
+    const tracked_objects::Location& from_here,
+    void(*releaser)(const void*),
+    const void* object) {
+  return PostNonNestableTask(from_here, Bind(releaser, object));
 }
 
-OnTaskRunnerDeleter::~OnTaskRunnerDeleter() {
-}
-
-OnTaskRunnerDeleter::OnTaskRunnerDeleter(OnTaskRunnerDeleter&&) = default;
-
-OnTaskRunnerDeleter& OnTaskRunnerDeleter::operator=(
-    OnTaskRunnerDeleter&&) = default;
-
 }  // namespace base
diff --git a/base/sequenced_task_runner.h b/base/sequenced_task_runner.h
index 6b2726e..6bb3f2b 100644
--- a/base/sequenced_task_runner.h
+++ b/base/sequenced_task_runner.h
@@ -122,8 +122,9 @@
   template <class T>
   bool DeleteSoon(const tracked_objects::Location& from_here,
                   const T* object) {
-    return DeleteOrReleaseSoonInternal(from_here, &DeleteHelper<T>::DoDelete,
-                                       object);
+    return
+        subtle::DeleteHelperInternal<T, bool>::DeleteViaSequencedTaskRunner(
+            this, from_here, object);
   }
 
   // Submits a non-nestable task to release the given object.  Returns
@@ -131,34 +132,26 @@
   // and false if the object definitely will not be released.
   template <class T>
   bool ReleaseSoon(const tracked_objects::Location& from_here,
-                   const T* object) {
-    return DeleteOrReleaseSoonInternal(from_here, &ReleaseHelper<T>::DoRelease,
-                                       object);
+                   T* object) {
+    return
+        subtle::ReleaseHelperInternal<T, bool>::ReleaseViaSequencedTaskRunner(
+            this, from_here, object);
   }
 
  protected:
   ~SequencedTaskRunner() override {}
 
  private:
-  bool DeleteOrReleaseSoonInternal(const tracked_objects::Location& from_here,
-                                   void (*deleter)(const void*),
-                                   const void* object);
-};
+  template <class T, class R> friend class subtle::DeleteHelperInternal;
+  template <class T, class R> friend class subtle::ReleaseHelperInternal;
 
-struct BASE_EXPORT OnTaskRunnerDeleter {
-  explicit OnTaskRunnerDeleter(scoped_refptr<SequencedTaskRunner> task_runner);
-  ~OnTaskRunnerDeleter();
+  bool DeleteSoonInternal(const tracked_objects::Location& from_here,
+                          void(*deleter)(const void*),
+                          const void* object);
 
-  OnTaskRunnerDeleter(OnTaskRunnerDeleter&&);
-  OnTaskRunnerDeleter& operator=(OnTaskRunnerDeleter&&);
-
-  template <typename T>
-  void operator()(const T* ptr) {
-    if (ptr)
-      task_runner_->DeleteSoon(FROM_HERE, ptr);
-  }
-
-  scoped_refptr<SequencedTaskRunner> task_runner_;
+  bool ReleaseSoonInternal(const tracked_objects::Location& from_here,
+                           void(*releaser)(const void*),
+                           const void* object);
 };
 
 }  // namespace base
diff --git a/base/sequenced_task_runner_helpers.h b/base/sequenced_task_runner_helpers.h
index 18ec0e2..7980b46 100644
--- a/base/sequenced_task_runner_helpers.h
+++ b/base/sequenced_task_runner_helpers.h
@@ -5,9 +5,23 @@
 #ifndef BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
 #define BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
 
+#include "base/debug/alias.h"
+#include "base/macros.h"
+
+// TODO(akalin): Investigate whether it's possible to just have
+// SequencedTaskRunner use these helpers (instead of MessageLoop).
+// Then we can just move these to sequenced_task_runner.h.
+
+namespace tracked_objects {
+class Location;
+}
+
 namespace base {
 
-class SequencedTaskRunner;
+namespace subtle {
+template <class T, class R> class DeleteHelperInternal;
+template <class T, class R> class ReleaseHelperInternal;
+}
 
 // Template helpers which use function indirection to erase T from the
 // function signature while still remembering it so we can call the
@@ -20,23 +34,80 @@
 template <class T>
 class DeleteHelper {
  private:
+  template <class T2, class R> friend class subtle::DeleteHelperInternal;
+
   static void DoDelete(const void* object) {
-    delete static_cast<const T*>(object);
+    delete reinterpret_cast<const T*>(object);
   }
 
-  friend class SequencedTaskRunner;
+  DISALLOW_COPY_AND_ASSIGN(DeleteHelper);
 };
 
 template <class T>
 class ReleaseHelper {
  private:
+  template <class T2, class R> friend class subtle::ReleaseHelperInternal;
+
   static void DoRelease(const void* object) {
-    static_cast<const T*>(object)->Release();
+    reinterpret_cast<const T*>(object)->Release();
   }
 
-  friend class SequencedTaskRunner;
+  DISALLOW_COPY_AND_ASSIGN(ReleaseHelper);
 };
 
+namespace subtle {
+
+// An internal SequencedTaskRunner-like class helper for DeleteHelper
+// and ReleaseHelper.  We don't want to expose the Do*() functions
+// directly directly since the void* argument makes it possible to
+// pass/ an object of the wrong type to delete.  Instead, we force
+// callers to go through these internal helpers for type
+// safety. SequencedTaskRunner-like classes which expose DeleteSoon or
+// ReleaseSoon methods should friend the appropriate helper and
+// implement a corresponding *Internal method with the following
+// signature:
+//
+// bool(const tracked_objects::Location&,
+//      void(*function)(const void*),
+//      void* object)
+//
+// An implementation of this function should simply create a
+// base::Closure from (function, object) and return the result of
+// posting the task.
+template <class T, class ReturnType>
+class DeleteHelperInternal {
+ public:
+  template <class SequencedTaskRunnerType>
+  static ReturnType DeleteViaSequencedTaskRunner(
+      SequencedTaskRunnerType* sequenced_task_runner,
+      const tracked_objects::Location& from_here,
+      const T* object) {
+    return sequenced_task_runner->DeleteSoonInternal(
+        from_here, &DeleteHelper<T>::DoDelete, object);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DeleteHelperInternal);
+};
+
+template <class T, class ReturnType>
+class ReleaseHelperInternal {
+ public:
+  template <class SequencedTaskRunnerType>
+  static ReturnType ReleaseViaSequencedTaskRunner(
+      SequencedTaskRunnerType* sequenced_task_runner,
+      const tracked_objects::Location& from_here,
+      const T* object) {
+    return sequenced_task_runner->ReleaseSoonInternal(
+        from_here, &ReleaseHelper<T>::DoRelease, object);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ReleaseHelperInternal);
+};
+
+}  // namespace subtle
+
 }  // namespace base
 
 #endif  // BASE_SEQUENCED_TASK_RUNNER_HELPERS_H_
diff --git a/base/sha1.cc b/base/sha1_portable.cc
similarity index 95%
rename from base/sha1.cc
rename to base/sha1_portable.cc
index a710001..dd2ab6f 100644
--- a/base/sha1.cc
+++ b/base/sha1_portable.cc
@@ -8,7 +8,6 @@
 #include <stdint.h>
 #include <string.h>
 
-#include "base/sys_byteorder.h"
 
 namespace base {
 
@@ -93,6 +92,10 @@
   }
 }
 
+static inline void swapends(uint32_t* t) {
+  *t = (*t >> 24) | ((*t >> 8) & 0xff00) | ((*t & 0xff00) << 8) | (*t << 24);
+}
+
 const int SecureHashAlgorithm::kDigestSizeBytes = 20;
 
 void SecureHashAlgorithm::Init() {
@@ -115,7 +118,7 @@
   Process();
 
   for (int t = 0; t < 5; ++t)
-    H[t] = ByteSwap(H[t]);
+    swapends(&H[t]);
 }
 
 void SecureHashAlgorithm::Update(const void* data, size_t nbytes) {
@@ -162,7 +165,7 @@
   // W and M are in a union, so no need to memcpy.
   // memcpy(W, M, sizeof(M));
   for (t = 0; t < 16; ++t)
-    W[t] = ByteSwap(W[t]);
+    swapends(&W[t]);
 
   // b.
   for (t = 16; t < 80; ++t)
diff --git a/base/stl_util.h b/base/stl_util.h
index b0670b2..12e226a 100644
--- a/base/stl_util.h
+++ b/base/stl_util.h
@@ -8,37 +8,13 @@
 #define BASE_STL_UTIL_H_
 
 #include <algorithm>
-#include <deque>
-#include <forward_list>
 #include <functional>
 #include <iterator>
-#include <list>
-#include <map>
-#include <set>
 #include <string>
-#include <unordered_map>
-#include <unordered_set>
 #include <vector>
 
 #include "base/logging.h"
 
-namespace base {
-
-namespace internal {
-
-// Calls erase on iterators of matching elements.
-template <typename Container, typename Predicate>
-void IterateAndEraseIf(Container& container, Predicate pred) {
-  for (auto it = container.begin(); it != container.end();) {
-    if (pred(*it))
-      it = container.erase(it);
-    else
-      ++it;
-  }
-}
-
-}  // namespace internal
-
 // Clears internal memory of an STL object.
 // STL clear()/reserve(0) does not always free internal memory allocated
 // This function uses swap/destructor to ensure the internal memory is freed.
@@ -51,6 +27,69 @@
   obj->reserve(0);
 }
 
+// For a range within a container of pointers, calls delete (non-array version)
+// on these pointers.
+// NOTE: for these three functions, we could just implement a DeleteObject
+// functor and then call for_each() on the range and functor, but this
+// requires us to pull in all of algorithm.h, which seems expensive.
+// For hash_[multi]set, it is important that this deletes behind the iterator
+// because the hash_set may call the hash function on the iterator when it is
+// advanced, which could result in the hash function trying to deference a
+// stale pointer.
+template <class ForwardIterator>
+void STLDeleteContainerPointers(ForwardIterator begin, ForwardIterator end) {
+  while (begin != end) {
+    ForwardIterator temp = begin;
+    ++begin;
+    delete *temp;
+  }
+}
+
+// For a range within a container of pairs, calls delete (non-array version) on
+// BOTH items in the pairs.
+// NOTE: Like STLDeleteContainerPointers, it is important that this deletes
+// behind the iterator because if both the key and value are deleted, the
+// container may call the hash function on the iterator when it is advanced,
+// which could result in the hash function trying to dereference a stale
+// pointer.
+template <class ForwardIterator>
+void STLDeleteContainerPairPointers(ForwardIterator begin,
+                                    ForwardIterator end) {
+  while (begin != end) {
+    ForwardIterator temp = begin;
+    ++begin;
+    delete temp->first;
+    delete temp->second;
+  }
+}
+
+// For a range within a container of pairs, calls delete (non-array version) on
+// the FIRST item in the pairs.
+// NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
+template <class ForwardIterator>
+void STLDeleteContainerPairFirstPointers(ForwardIterator begin,
+                                         ForwardIterator end) {
+  while (begin != end) {
+    ForwardIterator temp = begin;
+    ++begin;
+    delete temp->first;
+  }
+}
+
+// For a range within a container of pairs, calls delete.
+// NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
+// Deleting the value does not always invalidate the iterator, but it may
+// do so if the key is a pointer into the value object.
+template <class ForwardIterator>
+void STLDeleteContainerPairSecondPointers(ForwardIterator begin,
+                                          ForwardIterator end) {
+  while (begin != end) {
+    ForwardIterator temp = begin;
+    ++begin;
+    delete temp->second;
+  }
+}
+
 // Counts the number of instances of val in a container.
 template <typename Container, typename T>
 typename std::iterator_traits<
@@ -76,6 +115,74 @@
   return str->empty() ? NULL : &*str->begin();
 }
 
+// The following functions are useful for cleaning up STL containers whose
+// elements point to allocated memory.
+
+// STLDeleteElements() deletes all the elements in an STL container and clears
+// the container.  This function is suitable for use with a vector, set,
+// hash_set, or any other STL container which defines sensible begin(), end(),
+// and clear() methods.
+//
+// If container is NULL, this function is a no-op.
+//
+// As an alternative to calling STLDeleteElements() directly, consider
+// STLElementDeleter (defined below), which ensures that your container's
+// elements are deleted when the STLElementDeleter goes out of scope.
+template <class T>
+void STLDeleteElements(T* container) {
+  if (!container)
+    return;
+  STLDeleteContainerPointers(container->begin(), container->end());
+  container->clear();
+}
+
+// Given an STL container consisting of (key, value) pairs, STLDeleteValues
+// deletes all the "value" components and clears the container.  Does nothing
+// in the case it's given a NULL pointer.
+template <class T>
+void STLDeleteValues(T* container) {
+  if (!container)
+    return;
+  STLDeleteContainerPairSecondPointers(container->begin(), container->end());
+  container->clear();
+}
+
+
+// The following classes provide a convenient way to delete all elements or
+// values from STL containers when they goes out of scope.  This greatly
+// simplifies code that creates temporary objects and has multiple return
+// statements.  Example:
+//
+// vector<MyProto *> tmp_proto;
+// STLElementDeleter<vector<MyProto *> > d(&tmp_proto);
+// if (...) return false;
+// ...
+// return success;
+
+// Given a pointer to an STL container this class will delete all the element
+// pointers when it goes out of scope.
+template<class T>
+class STLElementDeleter {
+ public:
+  STLElementDeleter<T>(T* container) : container_(container) {}
+  ~STLElementDeleter<T>() { STLDeleteElements(container_); }
+
+ private:
+  T* container_;
+};
+
+// Given a pointer to an STL container this class will delete all the value
+// pointers when it goes out of scope.
+template<class T>
+class STLValueDeleter {
+ public:
+  STLValueDeleter<T>(T* container) : container_(container) {}
+  ~STLValueDeleter<T>() { STLDeleteValues(container_); }
+
+ private:
+  T* container_;
+};
+
 // Test to see if a set, map, hash_set or hash_map contains a particular key.
 // Returns true if the key is in the collection.
 template <typename Collection, typename Key>
@@ -91,6 +198,8 @@
       collection.end();
 }
 
+namespace base {
+
 // Returns true if the container is sorted.
 template <typename Container>
 bool STLIsSorted(const Container& cont) {
@@ -148,145 +257,6 @@
                        a2.begin(), a2.end());
 }
 
-// Erase/EraseIf are based on library fundamentals ts v2 erase/erase_if
-// http://en.cppreference.com/w/cpp/experimental/lib_extensions_2
-// They provide a generic way to erase elements from a container.
-// The functions here implement these for the standard containers until those
-// functions are available in the C++ standard.
-// For Chromium containers overloads should be defined in their own headers
-// (like standard containers).
-// Note: there is no std::erase for standard associative containers so we don't
-// have it either.
-
-template <typename CharT, typename Traits, typename Allocator, typename Value>
-void Erase(std::basic_string<CharT, Traits, Allocator>& container,
-           const Value& value) {
-  container.erase(std::remove(container.begin(), container.end(), value),
-                  container.end());
-}
-
-template <typename CharT, typename Traits, typename Allocator, class Predicate>
-void EraseIf(std::basic_string<CharT, Traits, Allocator>& container,
-             Predicate pred) {
-  container.erase(std::remove_if(container.begin(), container.end(), pred),
-                  container.end());
-}
-
-template <class T, class Allocator, class Value>
-void Erase(std::deque<T, Allocator>& container, const Value& value) {
-  container.erase(std::remove(container.begin(), container.end(), value),
-                  container.end());
-}
-
-template <class T, class Allocator, class Predicate>
-void EraseIf(std::deque<T, Allocator>& container, Predicate pred) {
-  container.erase(std::remove_if(container.begin(), container.end(), pred),
-                  container.end());
-}
-
-template <class T, class Allocator, class Value>
-void Erase(std::vector<T, Allocator>& container, const Value& value) {
-  container.erase(std::remove(container.begin(), container.end(), value),
-                  container.end());
-}
-
-template <class T, class Allocator, class Predicate>
-void EraseIf(std::vector<T, Allocator>& container, Predicate pred) {
-  container.erase(std::remove_if(container.begin(), container.end(), pred),
-                  container.end());
-}
-
-template <class T, class Allocator, class Value>
-void Erase(std::forward_list<T, Allocator>& container, const Value& value) {
-  // Unlike std::forward_list::remove, this function template accepts
-  // heterogeneous types and does not force a conversion to the container's
-  // value type before invoking the == operator.
-  container.remove_if([&](const T& cur) { return cur == value; });
-}
-
-template <class T, class Allocator, class Predicate>
-void EraseIf(std::forward_list<T, Allocator>& container, Predicate pred) {
-  container.remove_if(pred);
-}
-
-template <class T, class Allocator, class Value>
-void Erase(std::list<T, Allocator>& container, const Value& value) {
-  // Unlike std::list::remove, this function template accepts heterogeneous
-  // types and does not force a conversion to the container's value type before
-  // invoking the == operator.
-  container.remove_if([&](const T& cur) { return cur == value; });
-}
-
-template <class T, class Allocator, class Predicate>
-void EraseIf(std::list<T, Allocator>& container, Predicate pred) {
-  container.remove_if(pred);
-}
-
-template <class Key, class T, class Compare, class Allocator, class Predicate>
-void EraseIf(std::map<Key, T, Compare, Allocator>& container, Predicate pred) {
-  internal::IterateAndEraseIf(container, pred);
-}
-
-template <class Key, class T, class Compare, class Allocator, class Predicate>
-void EraseIf(std::multimap<Key, T, Compare, Allocator>& container,
-             Predicate pred) {
-  internal::IterateAndEraseIf(container, pred);
-}
-
-template <class Key, class Compare, class Allocator, class Predicate>
-void EraseIf(std::set<Key, Compare, Allocator>& container, Predicate pred) {
-  internal::IterateAndEraseIf(container, pred);
-}
-
-template <class Key, class Compare, class Allocator, class Predicate>
-void EraseIf(std::multiset<Key, Compare, Allocator>& container,
-             Predicate pred) {
-  internal::IterateAndEraseIf(container, pred);
-}
-
-template <class Key,
-          class T,
-          class Hash,
-          class KeyEqual,
-          class Allocator,
-          class Predicate>
-void EraseIf(std::unordered_map<Key, T, Hash, KeyEqual, Allocator>& container,
-             Predicate pred) {
-  internal::IterateAndEraseIf(container, pred);
-}
-
-template <class Key,
-          class T,
-          class Hash,
-          class KeyEqual,
-          class Allocator,
-          class Predicate>
-void EraseIf(
-    std::unordered_multimap<Key, T, Hash, KeyEqual, Allocator>& container,
-    Predicate pred) {
-  internal::IterateAndEraseIf(container, pred);
-}
-
-template <class Key,
-          class Hash,
-          class KeyEqual,
-          class Allocator,
-          class Predicate>
-void EraseIf(std::unordered_set<Key, Hash, KeyEqual, Allocator>& container,
-             Predicate pred) {
-  internal::IterateAndEraseIf(container, pred);
-}
-
-template <class Key,
-          class Hash,
-          class KeyEqual,
-          class Allocator,
-          class Predicate>
-void EraseIf(std::unordered_multiset<Key, Hash, KeyEqual, Allocator>& container,
-             Predicate pred) {
-  internal::IterateAndEraseIf(container, pred);
-}
-
 }  // namespace base
 
 #endif  // BASE_STL_UTIL_H_
diff --git a/base/stl_util_unittest.cc b/base/stl_util_unittest.cc
index 48d0f66..42004eb 100644
--- a/base/stl_util_unittest.cc
+++ b/base/stl_util_unittest.cc
@@ -4,20 +4,8 @@
 
 #include "base/stl_util.h"
 
-#include <deque>
-#include <forward_list>
-#include <functional>
-#include <iterator>
-#include <list>
-#include <map>
 #include <set>
-#include <string>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
 
-#include "base/strings/string16.h"
-#include "base/strings/utf_string_conversions.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace {
@@ -40,55 +28,6 @@
   int value_;
 };
 
-template <typename Container>
-void RunEraseTest() {
-  const std::pair<Container, Container> test_data[] = {
-      {Container(), Container()}, {{1, 2, 3}, {1, 3}}, {{1, 2, 3, 2}, {1, 3}}};
-
-  for (auto test_case : test_data) {
-    base::Erase(test_case.first, 2);
-    EXPECT_EQ(test_case.second, test_case.first);
-  }
-}
-
-// This test is written for containers of std::pair<int, int> to support maps.
-template <typename Container>
-void RunEraseIfTest() {
-  struct {
-    Container input;
-    Container erase_even;
-    Container erase_odd;
-  } test_data[] = {
-      {Container(), Container(), Container()},
-      {{{1, 1}, {2, 2}, {3, 3}}, {{1, 1}, {3, 3}}, {{2, 2}}},
-      {{{1, 1}, {2, 2}, {3, 3}, {4, 4}}, {{1, 1}, {3, 3}}, {{2, 2}, {4, 4}}},
-  };
-
-  for (auto test_case : test_data) {
-    base::EraseIf(test_case.input, [](const std::pair<int, int>& elem) {
-      return !(elem.first & 1);
-    });
-    EXPECT_EQ(test_case.erase_even, test_case.input);
-  }
-
-  for (auto test_case : test_data) {
-    base::EraseIf(test_case.input, [](const std::pair<int, int>& elem) {
-      return elem.first & 1;
-    });
-    EXPECT_EQ(test_case.erase_odd, test_case.input);
-  }
-}
-
-struct CustomIntHash {
-  size_t operator()(int elem) const { return std::hash<int>()(elem) + 1; }
-};
-
-struct HashByFirst {
-  size_t operator()(const std::pair<int, int>& elem) const {
-    return std::hash<int>()(elem.first);
-  }
-};
-
 }  // namespace
 
 namespace base {
@@ -324,100 +263,5 @@
   EXPECT_EQ("abc", s2);
 }
 
-TEST(Erase, String) {
-  const std::pair<std::string, std::string> test_data[] = {
-      {"", ""}, {"abc", "bc"}, {"abca", "bc"},
-  };
-
-  for (auto test_case : test_data) {
-    Erase(test_case.first, 'a');
-    EXPECT_EQ(test_case.second, test_case.first);
-  }
-
-  for (auto test_case : test_data) {
-    EraseIf(test_case.first, [](char elem) { return elem < 'b'; });
-    EXPECT_EQ(test_case.second, test_case.first);
-  }
-}
-
-TEST(Erase, String16) {
-  std::pair<base::string16, base::string16> test_data[] = {
-      {base::string16(), base::string16()},
-      {UTF8ToUTF16("abc"), UTF8ToUTF16("bc")},
-      {UTF8ToUTF16("abca"), UTF8ToUTF16("bc")},
-  };
-
-  const base::string16 letters = UTF8ToUTF16("ab");
-  for (auto test_case : test_data) {
-    Erase(test_case.first, letters[0]);
-    EXPECT_EQ(test_case.second, test_case.first);
-  }
-
-  for (auto test_case : test_data) {
-    EraseIf(test_case.first, [&](short elem) { return elem < letters[1]; });
-    EXPECT_EQ(test_case.second, test_case.first);
-  }
-}
-
-TEST(Erase, Deque) {
-  RunEraseTest<std::deque<int>>();
-  RunEraseIfTest<std::deque<std::pair<int, int>>>();
-}
-
-TEST(Erase, Vector) {
-  RunEraseTest<std::vector<int>>();
-  RunEraseIfTest<std::vector<std::pair<int, int>>>();
-}
-
-TEST(Erase, ForwardList) {
-  RunEraseTest<std::forward_list<int>>();
-  RunEraseIfTest<std::forward_list<std::pair<int, int>>>();
-}
-
-TEST(Erase, List) {
-  RunEraseTest<std::list<int>>();
-  RunEraseIfTest<std::list<std::pair<int, int>>>();
-}
-
-TEST(Erase, Map) {
-  RunEraseIfTest<std::map<int, int>>();
-  RunEraseIfTest<std::map<int, int, std::greater<int>>>();
-}
-
-TEST(Erase, Multimap) {
-  RunEraseIfTest<std::multimap<int, int>>();
-  RunEraseIfTest<std::multimap<int, int, std::greater<int>>>();
-}
-
-TEST(Erase, Set) {
-  RunEraseIfTest<std::set<std::pair<int, int>>>();
-  RunEraseIfTest<
-      std::set<std::pair<int, int>, std::greater<std::pair<int, int>>>>();
-}
-
-TEST(Erase, Multiset) {
-  RunEraseIfTest<std::multiset<std::pair<int, int>>>();
-  RunEraseIfTest<
-      std::multiset<std::pair<int, int>, std::greater<std::pair<int, int>>>>();
-}
-
-TEST(Erase, UnorderedMap) {
-  RunEraseIfTest<std::unordered_map<int, int>>();
-  RunEraseIfTest<std::unordered_map<int, int, CustomIntHash>>();
-}
-
-TEST(Erase, UnorderedMultimap) {
-  RunEraseIfTest<std::unordered_multimap<int, int>>();
-  RunEraseIfTest<std::unordered_multimap<int, int, CustomIntHash>>();
-}
-
-TEST(Erase, UnorderedSet) {
-  RunEraseIfTest<std::unordered_set<std::pair<int, int>, HashByFirst>>();
-}
-
-TEST(Erase, UnorderedMultiset) {
-  RunEraseIfTest<std::unordered_multiset<std::pair<int, int>, HashByFirst>>();
-}
-
 }  // namespace
 }  // namespace base
diff --git a/base/strings/string_number_conversions.cc b/base/strings/string_number_conversions.cc
index adb4bdb..09aeb44 100644
--- a/base/strings/string_number_conversions.cc
+++ b/base/strings/string_number_conversions.cc
@@ -10,11 +10,11 @@
 #include <wctype.h>
 
 #include <limits>
-#include <type_traits>
 
 #include "base/logging.h"
 #include "base/numerics/safe_math.h"
 #include "base/scoped_clear_errno.h"
+#include "base/scoped_clear_errno.h"
 
 namespace base {
 
@@ -35,8 +35,7 @@
 
     // The ValueOrDie call below can never fail, because UnsignedAbs is valid
     // for all valid inputs.
-    typename std::make_unsigned<INT>::type res =
-        CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
+    auto res = CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
 
     CHR* end = outbuf + kOutputBufSize;
     CHR* i = end;
diff --git a/base/strings/string_number_conversions_unittest.cc b/base/strings/string_number_conversions_unittest.cc
index b4c3068..91191e0 100644
--- a/base/strings/string_number_conversions_unittest.cc
+++ b/base/strings/string_number_conversions_unittest.cc
@@ -719,41 +719,19 @@
     double output;
     bool success;
   } cases[] = {
-    // Test different forms of zero.
     {"0", 0.0, true},
-    {"+0", 0.0, true},
-    {"-0", 0.0, true},
-    {"0.0", 0.0, true},
-    {"000000000000000000000000000000.0", 0.0, true},
-    {"0.000000000000000000000000000", 0.0, true},
-
-    // Test the answer.
     {"42", 42.0, true},
     {"-42", -42.0, true},
-
-    // Test variances of an ordinary number.
     {"123.45", 123.45, true},
     {"-123.45", -123.45, true},
     {"+123.45", 123.45, true},
-
-    // Test different forms of representation.
     {"2.99792458e8", 299792458.0, true},
     {"149597870.691E+3", 149597870691.0, true},
     {"6.", 6.0, true},
-
-    // Test around the largest/smallest value that a double can represent.
-    {"9e307", 9e307, true},
-    {"1.7976e308", 1.7976e308, true},
-    {"1.7977e308", HUGE_VAL, false},
-    {"1.797693134862315807e+308", HUGE_VAL, true},
-    {"1.797693134862315808e+308", HUGE_VAL, false},
-    {"9e308", HUGE_VAL, false},
-    {"9e309", HUGE_VAL, false},
-    {"9e999", HUGE_VAL, false},
-    {"9e1999", HUGE_VAL, false},
-    {"9e19999", HUGE_VAL, false},
-    {"9e99999999999999999999", std::numeric_limits<double>::infinity(), false},
-    {"-9e99999999999999999999", -std::numeric_limits<double>::infinity(), false},
+    {"9e99999999999999999999", std::numeric_limits<double>::infinity(),
+                               false},
+    {"-9e99999999999999999999", -std::numeric_limits<double>::infinity(),
+                                false},
     {"1e-2", 0.01, true},
     {"42 ", 42.0, false},
     {" 1e-2", 0.01, false},
@@ -761,9 +739,6 @@
     {"-1E-7", -0.0000001, true},
     {"01e02", 100, true},
     {"2.3e15", 2.3e15, true},
-    {"100e-309", 100e-309, true},
-
-    // Test some invalid cases.
     {"\t\n\v\f\r -123.45e2", -12345.0, false},
     {"+123 e4", 123.0, false},
     {"123e ", 123.0, false},
@@ -774,10 +749,6 @@
     {"-", 0.0, false},
     {"+", 0.0, false},
     {"", 0.0, false},
-
-    // crbug.org/588726
-    {"-0.0010000000000000000000000000000000000000001e-256",
-     -1.0000000000000001e-259, true},
   };
 
   for (size_t i = 0; i < arraysize(cases); ++i) {
@@ -838,54 +809,4 @@
   EXPECT_EQ(hex.compare("01FF02FE038081"), 0);
 }
 
-// Test cases of known-bad strtod conversions that motivated the use of dmg_fp.
-// See https://bugs.chromium.org/p/chromium/issues/detail?id=593512.
-TEST(StringNumberConversionsTest, StrtodFailures) {
-  static const struct {
-    const char* input;
-    uint64_t expected;
-  } cases[] = {
-      // http://www.exploringbinary.com/incorrectly-rounded-conversions-in-visual-c-plus-plus/
-      {"9214843084008499", 0x43405e6cec57761aULL},
-      {"0.500000000000000166533453693773481063544750213623046875",
-       0x3fe0000000000002ULL},
-      {"30078505129381147446200", 0x44997a3c7271b021ULL},
-      {"1777820000000000000001", 0x4458180d5bad2e3eULL},
-      {"0.500000000000000166547006220929549868969843373633921146392822265625",
-       0x3fe0000000000002ULL},
-      {"0.50000000000000016656055874808561867439493653364479541778564453125",
-       0x3fe0000000000002ULL},
-      {"0.3932922657273", 0x3fd92bb352c4623aULL},
-
-      // http://www.exploringbinary.com/incorrectly-rounded-conversions-in-gcc-and-glibc/
-      {"0.500000000000000166533453693773481063544750213623046875",
-       0x3fe0000000000002ULL},
-      {"3.518437208883201171875e13", 0x42c0000000000002ULL},
-      {"62.5364939768271845828", 0x404f44abd5aa7ca4ULL},
-      {"8.10109172351e-10", 0x3e0bd5cbaef0fd0cULL},
-      {"1.50000000000000011102230246251565404236316680908203125",
-       0x3ff8000000000000ULL},
-      {"9007199254740991.4999999999999999999999999999999995",
-       0x433fffffffffffffULL},
-
-      // http://www.exploringbinary.com/incorrect-decimal-to-floating-point-conversion-in-sqlite/
-      {"1e-23", 0x3b282db34012b251ULL},
-      {"8.533e+68", 0x4e3fa69165a8eea2ULL},
-      {"4.1006e-184", 0x19dbe0d1c7ea60c9ULL},
-      {"9.998e+307", 0x7fe1cc0a350ca87bULL},
-      {"9.9538452227e-280", 0x0602117ae45cde43ULL},
-      {"6.47660115e-260", 0x0a1fdd9e333badadULL},
-      {"7.4e+47", 0x49e033d7eca0adefULL},
-      {"5.92e+48", 0x4a1033d7eca0adefULL},
-      {"7.35e+66", 0x4dd172b70eababa9ULL},
-      {"8.32116e+55", 0x4b8b2628393e02cdULL},
-  };
-
-  for (const auto& test : cases) {
-    double output;
-    EXPECT_TRUE(StringToDouble(test.input, &output));
-    EXPECT_EQ(bit_cast<uint64_t>(output), test.expected);
-  }
-}
-
 }  // namespace base
diff --git a/base/strings/string_split.cc b/base/strings/string_split.cc
index a8180b2..6c949b9 100644
--- a/base/strings/string_split.cc
+++ b/base/strings/string_split.cc
@@ -227,22 +227,18 @@
   return success;
 }
 
-std::vector<string16> SplitStringUsingSubstr(StringPiece16 input,
-                                             StringPiece16 delimiter,
-                                             WhitespaceHandling whitespace,
-                                             SplitResult result_type) {
-  std::vector<string16> result;
-  SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
-  return result;
+void SplitStringUsingSubstr(StringPiece16 input,
+                            StringPiece16 delimiter,
+                            std::vector<string16>* result) {
+  SplitStringUsingSubstrT(input, delimiter, TRIM_WHITESPACE, SPLIT_WANT_ALL,
+                          result);
 }
 
-std::vector<std::string> SplitStringUsingSubstr(StringPiece input,
-                                                StringPiece delimiter,
-                                                WhitespaceHandling whitespace,
-                                                SplitResult result_type) {
-  std::vector<std::string> result;
-  SplitStringUsingSubstrT(input, delimiter, whitespace, result_type, &result);
-  return result;
+void SplitStringUsingSubstr(StringPiece input,
+                            StringPiece delimiter,
+                            std::vector<std::string>* result) {
+  SplitStringUsingSubstrT(input, delimiter, TRIM_WHITESPACE, SPLIT_WANT_ALL,
+                          result);
 }
 
 std::vector<StringPiece16> SplitStringPieceUsingSubstr(
diff --git a/base/strings/string_split.h b/base/strings/string_split.h
index 24b9dfa..ec9f246 100644
--- a/base/strings/string_split.h
+++ b/base/strings/string_split.h
@@ -90,16 +90,16 @@
 
 // Similar to SplitString, but use a substring delimiter instead of a list of
 // characters that are all possible delimiters.
-BASE_EXPORT std::vector<string16> SplitStringUsingSubstr(
-    StringPiece16 input,
-    StringPiece16 delimiter,
-    WhitespaceHandling whitespace,
-    SplitResult result_type);
-BASE_EXPORT std::vector<std::string> SplitStringUsingSubstr(
-    StringPiece input,
-    StringPiece delimiter,
-    WhitespaceHandling whitespace,
-    SplitResult result_type);
+//
+// TODO(brettw) this should probably be changed and expanded to provide a
+// mirror of the SplitString[Piece] API above, just with the different
+// delimiter handling.
+BASE_EXPORT void SplitStringUsingSubstr(StringPiece16 input,
+                                        StringPiece16 delimiter,
+                                        std::vector<string16>* result);
+BASE_EXPORT void SplitStringUsingSubstr(StringPiece input,
+                                        StringPiece delimiter,
+                                        std::vector<std::string>* result);
 
 // Like SplitStringUsingSubstr above except it returns a vector of StringPieces
 // which reference the original buffer without copying. Although you have to be
diff --git a/base/strings/string_split_unittest.cc b/base/strings/string_split_unittest.cc
index bf09aa5..657a2db 100644
--- a/base/strings/string_split_unittest.cc
+++ b/base/strings/string_split_unittest.cc
@@ -150,8 +150,8 @@
 }
 
 TEST(SplitStringUsingSubstrTest, EmptyString) {
-  std::vector<std::string> results = SplitStringUsingSubstr(
-      std::string(), "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+  std::vector<std::string> results;
+  SplitStringUsingSubstr(std::string(), "DELIMITER", &results);
   ASSERT_EQ(1u, results.size());
   EXPECT_THAT(results, ElementsAre(""));
 }
@@ -231,33 +231,38 @@
 }
 
 TEST(SplitStringUsingSubstrTest, StringWithNoDelimiter) {
-  std::vector<std::string> results = SplitStringUsingSubstr(
-      "alongwordwithnodelimiter", "DELIMITER", TRIM_WHITESPACE,
-      SPLIT_WANT_ALL);
+  std::vector<std::string> results;
+  SplitStringUsingSubstr("alongwordwithnodelimiter", "DELIMITER", &results);
   ASSERT_EQ(1u, results.size());
   EXPECT_THAT(results, ElementsAre("alongwordwithnodelimiter"));
 }
 
 TEST(SplitStringUsingSubstrTest, LeadingDelimitersSkipped) {
-  std::vector<std::string> results = SplitStringUsingSubstr(
+  std::vector<std::string> results;
+  SplitStringUsingSubstr(
       "DELIMITERDELIMITERDELIMITERoneDELIMITERtwoDELIMITERthree",
-      "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+      "DELIMITER",
+      &results);
   ASSERT_EQ(6u, results.size());
   EXPECT_THAT(results, ElementsAre("", "", "", "one", "two", "three"));
 }
 
 TEST(SplitStringUsingSubstrTest, ConsecutiveDelimitersSkipped) {
-  std::vector<std::string> results = SplitStringUsingSubstr(
+  std::vector<std::string> results;
+  SplitStringUsingSubstr(
       "unoDELIMITERDELIMITERDELIMITERdosDELIMITERtresDELIMITERDELIMITERcuatro",
-      "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+      "DELIMITER",
+      &results);
   ASSERT_EQ(7u, results.size());
   EXPECT_THAT(results, ElementsAre("uno", "", "", "dos", "tres", "", "cuatro"));
 }
 
 TEST(SplitStringUsingSubstrTest, TrailingDelimitersSkipped) {
-  std::vector<std::string> results = SplitStringUsingSubstr(
+  std::vector<std::string> results;
+  SplitStringUsingSubstr(
       "unDELIMITERdeuxDELIMITERtroisDELIMITERquatreDELIMITERDELIMITERDELIMITER",
-      "DELIMITER", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+      "DELIMITER",
+      &results);
   ASSERT_EQ(7u, results.size());
   EXPECT_THAT(
       results, ElementsAre("un", "deux", "trois", "quatre", "", "", ""));
diff --git a/base/strings/string_util.cc b/base/strings/string_util.cc
index 71ae894..cb668ed 100644
--- a/base/strings/string_util.cc
+++ b/base/strings/string_util.cc
@@ -64,21 +64,6 @@
   return elem1.parameter < elem2.parameter;
 }
 
-// Overloaded function to append one string onto the end of another. Having a
-// separate overload for |source| as both string and StringPiece allows for more
-// efficient usage from functions templated to work with either type (avoiding a
-// redundant call to the BasicStringPiece constructor in both cases).
-template <typename string_type>
-inline void AppendToString(string_type* target, const string_type& source) {
-  target->append(source);
-}
-
-template <typename string_type>
-inline void AppendToString(string_type* target,
-                           const BasicStringPiece<string_type>& source) {
-  source.AppendToString(target);
-}
-
 // Assuming that a pointer is the size of a "machine word", then
 // uintptr_t is an integer type that is also a machine word.
 typedef uintptr_t MachineWord;
@@ -868,40 +853,21 @@
   return WriteIntoT(str, length_with_null);
 }
 
-// Generic version for all JoinString overloads. |list_type| must be a sequence
-// (std::vector or std::initializer_list) of strings/StringPieces (std::string,
-// string16, StringPiece or StringPiece16). |string_type| is either std::string
-// or string16.
-template <typename list_type, typename string_type>
-static string_type JoinStringT(const list_type& parts,
-                               BasicStringPiece<string_type> sep) {
-  if (parts.size() == 0)
-    return string_type();
+template<typename STR>
+static STR JoinStringT(const std::vector<STR>& parts,
+                       BasicStringPiece<STR> sep) {
+  if (parts.empty())
+    return STR();
 
-  // Pre-allocate the eventual size of the string. Start with the size of all of
-  // the separators (note that this *assumes* parts.size() > 0).
-  size_t total_size = (parts.size() - 1) * sep.size();
-  for (const auto& part : parts)
-    total_size += part.size();
-  string_type result;
-  result.reserve(total_size);
-
+  STR result(parts[0]);
   auto iter = parts.begin();
-  DCHECK(iter != parts.end());
-  AppendToString(&result, *iter);
   ++iter;
 
   for (; iter != parts.end(); ++iter) {
     sep.AppendToString(&result);
-    // Using the overloaded AppendToString allows this template function to work
-    // on both strings and StringPieces without creating an intermediate
-    // StringPiece object.
-    AppendToString(&result, *iter);
+    result += *iter;
   }
 
-  // Sanity-check that we pre-allocated correctly.
-  DCHECK_EQ(total_size, result.size());
-
   return result;
 }
 
@@ -915,26 +881,6 @@
   return JoinStringT(parts, separator);
 }
 
-std::string JoinString(const std::vector<StringPiece>& parts,
-                       StringPiece separator) {
-  return JoinStringT(parts, separator);
-}
-
-string16 JoinString(const std::vector<StringPiece16>& parts,
-                    StringPiece16 separator) {
-  return JoinStringT(parts, separator);
-}
-
-std::string JoinString(std::initializer_list<StringPiece> parts,
-                       StringPiece separator) {
-  return JoinStringT(parts, separator);
-}
-
-string16 JoinString(std::initializer_list<StringPiece16> parts,
-                    StringPiece16 separator) {
-  return JoinStringT(parts, separator);
-}
-
 template<class FormatStringType, class OutStringType>
 OutStringType DoReplaceStringPlaceholders(
     const FormatStringType& format_string,
diff --git a/base/strings/string_util.h b/base/strings/string_util.h
index 29076ed..0ee077c 100644
--- a/base/strings/string_util.h
+++ b/base/strings/string_util.h
@@ -12,7 +12,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <initializer_list>
 #include <string>
 #include <vector>
 
@@ -291,6 +290,8 @@
 BASE_EXPORT bool IsStringUTF8(const StringPiece& str);
 BASE_EXPORT bool IsStringASCII(const StringPiece& str);
 BASE_EXPORT bool IsStringASCII(const StringPiece16& str);
+// A convenience adaptor for WebStrings, as they don't convert into
+// StringPieces directly.
 BASE_EXPORT bool IsStringASCII(const string16& str);
 #if defined(WCHAR_T_IS_UTF32)
 BASE_EXPORT bool IsStringASCII(const std::wstring& str);
@@ -436,30 +437,11 @@
 BASE_EXPORT wchar_t* WriteInto(std::wstring* str, size_t length_with_null);
 #endif
 
-// Does the opposite of SplitString()/SplitStringPiece(). Joins a vector or list
-// of strings into a single string, inserting |separator| (which may be empty)
-// in between all elements.
-//
-// If possible, callers should build a vector of StringPieces and use the
-// StringPiece variant, so that they do not create unnecessary copies of
-// strings. For example, instead of using SplitString, modifying the vector,
-// then using JoinString, use SplitStringPiece followed by JoinString so that no
-// copies of those strings are created until the final join operation.
+// Does the opposite of SplitString().
 BASE_EXPORT std::string JoinString(const std::vector<std::string>& parts,
                                    StringPiece separator);
 BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
                                 StringPiece16 separator);
-BASE_EXPORT std::string JoinString(const std::vector<StringPiece>& parts,
-                                   StringPiece separator);
-BASE_EXPORT string16 JoinString(const std::vector<StringPiece16>& parts,
-                                StringPiece16 separator);
-// Explicit initializer_list overloads are required to break ambiguity when used
-// with a literal initializer list (otherwise the compiler would not be able to
-// decide between the string and StringPiece overloads).
-BASE_EXPORT std::string JoinString(std::initializer_list<StringPiece> parts,
-                                   StringPiece separator);
-BASE_EXPORT string16 JoinString(std::initializer_list<StringPiece16> parts,
-                                StringPiece16 separator);
 
 // Replace $1-$2-$3..$9 in the format string with values from |subst|.
 // Additionally, any number of consecutive '$' characters is replaced by that
diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc
index 6ac307e..df2226e 100644
--- a/base/strings/string_util_unittest.cc
+++ b/base/strings/string_util_unittest.cc
@@ -676,10 +676,6 @@
   std::vector<std::string> parts;
   EXPECT_EQ(std::string(), JoinString(parts, separator));
 
-  parts.push_back(std::string());
-  EXPECT_EQ(std::string(), JoinString(parts, separator));
-  parts.clear();
-
   parts.push_back("a");
   EXPECT_EQ("a", JoinString(parts, separator));
 
@@ -698,10 +694,6 @@
   std::vector<string16> parts;
   EXPECT_EQ(string16(), JoinString(parts, separator));
 
-  parts.push_back(string16());
-  EXPECT_EQ(string16(), JoinString(parts, separator));
-  parts.clear();
-
   parts.push_back(ASCIIToUTF16("a"));
   EXPECT_EQ(ASCIIToUTF16("a"), JoinString(parts, separator));
 
@@ -715,108 +707,6 @@
   EXPECT_EQ(ASCIIToUTF16("a|b|c|| "), JoinString(parts, ASCIIToUTF16("|")));
 }
 
-TEST(StringUtilTest, JoinStringPiece) {
-  std::string separator(", ");
-  std::vector<StringPiece> parts;
-  EXPECT_EQ(std::string(), JoinString(parts, separator));
-
-  // Test empty first part (https://crbug.com/698073).
-  parts.push_back(StringPiece());
-  EXPECT_EQ(std::string(), JoinString(parts, separator));
-  parts.clear();
-
-  parts.push_back("a");
-  EXPECT_EQ("a", JoinString(parts, separator));
-
-  parts.push_back("b");
-  parts.push_back("c");
-  EXPECT_EQ("a, b, c", JoinString(parts, separator));
-
-  parts.push_back(StringPiece());
-  EXPECT_EQ("a, b, c, ", JoinString(parts, separator));
-  parts.push_back(" ");
-  EXPECT_EQ("a|b|c|| ", JoinString(parts, "|"));
-}
-
-TEST(StringUtilTest, JoinStringPiece16) {
-  string16 separator = ASCIIToUTF16(", ");
-  std::vector<StringPiece16> parts;
-  EXPECT_EQ(string16(), JoinString(parts, separator));
-
-  // Test empty first part (https://crbug.com/698073).
-  parts.push_back(StringPiece16());
-  EXPECT_EQ(string16(), JoinString(parts, separator));
-  parts.clear();
-
-  const string16 kA = ASCIIToUTF16("a");
-  parts.push_back(kA);
-  EXPECT_EQ(ASCIIToUTF16("a"), JoinString(parts, separator));
-
-  const string16 kB = ASCIIToUTF16("b");
-  parts.push_back(kB);
-  const string16 kC = ASCIIToUTF16("c");
-  parts.push_back(kC);
-  EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString(parts, separator));
-
-  parts.push_back(StringPiece16());
-  EXPECT_EQ(ASCIIToUTF16("a, b, c, "), JoinString(parts, separator));
-  const string16 kSpace = ASCIIToUTF16(" ");
-  parts.push_back(kSpace);
-  EXPECT_EQ(ASCIIToUTF16("a|b|c|| "), JoinString(parts, ASCIIToUTF16("|")));
-}
-
-TEST(StringUtilTest, JoinStringInitializerList) {
-  std::string separator(", ");
-  EXPECT_EQ(std::string(), JoinString({}, separator));
-
-  // Test empty first part (https://crbug.com/698073).
-  EXPECT_EQ(std::string(), JoinString({StringPiece()}, separator));
-
-  // With const char*s.
-  EXPECT_EQ("a", JoinString({"a"}, separator));
-  EXPECT_EQ("a, b, c", JoinString({"a", "b", "c"}, separator));
-  EXPECT_EQ("a, b, c, ", JoinString({"a", "b", "c", StringPiece()}, separator));
-  EXPECT_EQ("a|b|c|| ", JoinString({"a", "b", "c", StringPiece(), " "}, "|"));
-
-  // With std::strings.
-  const std::string kA = "a";
-  const std::string kB = "b";
-  EXPECT_EQ("a, b", JoinString({kA, kB}, separator));
-
-  // With StringPieces.
-  const StringPiece kPieceA = kA;
-  const StringPiece kPieceB = kB;
-  EXPECT_EQ("a, b", JoinString({kPieceA, kPieceB}, separator));
-}
-
-TEST(StringUtilTest, JoinStringInitializerList16) {
-  string16 separator = ASCIIToUTF16(", ");
-  EXPECT_EQ(string16(), JoinString({}, separator));
-
-  // Test empty first part (https://crbug.com/698073).
-  EXPECT_EQ(string16(), JoinString({StringPiece16()}, separator));
-
-  // With string16s.
-  const string16 kA = ASCIIToUTF16("a");
-  EXPECT_EQ(ASCIIToUTF16("a"), JoinString({kA}, separator));
-
-  const string16 kB = ASCIIToUTF16("b");
-  const string16 kC = ASCIIToUTF16("c");
-  EXPECT_EQ(ASCIIToUTF16("a, b, c"), JoinString({kA, kB, kC}, separator));
-
-  EXPECT_EQ(ASCIIToUTF16("a, b, c, "),
-            JoinString({kA, kB, kC, StringPiece16()}, separator));
-  const string16 kSpace = ASCIIToUTF16(" ");
-  EXPECT_EQ(
-      ASCIIToUTF16("a|b|c|| "),
-      JoinString({kA, kB, kC, StringPiece16(), kSpace}, ASCIIToUTF16("|")));
-
-  // With StringPiece16s.
-  const StringPiece16 kPieceA = kA;
-  const StringPiece16 kPieceB = kB;
-  EXPECT_EQ(ASCIIToUTF16("a, b"), JoinString({kPieceA, kPieceB}, separator));
-}
-
 TEST(StringUtilTest, StartsWith) {
   EXPECT_TRUE(StartsWith("javascript:url", "javascript",
                          base::CompareCase::SENSITIVE));
diff --git a/base/strings/utf_string_conversion_utils.h b/base/strings/utf_string_conversion_utils.h
index 2d95870..c716404 100644
--- a/base/strings/utf_string_conversion_utils.h
+++ b/base/strings/utf_string_conversion_utils.h
@@ -5,8 +5,7 @@
 #ifndef BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
 #define BASE_STRINGS_UTF_STRING_CONVERSION_UTILS_H_
 
-// Low-level UTF handling functions. Most code will want to use the functions
-// in utf_string_conversions.h
+// This should only be used by the various UTF string conversion files.
 
 #include <stddef.h>
 #include <stdint.h>
diff --git a/base/strings/utf_string_conversions.cc b/base/strings/utf_string_conversions.cc
index 85450c6..6b17eac 100644
--- a/base/strings/utf_string_conversions.cc
+++ b/base/strings/utf_string_conversions.cc
@@ -180,6 +180,10 @@
 }
 
 std::string UTF16ToUTF8(StringPiece16 utf16) {
+  if (IsStringASCII(utf16)) {
+    return std::string(utf16.begin(), utf16.end());
+  }
+
   std::string ret;
   // Ignore the success flag of this call, it will do the best it can for
   // invalid input, which is what we want here.
diff --git a/base/synchronization/atomic_flag.cc b/base/synchronization/atomic_flag.cc
deleted file mode 100644
index 8c2018d..0000000
--- a/base/synchronization/atomic_flag.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/synchronization/atomic_flag.h"
-
-#include "base/logging.h"
-
-namespace base {
-
-AtomicFlag::AtomicFlag() {
-  // It doesn't matter where the AtomicFlag is built so long as it's always
-  // Set() from the same sequence after. Note: the sequencing requirements are
-  // necessary for IsSet()'s callers to know which sequence's memory operations
-  // they are synchronized with.
-  set_sequence_checker_.DetachFromSequence();
-}
-
-void AtomicFlag::Set() {
-  DCHECK(set_sequence_checker_.CalledOnValidSequence());
-  base::subtle::Release_Store(&flag_, 1);
-}
-
-bool AtomicFlag::IsSet() const {
-  return base::subtle::Acquire_Load(&flag_) != 0;
-}
-
-void AtomicFlag::UnsafeResetForTesting() {
-  base::subtle::Release_Store(&flag_, 0);
-}
-
-}  // namespace base
diff --git a/base/synchronization/atomic_flag.h b/base/synchronization/atomic_flag.h
deleted file mode 100644
index ff175e1..0000000
--- a/base/synchronization/atomic_flag.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
-#define BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
-
-#include "base/atomicops.h"
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/sequence_checker.h"
-
-namespace base {
-
-// A flag that can safely be set from one thread and read from other threads.
-//
-// This class IS NOT intended for synchronization between threads.
-class BASE_EXPORT AtomicFlag {
- public:
-  AtomicFlag();
-  ~AtomicFlag() = default;
-
-  // Set the flag. Must always be called from the same sequence.
-  void Set();
-
-  // Returns true iff the flag was set. If this returns true, the current thread
-  // is guaranteed to be synchronized with all memory operations on the sequence
-  // which invoked Set() up until at least the first call to Set() on it.
-  bool IsSet() const;
-
-  // Resets the flag. Be careful when using this: callers might not expect
-  // IsSet() to return false after returning true once.
-  void UnsafeResetForTesting();
-
- private:
-  base::subtle::Atomic32 flag_ = 0;
-  SequenceChecker set_sequence_checker_;
-
-  DISALLOW_COPY_AND_ASSIGN(AtomicFlag);
-};
-
-}  // namespace base
-
-#endif  // BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
diff --git a/base/synchronization/atomic_flag_unittest.cc b/base/synchronization/atomic_flag_unittest.cc
deleted file mode 100644
index a3aa334..0000000
--- a/base/synchronization/atomic_flag_unittest.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/synchronization/atomic_flag.h"
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/single_thread_task_runner.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/test/gtest_util.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-namespace {
-
-void ExpectSetFlagDeath(AtomicFlag* flag) {
-  ASSERT_TRUE(flag);
-  EXPECT_DCHECK_DEATH(flag->Set());
-}
-
-// Busy waits (to explicitly avoid using synchronization constructs that would
-// defeat the purpose of testing atomics) until |tested_flag| is set and then
-// verifies that non-atomic |*expected_after_flag| is true and sets |*done_flag|
-// before returning if it's non-null.
-void BusyWaitUntilFlagIsSet(AtomicFlag* tested_flag, bool* expected_after_flag,
-                            AtomicFlag* done_flag) {
-  while (!tested_flag->IsSet())
-    PlatformThread::YieldCurrentThread();
-
-  EXPECT_TRUE(*expected_after_flag);
-  if (done_flag)
-    done_flag->Set();
-}
-
-}  // namespace
-
-TEST(AtomicFlagTest, SimpleSingleThreadedTest) {
-  AtomicFlag flag;
-  ASSERT_FALSE(flag.IsSet());
-  flag.Set();
-  ASSERT_TRUE(flag.IsSet());
-}
-
-TEST(AtomicFlagTest, DoubleSetTest) {
-  AtomicFlag flag;
-  ASSERT_FALSE(flag.IsSet());
-  flag.Set();
-  ASSERT_TRUE(flag.IsSet());
-  flag.Set();
-  ASSERT_TRUE(flag.IsSet());
-}
-
-TEST(AtomicFlagTest, ReadFromDifferentThread) {
-  // |tested_flag| is the one being tested below.
-  AtomicFlag tested_flag;
-  // |expected_after_flag| is used to confirm that sequential consistency is
-  // obtained around |tested_flag|.
-  bool expected_after_flag = false;
-  // |reset_flag| is used to confirm the test flows as intended without using
-  // synchronization constructs which would defeat the purpose of exercising
-  // atomics.
-  AtomicFlag reset_flag;
-
-  Thread thread("AtomicFlagTest.ReadFromDifferentThread");
-  ASSERT_TRUE(thread.Start());
-  thread.task_runner()->PostTask(
-      FROM_HERE,
-      Bind(&BusyWaitUntilFlagIsSet, &tested_flag, &expected_after_flag,
-           &reset_flag));
-
-  // To verify that IsSet() fetches the flag's value from memory every time it
-  // is called (not just the first time that it is called on a thread), sleep
-  // before setting the flag.
-  PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
-
-  // |expected_after_flag| is used to verify that all memory operations
-  // performed before |tested_flag| is Set() are visible to threads that can see
-  // IsSet().
-  expected_after_flag = true;
-  tested_flag.Set();
-
-  // Sleep again to give the busy loop time to observe the flag and verify
-  // expectations.
-  PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
-
-  // Use |reset_flag| to confirm that the above completed (which the rest of
-  // this test assumes).
-  ASSERT_TRUE(reset_flag.IsSet());
-
-  tested_flag.UnsafeResetForTesting();
-  EXPECT_FALSE(tested_flag.IsSet());
-  expected_after_flag = false;
-
-  // Perform the same test again after the controlled UnsafeResetForTesting(),
-  // |thread| is guaranteed to be synchronized past the
-  // |UnsafeResetForTesting()| call when the task runs per the implicit
-  // synchronization in the post task mechanism.
-  thread.task_runner()->PostTask(
-      FROM_HERE,
-      Bind(&BusyWaitUntilFlagIsSet, &tested_flag, &expected_after_flag,
-           nullptr));
-
-  PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
-
-  expected_after_flag = true;
-  tested_flag.Set();
-
-  // The |thread|'s destructor will block until the posted task completes, so
-  // the test will time out if it fails to see the flag be set.
-}
-
-TEST(AtomicFlagTest, SetOnDifferentSequenceDeathTest) {
-  // Checks that Set() can't be called from another sequence after being called
-  // on this one. AtomicFlag should die on a DCHECK if Set() is called again
-  // from another sequence.
-  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
-  Thread t("AtomicFlagTest.SetOnDifferentThreadDeathTest");
-  ASSERT_TRUE(t.Start());
-  EXPECT_TRUE(t.WaitUntilThreadStarted());
-
-  AtomicFlag flag;
-  flag.Set();
-  t.task_runner()->PostTask(FROM_HERE, Bind(&ExpectSetFlagDeath, &flag));
-}
-
-}  // namespace base
diff --git a/base/synchronization/cancellation_flag.cc b/base/synchronization/cancellation_flag.cc
new file mode 100644
index 0000000..ca5c0a8
--- /dev/null
+++ b/base/synchronization/cancellation_flag.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/cancellation_flag.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+void CancellationFlag::Set() {
+#if !defined(NDEBUG)
+  DCHECK_EQ(set_on_, PlatformThread::CurrentId());
+#endif
+  base::subtle::Release_Store(&flag_, 1);
+}
+
+bool CancellationFlag::IsSet() const {
+  return base::subtle::Acquire_Load(&flag_) != 0;
+}
+
+void CancellationFlag::UnsafeResetForTesting() {
+  base::subtle::Release_Store(&flag_, 0);
+}
+
+}  // namespace base
diff --git a/base/synchronization/cancellation_flag.h b/base/synchronization/cancellation_flag.h
index 39094e2..f2f83f4 100644
--- a/base/synchronization/cancellation_flag.h
+++ b/base/synchronization/cancellation_flag.h
@@ -5,15 +5,44 @@
 #ifndef BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
 #define BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
 
-#include "base/synchronization/atomic_flag.h"
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/threading/platform_thread.h"
 
 namespace base {
 
-// Use inheritance instead of "using" to allow forward declaration of "class
-// CancellationFlag".
-// TODO(fdoray): Replace CancellationFlag with AtomicFlag throughout the
-// codebase and delete this file. crbug.com/630251
-class CancellationFlag : public AtomicFlag {};
+// CancellationFlag allows one thread to cancel jobs executed on some worker
+// thread. Calling Set() from one thread and IsSet() from a number of threads
+// is thread-safe.
+//
+// This class IS NOT intended for synchronization between threads.
+class BASE_EXPORT CancellationFlag {
+ public:
+  CancellationFlag() : flag_(false) {
+#if !defined(NDEBUG)
+    set_on_ = PlatformThread::CurrentId();
+#endif
+  }
+  ~CancellationFlag() {}
+
+  // Set the flag. May only be called on the thread which owns the object.
+  void Set();
+  bool IsSet() const;  // Returns true iff the flag was set.
+
+  // For subtle reasons that may be different on different architectures,
+  // a different thread testing IsSet() may erroneously read 'true' after
+  // this method has been called.
+  void UnsafeResetForTesting();
+
+ private:
+  base::subtle::Atomic32 flag_;
+#if !defined(NDEBUG)
+  PlatformThreadId set_on_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(CancellationFlag);
+};
 
 }  // namespace base
 
diff --git a/base/synchronization/cancellation_flag_unittest.cc b/base/synchronization/cancellation_flag_unittest.cc
new file mode 100644
index 0000000..13c74bc
--- /dev/null
+++ b/base/synchronization/cancellation_flag_unittest.cc
@@ -0,0 +1,65 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests of CancellationFlag class.
+
+#include "base/synchronization/cancellation_flag.h"
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/spin_wait.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+namespace base {
+
+namespace {
+
+//------------------------------------------------------------------------------
+// Define our test class.
+//------------------------------------------------------------------------------
+
+void CancelHelper(CancellationFlag* flag) {
+#if GTEST_HAS_DEATH_TEST
+  ASSERT_DEBUG_DEATH(flag->Set(), "");
+#endif
+}
+
+TEST(CancellationFlagTest, SimpleSingleThreadedTest) {
+  CancellationFlag flag;
+  ASSERT_FALSE(flag.IsSet());
+  flag.Set();
+  ASSERT_TRUE(flag.IsSet());
+}
+
+TEST(CancellationFlagTest, DoubleSetTest) {
+  CancellationFlag flag;
+  ASSERT_FALSE(flag.IsSet());
+  flag.Set();
+  ASSERT_TRUE(flag.IsSet());
+  flag.Set();
+  ASSERT_TRUE(flag.IsSet());
+}
+
+TEST(CancellationFlagTest, SetOnDifferentThreadDeathTest) {
+  // Checks that Set() can't be called from any other thread.
+  // CancellationFlag should die on a DCHECK if Set() is called from
+  // other thread.
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+  Thread t("CancellationFlagTest.SetOnDifferentThreadDeathTest");
+  ASSERT_TRUE(t.Start());
+  ASSERT_TRUE(t.message_loop());
+  ASSERT_TRUE(t.IsRunning());
+
+  CancellationFlag flag;
+  t.task_runner()->PostTask(FROM_HERE, base::Bind(&CancelHelper, &flag));
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/synchronization/condition_variable.h b/base/synchronization/condition_variable.h
index b567751..ebf90d2 100644
--- a/base/synchronization/condition_variable.h
+++ b/base/synchronization/condition_variable.h
@@ -91,13 +91,11 @@
   ~ConditionVariable();
 
   // Wait() releases the caller's critical section atomically as it starts to
-  // sleep, and the reacquires it when it is signaled. The wait functions are
-  // susceptible to spurious wakeups. (See usage note 1 for more details.)
+  // sleep, and the reacquires it when it is signaled.
   void Wait();
   void TimedWait(const TimeDelta& max_time);
 
-  // Broadcast() revives all waiting threads. (See usage note 2 for more
-  // details.)
+  // Broadcast() revives all waiting threads.
   void Broadcast();
   // Signal() revives one waiting thread.
   void Signal();
diff --git a/base/synchronization/condition_variable_posix.cc b/base/synchronization/condition_variable_posix.cc
index d07c671..d86fd18 100644
--- a/base/synchronization/condition_variable_posix.cc
+++ b/base/synchronization/condition_variable_posix.cc
@@ -118,8 +118,6 @@
 #endif  // OS_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
 #endif  // OS_MACOSX
 
-  // On failure, we only expect the CV to timeout. Any other error value means
-  // that we've unexpectedly woken up.
   DCHECK(rv == 0 || rv == ETIMEDOUT);
 #if DCHECK_IS_ON()
   user_lock_->CheckUnheldAndMark();
diff --git a/base/synchronization/lock.h b/base/synchronization/lock.h
index 599984e..fbf6cef 100644
--- a/base/synchronization/lock.h
+++ b/base/synchronization/lock.h
@@ -61,23 +61,6 @@
   void AssertAcquired() const;
 #endif  // DCHECK_IS_ON()
 
-  // Whether Lock mitigates priority inversion when used from different thread
-  // priorities.
-  static bool HandlesMultipleThreadPriorities() {
-#if defined(OS_POSIX)
-    // POSIX mitigates priority inversion by setting the priority of a thread
-    // holding a Lock to the maximum priority of any other thread waiting on it.
-    return internal::LockImpl::PriorityInheritanceAvailable();
-#elif defined(OS_WIN)
-    // Windows mitigates priority inversion by randomly boosting the priority of
-    // ready threads.
-    // https://msdn.microsoft.com/library/windows/desktop/ms684831.aspx
-    return true;
-#else
-#error Unsupported platform
-#endif
-  }
-
 #if defined(OS_POSIX) || defined(OS_WIN)
   // Both Windows and POSIX implementations of ConditionVariable need to be
   // able to see our lock and tweak our debugging counters, as they release and
diff --git a/base/synchronization/lock_impl.h b/base/synchronization/lock_impl.h
index 603585a..cbaabc7 100644
--- a/base/synchronization/lock_impl.h
+++ b/base/synchronization/lock_impl.h
@@ -48,11 +48,6 @@
   // unnecessary.
   NativeHandle* native_handle() { return &native_handle_; }
 
-#if defined(OS_POSIX)
-  // Whether this lock will attempt to use priority inheritance.
-  static bool PriorityInheritanceAvailable();
-#endif
-
  private:
   NativeHandle native_handle_;
 
diff --git a/base/synchronization/lock_impl_posix.cc b/base/synchronization/lock_impl_posix.cc
index ff997ea..5619ada 100644
--- a/base/synchronization/lock_impl_posix.cc
+++ b/base/synchronization/lock_impl_posix.cc
@@ -7,45 +7,27 @@
 #include <errno.h>
 #include <string.h>
 
-#include "base/debug/activity_tracker.h"
 #include "base/logging.h"
-#include "base/synchronization/lock.h"
 
 namespace base {
 namespace internal {
 
-// Determines which platforms can consider using priority inheritance locks. Use
-// this define for platform code that may not compile if priority inheritance
-// locks aren't available. For this platform code,
-// PRIORITY_INHERITANCE_LOCKS_POSSIBLE() is a necessary but insufficient check.
-// Lock::PriorityInheritanceAvailable still must be checked as the code may
-// compile but the underlying platform still may not correctly support priority
-// inheritance locks.
-#if defined(OS_NACL) || defined(OS_ANDROID) || defined(__ANDROID__)
-#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 0
-#else
-#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 1
-#endif
-
 LockImpl::LockImpl() {
+#ifndef NDEBUG
+  // In debug, setup attributes for lock error checking.
   pthread_mutexattr_t mta;
   int rv = pthread_mutexattr_init(&mta);
   DCHECK_EQ(rv, 0) << ". " << strerror(rv);
-#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE()
-  if (PriorityInheritanceAvailable()) {
-    rv = pthread_mutexattr_setprotocol(&mta, PTHREAD_PRIO_INHERIT);
-    DCHECK_EQ(rv, 0) << ". " << strerror(rv);
-  }
-#endif
-#ifndef NDEBUG
-  // In debug, setup attributes for lock error checking.
   rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
   DCHECK_EQ(rv, 0) << ". " << strerror(rv);
-#endif
   rv = pthread_mutex_init(&native_handle_, &mta);
   DCHECK_EQ(rv, 0) << ". " << strerror(rv);
   rv = pthread_mutexattr_destroy(&mta);
   DCHECK_EQ(rv, 0) << ". " << strerror(rv);
+#else
+  // In release, go with the default lock attributes.
+  pthread_mutex_init(&native_handle_, NULL);
+#endif
 }
 
 LockImpl::~LockImpl() {
@@ -60,7 +42,6 @@
 }
 
 void LockImpl::Lock() {
-  base::debug::ScopedLockAcquireActivity lock_activity(this);
   int rv = pthread_mutex_lock(&native_handle_);
   DCHECK_EQ(rv, 0) << ". " << strerror(rv);
 }
@@ -70,29 +51,5 @@
   DCHECK_EQ(rv, 0) << ". " << strerror(rv);
 }
 
-// static
-bool LockImpl::PriorityInheritanceAvailable() {
-#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
-  return true;
-#else
-  // Security concerns prevent the use of priority inheritance mutexes on Linux.
-  //   * CVE-2010-0622 - wake_futex_pi unlocks incorrect, possible DoS.
-  //     https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0622
-  //   * CVE-2012-6647 - Linux < 3.5.1, futex_wait_requeue_pi possible DoS.
-  //     https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-6647
-  //   * CVE-2014-3153 - Linux <= 3.14.5, futex_requeue, privilege escalation.
-  //     https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-3153
-  //
-  // If the above were all addressed, we still need a runtime check to deal with
-  // the bug below.
-  //   * glibc Bug 14652: https://sourceware.org/bugzilla/show_bug.cgi?id=14652
-  //     Fixed in glibc 2.17.
-  //     Priority inheritance mutexes may deadlock with condition variables
-  //     during recacquisition of the mutex after the condition variable is
-  //     signalled.
-  return false;
-#endif
-}
-
 }  // namespace internal
 }  // namespace base
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index 761965f..3863e98 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -25,7 +25,6 @@
 namespace base {
 
 class TimeDelta;
-class TimeTicks;
 
 // A WaitableEvent can be a useful thread synchronization tool when you want to
 // allow one thread to wait for another thread to finish some work. For
@@ -87,17 +86,12 @@
   //   delete e;
   void Wait();
 
-  // Wait up until wait_delta has passed for the event to be signaled.  Returns
-  // true if the event was signaled.
+  // Wait up until max_time has passed for the event to be signaled.  Returns
+  // true if the event was signaled.  If this method returns false, then it
+  // does not necessarily mean that max_time was exceeded.
   //
   // TimedWait can synchronise its own destruction like |Wait|.
-  bool TimedWait(const TimeDelta& wait_delta);
-
-  // Wait up until end_time deadline has passed for the event to be signaled.
-  // Return true if the event was signaled.
-  //
-  // TimedWaitUntil can synchronise its own destruction like |Wait|.
-  bool TimedWaitUntil(const TimeTicks& end_time);
+  bool TimedWait(const TimeDelta& max_time);
 
 #if defined(OS_WIN)
   HANDLE handle() const { return handle_.Get(); }
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
index 5dfff46..b32c882 100644
--- a/base/synchronization/waitable_event_posix.cc
+++ b/base/synchronization/waitable_event_posix.cc
@@ -7,7 +7,6 @@
 #include <algorithm>
 #include <vector>
 
-#include "base/debug/activity_tracker.h"
 #include "base/logging.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
@@ -153,22 +152,14 @@
 };
 
 void WaitableEvent::Wait() {
-  bool result = TimedWaitUntil(TimeTicks::Max());
+  bool result = TimedWait(TimeDelta::FromSeconds(-1));
   DCHECK(result) << "TimedWait() should never fail with infinite timeout";
 }
 
-bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
-  // TimeTicks takes care of overflow including the cases when wait_delta
-  // is a maximum value.
-  return TimedWaitUntil(TimeTicks::Now() + wait_delta);
-}
-
-bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
+bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
   base::ThreadRestrictions::AssertWaitAllowed();
-  // Record the event that this thread is blocking upon (for hang diagnosis).
-  base::debug::ScopedEventWaitActivity event_activity(this);
-
-  const bool finite_time = !end_time.is_max();
+  const TimeTicks end_time(TimeTicks::Now() + max_time);
+  const bool finite_time = max_time.ToInternalValue() >= 0;
 
   kernel_->lock_.Acquire();
   if (kernel_->signaled_) {
@@ -241,9 +232,6 @@
   base::ThreadRestrictions::AssertWaitAllowed();
   DCHECK(count) << "Cannot wait on no events";
 
-  // Record an event (the first) that this thread is blocking upon.
-  base::debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
-
   // We need to acquire the locks in a globally consistent order. Thus we sort
   // the array of waitables by address. We actually sort a pairs so that we can
   // map back to the original index values later.
diff --git a/base/synchronization/waitable_event_unittest.cc b/base/synchronization/waitable_event_unittest.cc
index c0e280a..ac5c9f1 100644
--- a/base/synchronization/waitable_event_unittest.cc
+++ b/base/synchronization/waitable_event_unittest.cc
@@ -136,7 +136,13 @@
 
 // Tests that using TimeDelta::Max() on TimedWait() is not the same as passing
 // a timeout of 0. (crbug.com/465948)
-TEST(WaitableEventTest, TimedWait) {
+#if defined(OS_POSIX)
+// crbug.com/465948 not fixed yet.
+#define MAYBE_TimedWait DISABLED_TimedWait
+#else
+#define MAYBE_TimedWait TimedWait
+#endif
+TEST(WaitableEventTest, MAYBE_TimedWait) {
   WaitableEvent* ev =
       new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
                         WaitableEvent::InitialState::NOT_SIGNALED);
@@ -147,58 +153,11 @@
   TimeTicks start = TimeTicks::Now();
   PlatformThread::Create(0, &signaler, &thread);
 
-  EXPECT_TRUE(ev->TimedWait(TimeDelta::Max()));
+  ev->TimedWait(TimeDelta::Max());
   EXPECT_GE(TimeTicks::Now() - start, thread_delay);
   delete ev;
 
   PlatformThread::Join(thread);
 }
 
-// Tests that a sub-ms TimedWait doesn't time out promptly.
-TEST(WaitableEventTest, SubMsTimedWait) {
-  WaitableEvent ev(WaitableEvent::ResetPolicy::AUTOMATIC,
-                   WaitableEvent::InitialState::NOT_SIGNALED);
-
-  TimeDelta delay = TimeDelta::FromMicroseconds(900);
-  TimeTicks start_time = TimeTicks::Now();
-  ev.TimedWait(delay);
-  EXPECT_GE(TimeTicks::Now() - start_time, delay);
-}
-
-// Tests that TimedWaitUntil can be safely used with various end_time deadline
-// values.
-TEST(WaitableEventTest, TimedWaitUntil) {
-  WaitableEvent ev(WaitableEvent::ResetPolicy::AUTOMATIC,
-                   WaitableEvent::InitialState::NOT_SIGNALED);
-
-  TimeTicks start_time(TimeTicks::Now());
-  TimeDelta delay = TimeDelta::FromMilliseconds(10);
-
-  // Should be OK to wait for the current time or time in the past.
-  // That should end promptly and be equivalent to IsSignalled.
-  EXPECT_FALSE(ev.TimedWaitUntil(start_time));
-  EXPECT_FALSE(ev.TimedWaitUntil(start_time - delay));
-
-  // Should be OK to wait for zero TimeTicks().
-  EXPECT_FALSE(ev.TimedWaitUntil(TimeTicks()));
-
-  // Waiting for a time in the future shouldn't end before the deadline
-  // if the event isn't signalled.
-  EXPECT_FALSE(ev.TimedWaitUntil(start_time + delay));
-  EXPECT_GE(TimeTicks::Now() - start_time, delay);
-
-  // Test that passing TimeTicks::Max to TimedWaitUntil is valid and isn't
-  // the same as passing TimeTicks(). Also verifies that signaling event
-  // ends the wait promptly.
-  WaitableEventSignaler signaler(delay, &ev);
-  PlatformThreadHandle thread;
-  start_time = TimeTicks::Now();
-  PlatformThread::Create(0, &signaler, &thread);
-
-  EXPECT_TRUE(ev.TimedWaitUntil(TimeTicks::Max()));
-  EXPECT_GE(TimeTicks::Now() - start_time, delay);
-
-  PlatformThread::Join(thread);
-}
-
 }  // namespace base
diff --git a/base/synchronization/waitable_event_watcher.h b/base/synchronization/waitable_event_watcher.h
index 44ef504..eb51eff 100644
--- a/base/synchronization/waitable_event_watcher.h
+++ b/base/synchronization/waitable_event_watcher.h
@@ -6,14 +6,13 @@
 #define BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
 
 #include "base/base_export.h"
-#include "base/macros.h"
-#include "base/sequence_checker.h"
 #include "build/build_config.h"
 
 #if defined(OS_WIN)
 #include "base/win/object_watcher.h"
 #else
 #include "base/callback.h"
+#include "base/message_loop/message_loop.h"
 #include "base/synchronization/waitable_event.h"
 #endif
 
@@ -21,13 +20,14 @@
 
 class Flag;
 class AsyncWaiter;
+class AsyncCallbackTask;
 class WaitableEvent;
 
 // This class provides a way to wait on a WaitableEvent asynchronously.
 //
 // Each instance of this object can be waiting on a single WaitableEvent. When
-// the waitable event is signaled, a callback is invoked on the sequence that
-// called StartWatching(). This callback can be deleted by deleting the waiter.
+// the waitable event is signaled, a callback is made in the thread of a given
+// MessageLoop. This callback can be deleted by deleting the waiter.
 //
 // Typical usage:
 //
@@ -60,56 +60,53 @@
 
 class BASE_EXPORT WaitableEventWatcher
 #if defined(OS_WIN)
-    : public win::ObjectWatcher::Delegate
+    : public win::ObjectWatcher::Delegate {
+#else
+    : public MessageLoop::DestructionObserver {
 #endif
-{
  public:
   typedef Callback<void(WaitableEvent*)> EventCallback;
   WaitableEventWatcher();
-
-#if defined(OS_WIN)
   ~WaitableEventWatcher() override;
-#else
-  ~WaitableEventWatcher();
-#endif
 
-  // When |event| is signaled, |callback| is called on the sequence that called
-  // StartWatching().
+  // When @event is signaled, the given callback is called on the thread of the
+  // current message loop when StartWatching is called.
   bool StartWatching(WaitableEvent* event, const EventCallback& callback);
 
-  // Cancel the current watch. Must be called from the same sequence which
+  // Cancel the current watch. Must be called from the same thread which
   // started the watch.
   //
   // Does nothing if no event is being watched, nor if the watch has completed.
   // The callback will *not* be called for the current watch after this
-  // function returns. Since the callback runs on the same sequence as this
+  // function returns. Since the callback runs on the same thread as this
   // function, it cannot be called during this function either.
   void StopWatching();
 
+  // Return the currently watched event, or NULL if no object is currently being
+  // watched.
+  WaitableEvent* GetWatchedEvent();
+
+  // Return the callback that will be invoked when the event is
+  // signaled.
+  const EventCallback& callback() const { return callback_; }
+
  private:
 #if defined(OS_WIN)
   void OnObjectSignaled(HANDLE h) override;
-
   win::ObjectWatcher watcher_;
-  EventCallback callback_;
-  WaitableEvent* event_ = nullptr;
 #else
-  // Instantiated in StartWatching(). Set before the callback runs. Reset in
-  // StopWatching() or StartWatching().
+  // Implementation of MessageLoop::DestructionObserver
+  void WillDestroyCurrentMessageLoop() override;
+
+  MessageLoop* message_loop_;
   scoped_refptr<Flag> cancel_flag_;
-
-  // Enqueued in the wait list of the watched WaitableEvent.
-  AsyncWaiter* waiter_ = nullptr;
-
-  // Kernel of the watched WaitableEvent.
+  AsyncWaiter* waiter_;
+  base::Closure internal_callback_;
   scoped_refptr<WaitableEvent::WaitableEventKernel> kernel_;
-
-  // Ensures that StartWatching() and StopWatching() are called on the same
-  // sequence.
-  SequenceChecker sequence_checker_;
 #endif
 
-  DISALLOW_COPY_AND_ASSIGN(WaitableEventWatcher);
+  WaitableEvent* event_;
+  EventCallback callback_;
 };
 
 }  // namespace base
diff --git a/base/synchronization/waitable_event_watcher_posix.cc b/base/synchronization/waitable_event_watcher_posix.cc
index 3adbc5f..7cf8688 100644
--- a/base/synchronization/waitable_event_watcher_posix.cc
+++ b/base/synchronization/waitable_event_watcher_posix.cc
@@ -4,12 +4,12 @@
 
 #include "base/synchronization/waitable_event_watcher.h"
 
-#include <utility>
-
 #include "base/bind.h"
-#include "base/logging.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/single_thread_task_runner.h"
 #include "base/synchronization/lock.h"
-#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/synchronization/waitable_event.h"
 
 namespace base {
 
@@ -17,15 +17,14 @@
 // WaitableEventWatcher (async waits).
 //
 // The basic design is that we add an AsyncWaiter to the wait-list of the event.
-// That AsyncWaiter has a pointer to SequencedTaskRunner, and a Task to be
-// posted to it. The task ends up calling the callback when it runs on the
-// sequence.
+// That AsyncWaiter has a pointer to MessageLoop, and a Task to be posted to it.
+// The MessageLoop ends up running the task, which calls the delegate.
 //
 // Since the wait can be canceled, we have a thread-safe Flag object which is
 // set when the wait has been canceled. At each stage in the above, we check the
 // flag before going onto the next stage. Since the wait may only be canceled in
-// the sequence which runs the Task, we are assured that the callback cannot be
-// called after canceling...
+// the MessageLoop which runs the Task, we are assured that the delegate cannot
+// be called after canceling...
 
 // -----------------------------------------------------------------------------
 // A thread-safe, reference-counted, write-once flag.
@@ -55,22 +54,23 @@
 };
 
 // -----------------------------------------------------------------------------
-// This is an asynchronous waiter which posts a task to a SequencedTaskRunner
-// when fired. An AsyncWaiter may only be in a single wait-list.
+// This is an asynchronous waiter which posts a task to a MessageLoop when
+// fired. An AsyncWaiter may only be in a single wait-list.
 // -----------------------------------------------------------------------------
 class AsyncWaiter : public WaitableEvent::Waiter {
  public:
-  AsyncWaiter(scoped_refptr<SequencedTaskRunner> task_runner,
+  AsyncWaiter(MessageLoop* message_loop,
               const base::Closure& callback,
               Flag* flag)
-      : task_runner_(std::move(task_runner)),
+      : message_loop_(message_loop),
         callback_(callback),
-        flag_(flag) {}
+        flag_(flag) { }
 
   bool Fire(WaitableEvent* event) override {
     // Post the callback if we haven't been cancelled.
-    if (!flag_->value())
-      task_runner_->PostTask(FROM_HERE, callback_);
+    if (!flag_->value()) {
+      message_loop_->task_runner()->PostTask(FROM_HERE, callback_);
+    }
 
     // We are removed from the wait-list by the WaitableEvent itself. It only
     // remains to delete ourselves.
@@ -85,37 +85,37 @@
   bool Compare(void* tag) override { return tag == flag_.get(); }
 
  private:
-  const scoped_refptr<SequencedTaskRunner> task_runner_;
-  const base::Closure callback_;
-  const scoped_refptr<Flag> flag_;
+  MessageLoop *const message_loop_;
+  base::Closure callback_;
+  scoped_refptr<Flag> flag_;
 };
 
 // -----------------------------------------------------------------------------
-// For async waits we need to run a callback on a sequence. We do this by
-// posting an AsyncCallbackHelper task, which calls the callback and keeps track
-// of when the event is canceled.
+// For async waits we need to make a callback in a MessageLoop thread. We do
+// this by posting a callback, which calls the delegate and keeps track of when
+// the event is canceled.
 // -----------------------------------------------------------------------------
 void AsyncCallbackHelper(Flag* flag,
                          const WaitableEventWatcher::EventCallback& callback,
                          WaitableEvent* event) {
-  // Runs on the sequence that called StartWatching().
+  // Runs in MessageLoop thread.
   if (!flag->value()) {
-    // This is to let the WaitableEventWatcher know that the event has occured.
+    // This is to let the WaitableEventWatcher know that the event has occured
+    // because it needs to be able to return NULL from GetWatchedObject
     flag->Set();
     callback.Run(event);
   }
 }
 
-WaitableEventWatcher::WaitableEventWatcher() {
-  sequence_checker_.DetachFromSequence();
+WaitableEventWatcher::WaitableEventWatcher()
+    : message_loop_(NULL),
+      cancel_flag_(NULL),
+      waiter_(NULL),
+      event_(NULL) {
 }
 
 WaitableEventWatcher::~WaitableEventWatcher() {
-  // The destructor may be called from a different sequence than StartWatching()
-  // when there is no active watch. To avoid triggering a DCHECK in
-  // StopWatching(), do not call it when there is no active watch.
-  if (cancel_flag_ && !cancel_flag_->value())
-    StopWatching();
+  StopWatching();
 }
 
 // -----------------------------------------------------------------------------
@@ -125,44 +125,61 @@
 bool WaitableEventWatcher::StartWatching(
     WaitableEvent* event,
     const EventCallback& callback) {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
-  DCHECK(SequencedTaskRunnerHandle::Get());
+  MessageLoop *const current_ml = MessageLoop::current();
+  DCHECK(current_ml) << "Cannot create WaitableEventWatcher without a "
+                        "current MessageLoop";
 
   // A user may call StartWatching from within the callback function. In this
   // case, we won't know that we have finished watching, expect that the Flag
   // will have been set in AsyncCallbackHelper().
-  if (cancel_flag_.get() && cancel_flag_->value())
-    cancel_flag_ = nullptr;
+  if (cancel_flag_.get() && cancel_flag_->value()) {
+    if (message_loop_) {
+      message_loop_->RemoveDestructionObserver(this);
+      message_loop_ = NULL;
+    }
 
-  DCHECK(!cancel_flag_) << "StartWatching called while still watching";
+    cancel_flag_ = NULL;
+  }
+
+  DCHECK(!cancel_flag_.get()) << "StartWatching called while still watching";
 
   cancel_flag_ = new Flag;
-  const Closure internal_callback = base::Bind(
-      &AsyncCallbackHelper, base::RetainedRef(cancel_flag_), callback, event);
+  callback_ = callback;
+  internal_callback_ = base::Bind(
+      &AsyncCallbackHelper, base::RetainedRef(cancel_flag_), callback_, event);
   WaitableEvent::WaitableEventKernel* kernel = event->kernel_.get();
 
   AutoLock locked(kernel->lock_);
 
+  event_ = event;
+
   if (kernel->signaled_) {
     if (!kernel->manual_reset_)
       kernel->signaled_ = false;
 
     // No hairpinning - we can't call the delegate directly here. We have to
-    // post a task to the SequencedTaskRunnerHandle as usual.
-    SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE, internal_callback);
+    // enqueue a task on the MessageLoop as normal.
+    current_ml->task_runner()->PostTask(FROM_HERE, internal_callback_);
     return true;
   }
 
+  message_loop_ = current_ml;
+  current_ml->AddDestructionObserver(this);
+
   kernel_ = kernel;
-  waiter_ = new AsyncWaiter(SequencedTaskRunnerHandle::Get(), internal_callback,
-                            cancel_flag_.get());
+  waiter_ = new AsyncWaiter(current_ml, internal_callback_, cancel_flag_.get());
   event->Enqueue(waiter_);
 
   return true;
 }
 
 void WaitableEventWatcher::StopWatching() {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+  callback_.Reset();
+
+  if (message_loop_) {
+    message_loop_->RemoveDestructionObserver(this);
+    message_loop_ = NULL;
+  }
 
   if (!cancel_flag_.get())  // if not currently watching...
     return;
@@ -210,24 +227,44 @@
     // have been enqueued with the MessageLoop because the waiter was never
     // signaled)
     delete waiter_;
+    internal_callback_.Reset();
     cancel_flag_ = NULL;
     return;
   }
 
-  // Case 3: the waiter isn't on the wait-list, thus it was signaled. It may not
-  // have run yet, so we set the flag to tell it not to bother enqueuing the
-  // task on the SequencedTaskRunner, but to delete it instead. The Waiter
-  // deletes itself once run.
+  // Case 3: the waiter isn't on the wait-list, thus it was signaled. It may
+  // not have run yet, so we set the flag to tell it not to bother enqueuing the
+  // task on the MessageLoop, but to delete it instead. The Waiter deletes
+  // itself once run.
   cancel_flag_->Set();
   cancel_flag_ = NULL;
 
   // If the waiter has already run then the task has been enqueued. If the Task
   // hasn't yet run, the flag will stop the delegate from getting called. (This
-  // is thread safe because one may only delete a Handle from the sequence that
-  // called StartWatching()).
+  // is thread safe because one may only delete a Handle from the MessageLoop
+  // thread.)
   //
   // If the delegate has already been called then we have nothing to do. The
   // task has been deleted by the MessageLoop.
 }
 
+WaitableEvent* WaitableEventWatcher::GetWatchedEvent() {
+  if (!cancel_flag_.get())
+    return NULL;
+
+  if (cancel_flag_->value())
+    return NULL;
+
+  return event_;
+}
+
+// -----------------------------------------------------------------------------
+// This is called when the MessageLoop which the callback will be run it is
+// deleted. We need to cancel the callback as if we had been deleted, but we
+// will still be deleted at some point in the future.
+// -----------------------------------------------------------------------------
+void WaitableEventWatcher::WillDestroyCurrentMessageLoop() {
+  StopWatching();
+}
+
 }  // namespace base
diff --git a/base/sys_byteorder.h b/base/sys_byteorder.h
index 9ee1827..8d9066c 100644
--- a/base/sys_byteorder.h
+++ b/base/sys_byteorder.h
@@ -13,7 +13,6 @@
 
 #include <stdint.h>
 
-#include "base/logging.h"
 #include "build/build_config.h"
 
 #if defined(COMPILER_MSVC)
@@ -47,21 +46,6 @@
 #endif
 }
 
-inline uintptr_t ByteSwapUintPtrT(uintptr_t x) {
-  // We do it this way because some build configurations are ILP32 even when
-  // defined(ARCH_CPU_64_BITS). Unfortunately, we can't use sizeof in #ifs. But,
-  // because these conditionals are constexprs, the irrelevant branches will
-  // likely be optimized away, so this construction should not result in code
-  // bloat.
-  if (sizeof(uintptr_t) == 4) {
-    return ByteSwap(static_cast<uint32_t>(x));
-  } else if (sizeof(uintptr_t) == 8) {
-    return ByteSwap(static_cast<uint64_t>(x));
-  } else {
-    NOTREACHED();
-  }
-}
-
 // Converts the bytes in |x| from host order (endianness) to little endian, and
 // returns the result.
 inline uint16_t ByteSwapToLE16(uint16_t x) {
diff --git a/base/sys_info.h b/base/sys_info.h
index e35feff..b107477 100644
--- a/base/sys_info.h
+++ b/base/sys_info.h
@@ -107,19 +107,9 @@
   static bool GetLsbReleaseValue(const std::string& key, std::string* value);
 
   // Convenience function for GetLsbReleaseValue("CHROMEOS_RELEASE_BOARD",...).
-  // Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set. Otherwise returns
-  // the full name of the board. WARNING: the returned value often differs in
-  // developer built system compared to devices that use the official version.
-  // E.g. for developer built version, the function could return 'glimmer' while
-  // for officially used versions it would be like 'glimmer-signed-mp-v4keys'.
-  // Use GetStrippedReleaseBoard() function if you need only the short name of
-  // the board (would be 'glimmer' in the case described above).
+  // Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
   static std::string GetLsbReleaseBoard();
 
-  // Convenience function for GetLsbReleaseBoard() removing trailing "-signed-*"
-  // if present. Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
-  static std::string GetStrippedReleaseBoard();
-
   // Returns the creation time of /etc/lsb-release. (Used to get the date and
   // time of the Chrome OS build).
   static Time GetLsbReleaseTime();
diff --git a/base/sys_info_chromeos.cc b/base/sys_info_chromeos.cc
index 29f8384..3794ed9 100644
--- a/base/sys_info_chromeos.cc
+++ b/base/sys_info_chromeos.cc
@@ -163,7 +163,7 @@
   bool is_running_on_chromeos_;
 };
 
-static LazyInstance<ChromeOSVersionInfo>::Leaky
+static LazyInstance<ChromeOSVersionInfo>
     g_chrome_os_version_info = LAZY_INSTANCE_INITIALIZER;
 
 ChromeOSVersionInfo& GetChromeOSVersionInfo() {
@@ -200,16 +200,6 @@
 }
 
 // static
-std::string SysInfo::GetStrippedReleaseBoard() {
-  std::string board = GetLsbReleaseBoard();
-  const size_t index = board.find("-signed-");
-  if (index != std::string::npos)
-    board.resize(index);
-
-  return base::ToLowerASCII(board);
-}
-
-// static
 Time SysInfo::GetLsbReleaseTime() {
   return GetChromeOSVersionInfo().lsb_release_time();
 }
diff --git a/base/sys_info_mac.mm b/base/sys_info_mac.mm
index f8d668c..102d99f 100644
--- a/base/sys_info_mac.mm
+++ b/base/sys_info_mac.mm
@@ -47,12 +47,18 @@
     *minor_version = version.minorVersion;
     *bugfix_version = version.patchVersion;
   } else {
+#else
+  // Android buildbots are too old and have trouble using the forward
+  // declarations for some reason. Conditionally-compile the above block
+  // only when building on a more modern version of OS X.
+  if (true) {
+#endif
     // -[NSProcessInfo operatingSystemVersion] is documented available in 10.10.
     // It's also available via a private API since 10.9.2. For the remaining
     // cases in 10.9, rely on ::Gestalt(..). Since this code is only needed for
     // 10.9.0 and 10.9.1 and uses the recommended replacement thereafter,
     // suppress the warning for this fallback case.
-    DCHECK(base::mac::IsOS10_9());
+    DCHECK(base::mac::IsOSMavericks());
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
     Gestalt(gestaltSystemVersionMajor,
diff --git a/base/sys_info_posix.cc b/base/sys_info_posix.cc
index cbdfa3f..5d1c450 100644
--- a/base/sys_info_posix.cc
+++ b/base/sys_info_posix.cc
@@ -28,11 +28,6 @@
 #include <sys/statvfs.h>
 #endif
 
-#if defined(OS_LINUX)
-#include <linux/magic.h>
-#include <sys/vfs.h>
-#endif
-
 namespace {
 
 #if !defined(OS_OPENBSD)
@@ -78,23 +73,6 @@
     base::internal::LazySysInfoValue<int64_t, AmountOfVirtualMemory>>::Leaky
     g_lazy_virtual_memory = LAZY_INSTANCE_INITIALIZER;
 
-#if defined(OS_LINUX)
-bool IsStatsZeroIfUnlimited(const base::FilePath& path) {
-  struct statfs stats;
-
-  if (HANDLE_EINTR(statfs(path.value().c_str(), &stats)) != 0)
-    return false;
-
-  switch (static_cast<uint32_t>(stats.f_type)) {
-    case TMPFS_MAGIC:
-    case HUGETLBFS_MAGIC:
-    case RAMFS_MAGIC:
-      return true;
-  }
-  return false;
-}
-#endif
-
 bool GetDiskSpaceInfo(const base::FilePath& path,
                       int64_t* available_bytes,
                       int64_t* total_bytes) {
@@ -102,25 +80,10 @@
   if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
     return false;
 
-#if defined(OS_LINUX)
-  const bool zero_size_means_unlimited =
-      stats.f_blocks == 0 && IsStatsZeroIfUnlimited(path);
-#else
-  const bool zero_size_means_unlimited = false;
-#endif
-
-  if (available_bytes) {
-    *available_bytes =
-        zero_size_means_unlimited
-            ? std::numeric_limits<int64_t>::max()
-            : static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
-  }
-
-  if (total_bytes) {
-    *total_bytes = zero_size_means_unlimited
-                       ? std::numeric_limits<int64_t>::max()
-                       : static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
-  }
+  if (available_bytes)
+    *available_bytes = static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+  if (total_bytes)
+    *total_bytes = static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
   return true;
 }
 
diff --git a/base/sys_info_unittest.cc b/base/sys_info_unittest.cc
index c3b8507..0231df6 100644
--- a/base/sys_info_unittest.cc
+++ b/base/sys_info_unittest.cc
@@ -156,14 +156,4 @@
   EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
 }
 
-TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
-  const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
-  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
-  EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
-
-  const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
-  base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
-  EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
-}
-
 #endif  // OS_CHROMEOS
diff --git a/base/task/cancelable_task_tracker.cc b/base/task/cancelable_task_tracker.cc
index 9999c18..6f39410 100644
--- a/base/task/cancelable_task_tracker.cc
+++ b/base/task/cancelable_task_tracker.cc
@@ -8,14 +8,21 @@
 
 #include <utility>
 
+#include "base/bind.h"
 #include "base/callback_helpers.h"
+#include "base/compiler_specific.h"
 #include "base/location.h"
 #include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
 #include "base/synchronization/cancellation_flag.h"
 #include "base/task_runner.h"
-#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
-namespace base {
+using base::Bind;
+using base::CancellationFlag;
+using base::Closure;
+using base::hash_map;
+using base::TaskRunner;
 
 namespace {
 
@@ -50,6 +57,8 @@
 
 }  // namespace
 
+namespace base {
+
 // static
 const CancelableTaskTracker::TaskId CancelableTaskTracker::kBadTaskId = 0;
 
@@ -57,7 +66,7 @@
     : next_id_(1),weak_factory_(this) {}
 
 CancelableTaskTracker::~CancelableTaskTracker() {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(thread_checker_.CalledOnValidThread());
 
   TryCancelAll();
 }
@@ -66,7 +75,7 @@
     TaskRunner* task_runner,
     const tracked_objects::Location& from_here,
     const Closure& task) {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(thread_checker_.CalledOnValidThread());
 
   return PostTaskAndReply(task_runner, from_here, task, Bind(&base::DoNothing));
 }
@@ -74,12 +83,12 @@
 CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
     TaskRunner* task_runner,
     const tracked_objects::Location& from_here,
-    Closure task,
-    Closure reply) {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+    const Closure& task,
+    const Closure& reply) {
+  DCHECK(thread_checker_.CalledOnValidThread());
 
-  // We need a SequencedTaskRunnerHandle to run |reply|.
-  DCHECK(base::SequencedTaskRunnerHandle::IsSet());
+  // We need a MessageLoop to run reply.
+  DCHECK(base::ThreadTaskRunnerHandle::IsSet());
 
   // Owned by reply callback below.
   CancellationFlag* flag = new CancellationFlag();
@@ -87,12 +96,15 @@
   TaskId id = next_id_;
   next_id_++;  // int64_t is big enough that we ignore the potential overflow.
 
-  Closure untrack_closure =
+  const Closure& untrack_closure =
       Bind(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id);
-  bool success = task_runner->PostTaskAndReply(
-      from_here, Bind(&RunIfNotCanceled, flag, std::move(task)),
-      Bind(&RunIfNotCanceledThenUntrack, base::Owned(flag), std::move(reply),
-           std::move(untrack_closure)));
+  bool success =
+      task_runner->PostTaskAndReply(from_here,
+                                    Bind(&RunIfNotCanceled, flag, task),
+                                    Bind(&RunIfNotCanceledThenUntrack,
+                                         base::Owned(flag),
+                                         reply,
+                                         untrack_closure));
 
   if (!success)
     return kBadTaskId;
@@ -103,8 +115,8 @@
 
 CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
     IsCanceledCallback* is_canceled_cb) {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
-  DCHECK(base::SequencedTaskRunnerHandle::IsSet());
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(base::ThreadTaskRunnerHandle::IsSet());
 
   TaskId id = next_id_;
   next_id_++;  // int64_t is big enough that we ignore the potential overflow.
@@ -117,11 +129,11 @@
       Bind(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id),
       flag);
 
-  // Will always run |untrack_and_delete_flag| on current sequence.
+  // Will always run |untrack_and_delete_flag| on current MessageLoop.
   base::ScopedClosureRunner* untrack_and_delete_flag_runner =
       new base::ScopedClosureRunner(
           Bind(&RunOrPostToTaskRunner,
-               RetainedRef(base::SequencedTaskRunnerHandle::Get()),
+               RetainedRef(base::ThreadTaskRunnerHandle::Get()),
                untrack_and_delete_flag));
 
   *is_canceled_cb =
@@ -132,7 +144,7 @@
 }
 
 void CancelableTaskTracker::TryCancel(TaskId id) {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(thread_checker_.CalledOnValidThread());
 
   hash_map<TaskId, CancellationFlag*>::const_iterator it = task_flags_.find(id);
   if (it == task_flags_.end()) {
@@ -148,7 +160,7 @@
 }
 
 void CancelableTaskTracker::TryCancelAll() {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(thread_checker_.CalledOnValidThread());
 
   for (hash_map<TaskId, CancellationFlag*>::const_iterator it =
            task_flags_.begin();
@@ -159,19 +171,19 @@
 }
 
 bool CancelableTaskTracker::HasTrackedTasks() const {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(thread_checker_.CalledOnValidThread());
   return !task_flags_.empty();
 }
 
 void CancelableTaskTracker::Track(TaskId id, CancellationFlag* flag) {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(thread_checker_.CalledOnValidThread());
 
   bool success = task_flags_.insert(std::make_pair(id, flag)).second;
   DCHECK(success);
 }
 
 void CancelableTaskTracker::Untrack(TaskId id) {
-  DCHECK(sequence_checker_.CalledOnValidSequence());
+  DCHECK(thread_checker_.CalledOnValidThread());
   size_t num = task_flags_.erase(id);
   DCHECK_EQ(1u, num);
 }
diff --git a/base/task/cancelable_task_tracker.h b/base/task/cancelable_task_tracker.h
index 4f64a24..86b5a45 100644
--- a/base/task/cancelable_task_tracker.h
+++ b/base/task/cancelable_task_tracker.h
@@ -15,38 +15,36 @@
 //
 // CancelableCallback (base/cancelable_callback.h) and WeakPtr binding are
 // preferred solutions for canceling a task. However, they don't support
-// cancelation from another sequence. This is sometimes a performance critical
+// cancelation from another thread. This is sometimes a performance critical
 // requirement. E.g. We need to cancel database lookup task on DB thread when
 // user changes inputed text. If it is performance critical to do a best effort
-// cancelation of a task, then CancelableTaskTracker is appropriate, otherwise
-// use one of the other mechanisms.
+// cancelation of a task, then CancelableTaskTracker is appropriate,
+// otherwise use one of the other mechanisms.
 //
 // THREAD-SAFETY:
 //
-// 1. A CancelableTaskTracker object must be created, used, and destroyed on a
-//    single sequence.
+// 1. CancelableTaskTracker objects are not thread safe. They must
+// be created, used, and destroyed on the originating thread that posts the
+// task. It's safe to destroy a CancelableTaskTracker while there
+// are outstanding tasks. This is commonly used to cancel all outstanding
+// tasks.
 //
-// 2. It's safe to destroy a CancelableTaskTracker while there are outstanding
-//    tasks. This is commonly used to cancel all outstanding tasks.
+// 2. Both task and reply are deleted on the originating thread.
 //
-// 3. Both task and reply are deleted on the originating sequence.
-//
-// 4. IsCanceledCallback can be run or deleted on any sequence.
+// 3. IsCanceledCallback is thread safe and can be run or deleted on any
+// thread.
 #ifndef BASE_TASK_CANCELABLE_TASK_TRACKER_H_
 #define BASE_TASK_CANCELABLE_TASK_TRACKER_H_
 
 #include <stdint.h>
 
-#include <utility>
-
 #include "base/base_export.h"
-#include "base/bind.h"
 #include "base/callback.h"
 #include "base/containers/hash_tables.h"
 #include "base/macros.h"
 #include "base/memory/weak_ptr.h"
-#include "base/post_task_and_reply_with_result_internal.h"
-#include "base/sequence_checker.h"
+#include "base/task_runner_util.h"
+#include "base/threading/thread_checker.h"
 
 namespace tracked_objects {
 class Location;
@@ -76,21 +74,25 @@
 
   TaskId PostTaskAndReply(base::TaskRunner* task_runner,
                           const tracked_objects::Location& from_here,
-                          base::Closure task,
-                          base::Closure reply);
+                          const base::Closure& task,
+                          const base::Closure& reply);
 
   template <typename TaskReturnType, typename ReplyArgType>
-  TaskId PostTaskAndReplyWithResult(base::TaskRunner* task_runner,
-                                    const tracked_objects::Location& from_here,
-                                    base::Callback<TaskReturnType()> task,
-                                    base::Callback<void(ReplyArgType)> reply) {
+  TaskId PostTaskAndReplyWithResult(
+      base::TaskRunner* task_runner,
+      const tracked_objects::Location& from_here,
+      const base::Callback<TaskReturnType(void)>& task,
+      const base::Callback<void(ReplyArgType)>& reply) {
     TaskReturnType* result = new TaskReturnType();
     return PostTaskAndReply(
-        task_runner, from_here,
+        task_runner,
+        from_here,
         base::Bind(&base::internal::ReturnAsParamAdapter<TaskReturnType>,
-                   std::move(task), base::Unretained(result)),
+                   task,
+                   base::Unretained(result)),
         base::Bind(&base::internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
-                   std::move(reply), base::Owned(result)));
+                   reply,
+                   base::Owned(result)));
   }
 
   // Creates a tracked TaskId and an associated IsCanceledCallback. Client can
@@ -128,7 +130,7 @@
   base::hash_map<TaskId, base::CancellationFlag*> task_flags_;
 
   TaskId next_id_;
-  SequenceChecker sequence_checker_;
+  base::ThreadChecker thread_checker_;
 
   base::WeakPtrFactory<CancelableTaskTracker> weak_factory_;
 
diff --git a/base/task/cancelable_task_tracker_unittest.cc b/base/task/cancelable_task_tracker_unittest.cc
index fd480f3..ff9e40b 100644
--- a/base/task/cancelable_task_tracker_unittest.cc
+++ b/base/task/cancelable_task_tracker_unittest.cc
@@ -15,7 +15,6 @@
 #include "base/memory/weak_ptr.h"
 #include "base/run_loop.h"
 #include "base/single_thread_task_runner.h"
-#include "base/test/gtest_util.h"
 #include "base/test/test_simple_task_runner.h"
 #include "base/threading/thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -113,7 +112,7 @@
       test_task_runner.get(), FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE));
   EXPECT_NE(CancelableTaskTracker::kBadTaskId, task_id);
 
-  EXPECT_EQ(1U, test_task_runner->NumPendingTasks());
+  EXPECT_EQ(1U, test_task_runner->GetPendingTasks().size());
 
   task_tracker_.TryCancel(task_id);
 
@@ -345,11 +344,23 @@
   }
 };
 
+// Duplicated from base/threading/thread_checker.h so that we can be
+// good citizens there and undef the macro.
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#define ENABLE_THREAD_CHECKER 1
+#else
+#define ENABLE_THREAD_CHECKER 0
+#endif
+
 // Runs |fn| with |task_tracker|, expecting it to crash in debug mode.
 void MaybeRunDeadlyTaskTrackerMemberFunction(
     CancelableTaskTracker* task_tracker,
     const Callback<void(CancelableTaskTracker*)>& fn) {
-  EXPECT_DCHECK_DEATH(fn.Run(task_tracker));
+// CancelableTask uses DCHECKs with its ThreadChecker (itself only
+// enabled in debug mode).
+#if ENABLE_THREAD_CHECKER
+  EXPECT_DEATH_IF_SUPPORTED(fn.Run(task_tracker), "");
+#endif
 }
 
 void PostDoNothingTask(CancelableTaskTracker* task_tracker) {
diff --git a/base/task_runner.cc b/base/task_runner.cc
index 35c0a23..262e1f8 100644
--- a/base/task_runner.cc
+++ b/base/task_runner.cc
@@ -4,8 +4,6 @@
 
 #include "base/task_runner.h"
 
-#include <utility>
-
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/threading/post_task_and_reply_impl.h"
@@ -47,11 +45,12 @@
   return PostDelayedTask(from_here, task, base::TimeDelta());
 }
 
-bool TaskRunner::PostTaskAndReply(const tracked_objects::Location& from_here,
-                                  Closure task,
-                                  Closure reply) {
+bool TaskRunner::PostTaskAndReply(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    const Closure& reply) {
   return PostTaskAndReplyTaskRunner(this).PostTaskAndReply(
-      from_here, std::move(task), std::move(reply));
+      from_here, task, reply);
 }
 
 TaskRunner::TaskRunner() {}
diff --git a/base/task_runner.h b/base/task_runner.h
index be3039d..9593835 100644
--- a/base/task_runner.h
+++ b/base/task_runner.h
@@ -8,7 +8,7 @@
 #include <stddef.h>
 
 #include "base/base_export.h"
-#include "base/callback.h"
+#include "base/callback_forward.h"
 #include "base/location.h"
 #include "base/memory/ref_counted.h"
 #include "base/time/time.h"
@@ -123,8 +123,8 @@
   //     and the reply will cancel itself safely because it is bound to a
   //     WeakPtr<>.
   bool PostTaskAndReply(const tracked_objects::Location& from_here,
-                        Closure task,
-                        Closure reply);
+                        const Closure& task,
+                        const Closure& reply);
 
  protected:
   friend struct TaskRunnerTraits;
diff --git a/base/task_runner_util.h b/base/task_runner_util.h
index 7fda076..ba8e120 100644
--- a/base/task_runner_util.h
+++ b/base/task_runner_util.h
@@ -5,17 +5,37 @@
 #ifndef BASE_TASK_RUNNER_UTIL_H_
 #define BASE_TASK_RUNNER_UTIL_H_
 
-#include <utility>
-
 #include "base/bind.h"
 #include "base/bind_helpers.h"
-#include "base/callback.h"
 #include "base/logging.h"
-#include "base/post_task_and_reply_with_result_internal.h"
 #include "base/task_runner.h"
 
 namespace base {
 
+namespace internal {
+
+// Adapts a function that produces a result via a return value to
+// one that returns via an output parameter.
+template <typename ReturnType>
+void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
+                          ReturnType* result) {
+  *result = func.Run();
+}
+
+// Adapts a T* result to a callblack that expects a T.
+template <typename TaskReturnType, typename ReplyArgType>
+void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
+                  TaskReturnType* result) {
+  // TODO(ajwong): Remove this conditional and add a DCHECK to enforce that
+  // |reply| must be non-null in PostTaskAndReplyWithResult() below after
+  // current code that relies on this API softness has been removed.
+  // http://crbug.com/162712
+  if (!callback.is_null())
+    callback.Run(std::move(*result));
+}
+
+}  // namespace internal
+
 // When you have these methods
 //
 //   R DoWorkAndReturn();
@@ -31,18 +51,18 @@
 //     Bind(&DoWorkAndReturn),
 //     Bind(&Callback));
 template <typename TaskReturnType, typename ReplyArgType>
-bool PostTaskAndReplyWithResult(TaskRunner* task_runner,
-                                const tracked_objects::Location& from_here,
-                                Callback<TaskReturnType()> task,
-                                Callback<void(ReplyArgType)> reply) {
-  DCHECK(task);
-  DCHECK(reply);
+bool PostTaskAndReplyWithResult(
+    TaskRunner* task_runner,
+    const tracked_objects::Location& from_here,
+    const Callback<TaskReturnType(void)>& task,
+    const Callback<void(ReplyArgType)>& reply) {
   TaskReturnType* result = new TaskReturnType();
   return task_runner->PostTaskAndReply(
-      from_here, base::Bind(&internal::ReturnAsParamAdapter<TaskReturnType>,
-                            std::move(task), result),
-      base::Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
-                 std::move(reply), base::Owned(result)));
+      from_here,
+      base::Bind(&internal::ReturnAsParamAdapter<TaskReturnType>, task,
+                 result),
+      base::Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>, reply,
+                 base::Owned(result)));
 }
 
 }  // namespace base
diff --git a/base/task_scheduler/scheduler_lock_impl.cc b/base/task_scheduler/scheduler_lock_impl.cc
index d60f259..7480e18 100644
--- a/base/task_scheduler/scheduler_lock_impl.cc
+++ b/base/task_scheduler/scheduler_lock_impl.cc
@@ -67,30 +67,19 @@
     // Otherwise, make sure that the previous lock acquired is an allowed
     // predecessor.
     AutoLock auto_lock(allowed_predecessor_map_lock_);
-    // Using at() is exception-safe here as |lock| was registered already.
     const SchedulerLockImpl* allowed_predecessor =
         allowed_predecessor_map_.at(lock);
     DCHECK_EQ(acquired_locks->back(), allowed_predecessor);
   }
 
-  // Asserts that |lock|'s registered predecessor is safe. Because
-  // SchedulerLocks are registered at construction time and any predecessor
-  // specified on a SchedulerLock must already exist, the first registered
-  // SchedulerLock in a potential chain must have a null predecessor and is thus
-  // cycle-free. Any subsequent SchedulerLock with a predecessor must come from
-  // the set of registered SchedulerLocks. Since the registered SchedulerLocks
-  // only contain cycle-free SchedulerLocks, this subsequent SchedulerLock is
-  // itself cycle-free and may be safely added to the registered SchedulerLock
-  // set.
   void AssertSafePredecessor(const SchedulerLockImpl* lock) const {
     allowed_predecessor_map_lock_.AssertAcquired();
-    // Using at() is exception-safe here as |lock| was registered already.
-    const SchedulerLockImpl* predecessor = allowed_predecessor_map_.at(lock);
-    if (predecessor) {
-      DCHECK(allowed_predecessor_map_.find(predecessor) !=
-             allowed_predecessor_map_.end())
-          << "SchedulerLock was registered before its predecessor. "
-          << "Potential cycle detected";
+    for (const SchedulerLockImpl* predecessor =
+             allowed_predecessor_map_.at(lock);
+         predecessor != nullptr;
+         predecessor = allowed_predecessor_map_.at(predecessor)) {
+      DCHECK_NE(predecessor, lock) <<
+          "Scheduler lock predecessor cycle detected.";
     }
   }
 
diff --git a/base/task_scheduler/scheduler_lock_unittest.cc b/base/task_scheduler/scheduler_lock_unittest.cc
index 5518247..daa5025 100644
--- a/base/task_scheduler/scheduler_lock_unittest.cc
+++ b/base/task_scheduler/scheduler_lock_unittest.cc
@@ -10,7 +10,7 @@
 #include "base/macros.h"
 #include "base/rand_util.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/test/gtest_util.h"
+#include "base/task_scheduler/test_utils.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/simple_thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -131,7 +131,7 @@
   EXPECT_DCHECK_DEATH({
     lock.Acquire();
     predecessor.Acquire();
-  });
+  }, "");
 }
 
 TEST(TaskSchedulerLock, AcquireNonPredecessor) {
@@ -140,7 +140,7 @@
   EXPECT_DCHECK_DEATH({
     lock1.Acquire();
     lock2.Acquire();
-  });
+  }, "");
 }
 
 TEST(TaskSchedulerLock, AcquireMultipleLocksInOrder) {
@@ -172,7 +172,7 @@
   EXPECT_DCHECK_DEATH({
     lock1.Acquire();
     lock3.Acquire();
-  });
+  }, "");
 }
 
 TEST(TaskSchedulerLock, AcquireLocksDifferentThreadsSafely) {
@@ -258,7 +258,7 @@
     SchedulerLock lock;
   };
 
-  EXPECT_DCHECK_DEATH({ SelfReferentialLock lock; });
+  EXPECT_DCHECK_DEATH({ SelfReferentialLock lock; }, "");
 }
 
 TEST(TaskSchedulerLock, PredecessorCycle) {
@@ -269,7 +269,7 @@
     SchedulerLock lock2;
   };
 
-  EXPECT_DCHECK_DEATH({ LockCycle cycle; });
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
 }
 
 TEST(TaskSchedulerLock, PredecessorLongerCycle) {
@@ -288,7 +288,7 @@
     SchedulerLock lock5;
   };
 
-  EXPECT_DCHECK_DEATH({ LockCycle cycle; });
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
 }
 
 }  // namespace
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc b/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc
deleted file mode 100644
index a163863..0000000
--- a/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
-
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/threading/thread_local.h"
-
-namespace base {
-namespace internal {
-
-namespace {
-
-LazyInstance<ThreadLocalPointer<const TaskPriority>>::Leaky
-    tls_task_priority_for_current_thread = LAZY_INSTANCE_INITIALIZER;
-
-}  // namespace
-
-ScopedSetTaskPriorityForCurrentThread::ScopedSetTaskPriorityForCurrentThread(
-    TaskPriority priority)
-    : priority_(priority) {
-  DCHECK(!tls_task_priority_for_current_thread.Get().Get());
-  tls_task_priority_for_current_thread.Get().Set(&priority_);
-}
-
-ScopedSetTaskPriorityForCurrentThread::
-    ~ScopedSetTaskPriorityForCurrentThread() {
-  DCHECK_EQ(&priority_, tls_task_priority_for_current_thread.Get().Get());
-  tls_task_priority_for_current_thread.Get().Set(nullptr);
-}
-
-TaskPriority GetTaskPriorityForCurrentThread() {
-  const TaskPriority* priority =
-      tls_task_priority_for_current_thread.Get().Get();
-  return priority ? *priority : TaskPriority::USER_VISIBLE;
-}
-
-}  // namespace internal
-}  // namespace base
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread.h b/base/task_scheduler/scoped_set_task_priority_for_current_thread.h
deleted file mode 100644
index 4508911..0000000
--- a/base/task_scheduler/scoped_set_task_priority_for_current_thread.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
-#define BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/task_scheduler/task_traits.h"
-
-namespace base {
-namespace internal {
-
-class BASE_EXPORT ScopedSetTaskPriorityForCurrentThread {
- public:
-  // Within the scope of this object, GetTaskPriorityForCurrentThread() will
-  // return |priority|.
-  ScopedSetTaskPriorityForCurrentThread(TaskPriority priority);
-  ~ScopedSetTaskPriorityForCurrentThread();
-
- private:
-  const TaskPriority priority_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedSetTaskPriorityForCurrentThread);
-};
-
-// Returns the priority of the TaskScheduler task running on the current thread,
-// or TaskPriority::USER_VISIBLE if no TaskScheduler task is running on the
-// current thread.
-BASE_EXPORT TaskPriority GetTaskPriorityForCurrentThread();
-
-}  // namespace internal
-}  // namespace base
-
-#endif  // BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
diff --git a/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc b/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc
deleted file mode 100644
index c497af6..0000000
--- a/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
-
-#include "base/task_scheduler/task_traits.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace internal {
-
-TEST(TaskSchedulerScopedSetTaskPriorityForCurrentThreadTest,
-     ScopedSetTaskPriorityForCurrentThread) {
-  EXPECT_EQ(TaskPriority::USER_VISIBLE, GetTaskPriorityForCurrentThread());
-  {
-    ScopedSetTaskPriorityForCurrentThread
-        scoped_set_task_priority_for_current_thread(
-            TaskPriority::USER_BLOCKING);
-    EXPECT_EQ(TaskPriority::USER_BLOCKING, GetTaskPriorityForCurrentThread());
-  }
-  EXPECT_EQ(TaskPriority::USER_VISIBLE, GetTaskPriorityForCurrentThread());
-}
-
-}  // namespace internal
-}  // namespace base
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
index 601b540..4ecb605 100644
--- a/base/task_scheduler/sequence.cc
+++ b/base/task_scheduler/sequence.cc
@@ -26,30 +26,24 @@
   return queue_.size() == 1;
 }
 
-std::unique_ptr<Task> Sequence::TakeTask() {
+const Task* Sequence::PeekTask() const {
+  AutoSchedulerLock auto_lock(lock_);
+
+  if (queue_.empty())
+    return nullptr;
+
+  return queue_.front().get();
+}
+
+bool Sequence::PopTask() {
   AutoSchedulerLock auto_lock(lock_);
   DCHECK(!queue_.empty());
-  DCHECK(queue_.front());
 
   const int priority_index =
       static_cast<int>(queue_.front()->traits.priority());
   DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
   --num_tasks_per_priority_[priority_index];
 
-  return std::move(queue_.front());
-}
-
-TaskTraits Sequence::PeekTaskTraits() const {
-  AutoSchedulerLock auto_lock(lock_);
-  DCHECK(!queue_.empty());
-  DCHECK(queue_.front());
-  return queue_.front()->traits;
-}
-
-bool Sequence::Pop() {
-  AutoSchedulerLock auto_lock(lock_);
-  DCHECK(!queue_.empty());
-  DCHECK(!queue_.front());
   queue_.pop();
   return queue_.empty();
 }
diff --git a/base/task_scheduler/sequence.h b/base/task_scheduler/sequence.h
index ed1d0ac..3fa037f 100644
--- a/base/task_scheduler/sequence.h
+++ b/base/task_scheduler/sequence.h
@@ -13,7 +13,6 @@
 #include "base/base_export.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/sequence_token.h"
 #include "base/task_scheduler/scheduler_lock.h"
 #include "base/task_scheduler/sequence_sort_key.h"
 #include "base/task_scheduler/task.h"
@@ -22,10 +21,7 @@
 namespace base {
 namespace internal {
 
-// A Sequence holds slots each containing up to a single Task that must be
-// executed in posting order.
-//
-// In comments below, an "empty Sequence" is a Sequence with no slot.
+// A sequence holds tasks that must be executed in posting order.
 //
 // Note: there is a known refcounted-ownership cycle in the Scheduler
 // architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
@@ -44,38 +40,26 @@
  public:
   Sequence();
 
-  // Adds |task| in a new slot at the end of the Sequence. Returns true if the
-  // Sequence was empty before this operation.
+  // Adds |task| at the end of the sequence's queue. Returns true if the
+  // sequence was empty before this operation.
   bool PushTask(std::unique_ptr<Task> task);
 
-  // Transfers ownership of the Task in the front slot of the Sequence to the
-  // caller. The front slot of the Sequence will be nullptr and remain until
-  // Pop(). Cannot be called on an empty Sequence or a Sequence whose front slot
-  // is already nullptr.
-  std::unique_ptr<Task> TakeTask();
+  // Returns the task in front of the sequence's queue, if any.
+  const Task* PeekTask() const;
 
-  // Returns the TaskTraits of the Task in front of the Sequence. Cannot be
-  // called on an empty Sequence or on a Sequence whose front slot is empty.
-  TaskTraits PeekTaskTraits() const;
+  // Removes the task in front of the sequence's queue. Returns true if the
+  // sequence is empty after this operation. Cannot be called on an empty
+  // sequence.
+  bool PopTask();
 
-  // Removes the front slot of the Sequence. The front slot must have been
-  // emptied by TakeTask() before this is called. Cannot be called on an empty
-  // Sequence. Returns true if the Sequence is empty after this operation.
-  bool Pop();
-
-  // Returns a SequenceSortKey representing the priority of the Sequence. Cannot
-  // be called on an empty Sequence.
+  // Returns a SequenceSortKey representing the priority of the sequence. Cannot
+  // be called on an empty sequence.
   SequenceSortKey GetSortKey() const;
 
-  // Returns a token that uniquely identifies this Sequence.
-  const SequenceToken& token() const { return token_; }
-
  private:
   friend class RefCountedThreadSafe<Sequence>;
   ~Sequence();
 
-  const SequenceToken token_ = SequenceToken::Create();
-
   // Synchronizes access to all members.
   mutable SchedulerLock lock_;
 
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
index c45d8a8..6a15299 100644
--- a/base/task_scheduler/sequence_unittest.cc
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -4,12 +4,7 @@
 
 #include "base/task_scheduler/sequence.h"
 
-#include <utility>
-
-#include "base/bind.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/test/gtest_util.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -18,7 +13,6 @@
 
 namespace {
 
-
 class TaskSchedulerSequenceTest : public testing::Test {
  public:
   TaskSchedulerSequenceTest()
@@ -62,7 +56,7 @@
   std::unique_ptr<Task> task_e_owned_;
 
   // Raw pointers to those same tasks for verification. This is needed because
-  // the unique_ptrs above no longer point to the tasks once they have been
+  // the scoped_ptrs above no longer point to the tasks once they have been
   // moved into a Sequence.
   const Task* task_a_;
   const Task* task_b_;
@@ -76,54 +70,54 @@
 
 }  // namespace
 
-TEST_F(TaskSchedulerSequenceTest, PushTakeRemove) {
+TEST_F(TaskSchedulerSequenceTest, PushPopPeek) {
   scoped_refptr<Sequence> sequence(new Sequence);
 
   // Push task A in the sequence. Its sequenced time should be updated and it
   // should be in front of the sequence.
   EXPECT_TRUE(sequence->PushTask(std::move(task_a_owned_)));
   EXPECT_FALSE(task_a_->sequenced_time.is_null());
-  EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
 
   // Push task B, C and D in the sequence. Their sequenced time should be
   // updated and task A should always remain in front of the sequence.
   EXPECT_FALSE(sequence->PushTask(std::move(task_b_owned_)));
   EXPECT_FALSE(task_b_->sequenced_time.is_null());
-  EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
 
   EXPECT_FALSE(sequence->PushTask(std::move(task_c_owned_)));
   EXPECT_FALSE(task_c_->sequenced_time.is_null());
-  EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
 
   EXPECT_FALSE(sequence->PushTask(std::move(task_d_owned_)));
   EXPECT_FALSE(task_d_->sequenced_time.is_null());
-  EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
 
-  // Get the task in front of the sequence. It should be task A.
-  EXPECT_EQ(task_a_, sequence->TakeTask().get());
+  // Pop task A. Task B should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_b_, sequence->PeekTask());
 
-  // Remove the empty slot. Task B should now be in front.
-  EXPECT_FALSE(sequence->Pop());
-  EXPECT_EQ(task_b_, sequence->TakeTask().get());
+  // Pop task B. Task C should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_c_, sequence->PeekTask());
 
-  // Remove the empty slot. Task C should now be in front.
-  EXPECT_FALSE(sequence->Pop());
-  EXPECT_EQ(task_c_, sequence->TakeTask().get());
+  // Pop task C. Task D should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_d_, sequence->PeekTask());
 
-  // Remove the empty slot. Task D should now be in front.
-  EXPECT_FALSE(sequence->Pop());
-  EXPECT_EQ(task_d_, sequence->TakeTask().get());
-
-  // Push task E in the sequence. Its sequenced time should be updated.
+  // Push task E in the sequence. Its sequenced time should be updated and
+  // task D should remain in front.
   EXPECT_FALSE(sequence->PushTask(std::move(task_e_owned_)));
   EXPECT_FALSE(task_e_->sequenced_time.is_null());
+  EXPECT_EQ(task_d_, sequence->PeekTask());
 
-  // Remove the empty slot. Task E should now be in front.
-  EXPECT_FALSE(sequence->Pop());
-  EXPECT_EQ(task_e_, sequence->TakeTask().get());
+  // Pop task D. Task E should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_e_, sequence->PeekTask());
 
-  // Remove the empty slot. The sequence should now be empty.
-  EXPECT_TRUE(sequence->Pop());
+  // Pop task E. The sequence should now be empty.
+  EXPECT_TRUE(sequence->PopTask());
+  EXPECT_EQ(nullptr, sequence->PeekTask());
 }
 
 TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
@@ -158,24 +152,21 @@
 
   // Pop task A. The highest priority is still USER_BLOCKING. The task in front
   // of the sequence is now task B.
-  sequence->TakeTask();
-  sequence->Pop();
+  sequence->PopTask();
   EXPECT_EQ(
       SequenceSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time),
       sequence->GetSortKey());
 
   // Pop task B. The highest priority is still USER_BLOCKING. The task in front
   // of the sequence is now task C.
-  sequence->TakeTask();
-  sequence->Pop();
+  sequence->PopTask();
   EXPECT_EQ(
       SequenceSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time),
       sequence->GetSortKey());
 
   // Pop task C. The highest priority is still USER_BLOCKING. The task in front
   // of the sequence is now task D.
-  sequence->TakeTask();
-  sequence->Pop();
+  sequence->PopTask();
   EXPECT_EQ(
       SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
       sequence->GetSortKey());
@@ -189,38 +180,10 @@
 
   // Pop task D. The highest priority is now from task E (BACKGROUND). The
   // task in front of the sequence is now task E.
-  sequence->TakeTask();
-  sequence->Pop();
+  sequence->PopTask();
   EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time),
             sequence->GetSortKey());
 }
 
-// Verify that a DCHECK fires if Pop() is called on a sequence whose front slot
-// isn't empty.
-TEST_F(TaskSchedulerSequenceTest, PopNonEmptyFrontSlot) {
-  scoped_refptr<Sequence> sequence(new Sequence);
-  sequence->PushTask(
-      MakeUnique<Task>(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
-
-  EXPECT_DCHECK_DEATH({ sequence->Pop(); });
-}
-
-// Verify that a DCHECK fires if TakeTask() is called on a sequence whose front
-// slot is empty.
-TEST_F(TaskSchedulerSequenceTest, TakeEmptyFrontSlot) {
-  scoped_refptr<Sequence> sequence(new Sequence);
-  sequence->PushTask(
-      MakeUnique<Task>(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
-
-  EXPECT_TRUE(sequence->TakeTask());
-  EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
-}
-
-// Verify that a DCHECK fires if TakeTask() is called on an empty sequence.
-TEST_F(TaskSchedulerSequenceTest, TakeEmptySequence) {
-  scoped_refptr<Sequence> sequence(new Sequence);
-  EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
-}
-
 }  // namespace internal
 }  // namespace base
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
index 3780c16..8a589a2 100644
--- a/base/task_scheduler/task.cc
+++ b/base/task_scheduler/task.cc
@@ -10,20 +10,12 @@
 Task::Task(const tracked_objects::Location& posted_from,
            const Closure& task,
            const TaskTraits& traits,
-           TimeDelta delay)
+           const TimeDelta& delay)
     : PendingTask(posted_from,
                   task,
                   delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
                   false),  // Not nestable.
-      // Prevent a delayed BLOCK_SHUTDOWN task from blocking shutdown before
-      // being scheduled by changing its shutdown behavior to SKIP_ON_SHUTDOWN.
-      traits(!delay.is_zero() &&
-                     traits.shutdown_behavior() ==
-                         TaskShutdownBehavior::BLOCK_SHUTDOWN
-                 ? TaskTraits(traits).WithShutdownBehavior(
-                       TaskShutdownBehavior::SKIP_ON_SHUTDOWN)
-                 : traits),
-      delay(delay) {}
+      traits(traits) {}
 
 Task::~Task() = default;
 
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
index c5b9bdb..2b53c69 100644
--- a/base/task_scheduler/task.h
+++ b/base/task_scheduler/task.h
@@ -23,22 +23,17 @@
 // profiling inherited from PendingTask.
 struct BASE_EXPORT Task : public PendingTask {
   // |posted_from| is the site the task was posted from. |task| is the closure
-  // to run. |traits_in| is metadata about the task. |delay| is a delay that
-  // must expire before the Task runs. If |delay| is non-zero and the shutdown
-  // behavior in |traits| is BLOCK_SHUTDOWN, the shutdown behavior is
-  // automatically adjusted to SKIP_ON_SHUTDOWN.
+  // to run. |traits| is metadata about the task. |delay| is a delay that must
+  // expire before the Task runs.
   Task(const tracked_objects::Location& posted_from,
        const Closure& task,
        const TaskTraits& traits,
-       TimeDelta delay);
+       const TimeDelta& delay);
   ~Task();
 
   // The TaskTraits of this task.
   const TaskTraits traits;
 
-  // The delay that must expire before the task runs.
-  const TimeDelta delay;
-
   // The time at which the task was inserted in its sequence. For an undelayed
   // task, this happens at post time. For a delayed task, this happens some
   // time after the task's delay has expired. If the task hasn't been inserted
diff --git a/base/task_scheduler/task_traits.cc b/base/task_scheduler/task_traits.cc
index 6acf324..dd55535 100644
--- a/base/task_scheduler/task_traits.cc
+++ b/base/task_scheduler/task_traits.cc
@@ -8,29 +8,20 @@
 
 #include <ostream>
 
-#include "base/logging.h"
-#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
-
 namespace base {
 
 // Do not rely on defaults hard-coded below beyond the guarantees described in
 // the header; anything else is subject to change. Tasks should explicitly
 // request defaults if the behavior is critical to the task.
 TaskTraits::TaskTraits()
-    : may_block_(false),
-      with_base_sync_primitives_(false),
-      priority_(internal::GetTaskPriorityForCurrentThread()),
+    : with_file_io_(false),
+      priority_(TaskPriority::BACKGROUND),
       shutdown_behavior_(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {}
 
 TaskTraits::~TaskTraits() = default;
 
-TaskTraits& TaskTraits::MayBlock() {
-  may_block_ = true;
-  return *this;
-}
-
-TaskTraits& TaskTraits::WithBaseSyncPrimitives() {
-  with_base_sync_primitives_ = true;
+TaskTraits& TaskTraits::WithFileIO() {
+  with_file_io_ = true;
   return *this;
 }
 
@@ -45,41 +36,34 @@
   return *this;
 }
 
-const char* TaskPriorityToString(TaskPriority task_priority) {
+std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
   switch (task_priority) {
     case TaskPriority::BACKGROUND:
-      return "BACKGROUND";
+      os << "BACKGROUND";
+      break;
     case TaskPriority::USER_VISIBLE:
-      return "USER_VISIBLE";
+      os << "USER_VISIBLE";
+      break;
     case TaskPriority::USER_BLOCKING:
-      return "USER_BLOCKING";
+      os << "USER_BLOCKING";
+      break;
   }
-  NOTREACHED();
-  return "";
-}
-
-const char* TaskShutdownBehaviorToString(
-    TaskShutdownBehavior shutdown_behavior) {
-  switch (shutdown_behavior) {
-    case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
-      return "CONTINUE_ON_SHUTDOWN";
-    case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
-      return "SKIP_ON_SHUTDOWN";
-    case TaskShutdownBehavior::BLOCK_SHUTDOWN:
-      return "BLOCK_SHUTDOWN";
-  }
-  NOTREACHED();
-  return "";
-}
-
-std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
-  os << TaskPriorityToString(task_priority);
   return os;
 }
 
 std::ostream& operator<<(std::ostream& os,
                          const TaskShutdownBehavior& shutdown_behavior) {
-  os << TaskShutdownBehaviorToString(shutdown_behavior);
+  switch (shutdown_behavior) {
+    case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
+      os << "CONTINUE_ON_SHUTDOWN";
+      break;
+    case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
+      os << "SKIP_ON_SHUTDOWN";
+      break;
+    case TaskShutdownBehavior::BLOCK_SHUTDOWN:
+      os << "BLOCK_SHUTDOWN";
+      break;
+  }
   return os;
 }
 
diff --git a/base/task_scheduler/task_traits.h b/base/task_scheduler/task_traits.h
index 435fdac..0c0d304 100644
--- a/base/task_scheduler/task_traits.h
+++ b/base/task_scheduler/task_traits.h
@@ -78,57 +78,19 @@
 // Describes metadata for a single task or a group of tasks.
 class BASE_EXPORT TaskTraits {
  public:
-  // Constructs a default TaskTraits for tasks that
-  //     (1) don't block (ref. MayBlock() and WithBaseSyncPrimitives()),
-  //     (2) prefer inheriting the current priority to specifying their own, and
-  //     (3) can either block shutdown or be skipped on shutdown
-  //         (TaskScheduler implementation is free to choose a fitting default).
-  // Tasks that require stricter guarantees and/or know the specific
-  // TaskPriority appropriate for them should highlight those by requesting
+  // Constructs a default TaskTraits for tasks with
+  //     (1) no I/O,
+  //     (2) low priority, and
+  //     (3) may block shutdown or be skipped on shutdown.
+  // Tasks that require stricter guarantees should highlight those by requesting
   // explicit traits below.
   TaskTraits();
   TaskTraits(const TaskTraits& other) = default;
   TaskTraits& operator=(const TaskTraits& other) = default;
   ~TaskTraits();
 
-  // Tasks with this trait may block. This includes but is not limited to tasks
-  // that wait on synchronous file I/O operations: read or write a file from
-  // disk, interact with a pipe or a socket, rename or delete a file, enumerate
-  // files in a directory, etc. This trait isn't required for the mere use of
-  // locks. For tasks that block on base/ synchronization primitives, see
-  // WithBaseSyncPrimitives().
-  TaskTraits& MayBlock();
-
-  // Tasks with this trait will pass base::AssertWaitAllowed(), i.e. will be
-  // allowed on the following methods :
-  // - base::WaitableEvent::Wait
-  // - base::ConditionVariable::Wait
-  // - base::PlatformThread::Join
-  // - base::PlatformThread::Sleep
-  // - base::Process::WaitForExit
-  // - base::Process::WaitForExitWithTimeout
-  //
-  // Tasks should generally not use these methods.
-  //
-  // Instead of waiting on a WaitableEvent or a ConditionVariable, put the work
-  // that should happen after the wait in a callback and post that callback from
-  // where the WaitableEvent or ConditionVariable would have been signaled. If
-  // something needs to be scheduled after many tasks have executed, use
-  // base::BarrierClosure.
-  //
-  // Avoid creating threads. Instead, use
-  // base::Create(Sequenced|SingleTreaded)TaskRunnerWithTraits(). If a thread is
-  // really needed, make it non-joinable and add cleanup work at the end of the
-  // thread's main function (if using base::Thread, override Cleanup()).
-  //
-  // On Windows, join processes asynchronously using base::win::ObjectWatcher.
-  //
-  // MayBlock() must be specified in conjunction with this trait if and only if
-  // removing usage of methods listed above in the labeled tasks would still
-  // result in tasks that may block (per MayBlock()'s definition).
-  //
-  // In doubt, consult with //base/task_scheduler/OWNERS.
-  TaskTraits& WithBaseSyncPrimitives();
+  // Allows tasks with these traits to do file I/O.
+  TaskTraits& WithFileIO();
 
   // Applies |priority| to tasks with these traits.
   TaskTraits& WithPriority(TaskPriority priority);
@@ -136,11 +98,8 @@
   // Applies |shutdown_behavior| to tasks with these traits.
   TaskTraits& WithShutdownBehavior(TaskShutdownBehavior shutdown_behavior);
 
-  // Returns true if tasks with these traits may block.
-  bool may_block() const { return may_block_; }
-
-  // Returns true if tasks with these traits may use base/ sync primitives.
-  bool with_base_sync_primitives() const { return with_base_sync_primitives_; }
+  // Returns true if file I/O is allowed by these traits.
+  bool with_file_io() const { return with_file_io_; }
 
   // Returns the priority of tasks with these traits.
   TaskPriority priority() const { return priority_; }
@@ -149,22 +108,29 @@
   TaskShutdownBehavior shutdown_behavior() const { return shutdown_behavior_; }
 
  private:
-  bool may_block_;
-  bool with_base_sync_primitives_;
+  bool with_file_io_;
   TaskPriority priority_;
   TaskShutdownBehavior shutdown_behavior_;
 };
 
-// Returns string literals for the enums defined in this file. These methods
-// should only be used for tracing and debugging.
-BASE_EXPORT const char* TaskPriorityToString(TaskPriority task_priority);
-BASE_EXPORT const char* TaskShutdownBehaviorToString(
-    TaskShutdownBehavior task_priority);
+// Describes how tasks are executed by a task runner.
+enum class ExecutionMode {
+  // Can execute multiple tasks at a time in any order.
+  PARALLEL,
 
-// Stream operators so that the enums defined in this file can be used in
-// DCHECK and EXPECT statements.
+  // Executes one task at a time in posting order. The sequence’s priority is
+  // equivalent to the highest priority pending task in the sequence.
+  SEQUENCED,
+
+  // Executes one task at a time on a single thread in posting order.
+  SINGLE_THREADED,
+};
+
+// Stream operators so TaskPriority and TaskShutdownBehavior can be used in
+// DCHECK statements.
 BASE_EXPORT std::ostream& operator<<(std::ostream& os,
                                      const TaskPriority& shutdown_behavior);
+
 BASE_EXPORT std::ostream& operator<<(
     std::ostream& os,
     const TaskShutdownBehavior& shutdown_behavior);
diff --git a/base/task_scheduler/test_utils.h b/base/task_scheduler/test_utils.h
index dbd1227..bafd09a 100644
--- a/base/task_scheduler/test_utils.h
+++ b/base/task_scheduler/test_utils.h
@@ -5,16 +5,15 @@
 #ifndef BASE_TASK_SCHEDULER_TEST_UTILS_H_
 #define BASE_TASK_SCHEDULER_TEST_UTILS_H_
 
-namespace base {
-namespace internal {
-namespace test {
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
 
-// An enumeration of possible task scheduler TaskRunner types. Used to
-// parametrize relevant task_scheduler tests.
-enum class ExecutionMode { PARALLEL, SEQUENCED, SINGLE_THREADED };
-
-}  // namespace test
-}  // namespace internal
-}  // namespace base
+// Death tests misbehave on Android.
+#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define EXPECT_DCHECK_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
+#else
+#define EXPECT_DCHECK_DEATH(statement, regex)
+#endif
 
 #endif  // BASE_TASK_SCHEDULER_TEST_UTILS_H_
diff --git a/base/template_util.h b/base/template_util.h
index 4255210..1bfc1ac 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -23,28 +23,6 @@
 #define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
 #endif
 
-// Some versions of libstdc++ have partial support for type_traits, but misses
-// a smaller subset while removing some of the older non-standard stuff. Assume
-// that all versions below 5.0 fall in this category, along with one 5.0
-// experimental release. Test for this by consulting compiler major version,
-// the only reliable option available, so theoretically this could fail should
-// you attempt to mix an earlier version of libstdc++ with >= GCC5. But
-// that's unlikely to work out, especially as GCC5 changed ABI.
-#define CR_GLIBCXX_5_0_0 20150123
-#if (defined(__GNUC__) && __GNUC__ < 5) || \
-    (defined(__GLIBCXX__) && __GLIBCXX__ == CR_GLIBCXX_5_0_0)
-#define CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
-#endif
-
-// This hacks around using gcc with libc++ which has some incompatibilies.
-// - is_trivially_* doesn't work: https://llvm.org/bugs/show_bug.cgi?id=27538
-// TODO(danakj): Remove this when android builders are all using a newer version
-// of gcc, or the android ndk is updated to a newer libc++ that works with older
-// gcc versions.
-#if !defined(__clang__) && defined(_LIBCPP_VERSION)
-#define CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
-#endif
-
 namespace base {
 
 template <class T> struct is_non_const_reference : std::false_type {};
@@ -148,53 +126,8 @@
 using is_trivially_destructible = std::is_trivially_destructible<T>;
 #endif
 
-// is_trivially_copyable is especially hard to get right.
-// - Older versions of libstdc++ will fail to have it like they do for other
-//   type traits. In this case we should provide it based on compiler
-//   intrinsics. This is covered by the CR_USE_FALLBACKS_FOR_OLD_GLIBCXX define.
-// - An experimental release of gcc includes most of type_traits but misses
-//   is_trivially_copyable, so we still have to avoid using libstdc++ in this
-//   case, which is covered by CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX.
-// - When compiling libc++ from before r239653, with a gcc compiler, the
-//   std::is_trivially_copyable can fail. So we need to work around that by not
-//   using the one in libc++ in this case. This is covered by the
-//   CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX define, and is discussed in
-//   https://llvm.org/bugs/show_bug.cgi?id=27538#c1 where they point out that
-//   in libc++'s commit r239653 this is fixed by libc++ checking for gcc 5.1.
-// - In both of the above cases we are using the gcc compiler. When defining
-//   this ourselves on compiler intrinsics, the __is_trivially_copyable()
-//   intrinsic is not available on gcc before version 5.1 (see the discussion in
-//   https://llvm.org/bugs/show_bug.cgi?id=27538#c1 again), so we must check for
-//   that version.
-// - When __is_trivially_copyable() is not available because we are on gcc older
-//   than 5.1, we need to fall back to something, so we use __has_trivial_copy()
-//   instead based on what was done one-off in bit_cast() previously.
-
-// TODO(crbug.com/554293): Remove this when all platforms have this in the std
-// namespace and it works with gcc as needed.
-#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX) ||              \
-    defined(CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX) || \
-    defined(CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX)
-template <typename T>
-struct is_trivially_copyable {
-// TODO(danakj): Remove this when android builders are all using a newer version
-// of gcc, or the android ndk is updated to a newer libc++ that does this for
-// us.
-#if _GNUC_VER >= 501
-  static constexpr bool value = __is_trivially_copyable(T);
-#else
-  static constexpr bool value = __has_trivial_copy(T);
-#endif
-};
-#else
-template <class T>
-using is_trivially_copyable = std::is_trivially_copyable<T>;
-#endif
-
 }  // namespace base
 
 #undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
-#undef CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
-#undef CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
 
 #endif  // BASE_TEMPLATE_UTIL_H_
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index 844707e..51863a2 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -22,16 +22,11 @@
   ]
 }
 
+# GYP: //base/base.gyp:test_support_base
 static_library("test_support") {
   testonly = true
   sources = [
     "../trace_event/trace_config_memory_test_util.h",
-    "android/java_handler_thread_for_testing.cc",
-    "android/java_handler_thread_for_testing.h",
-    "android/test_system_message_handler_link_android.cc",
-    "android/test_system_message_handler_link_android.h",
-    "fuzzed_data_provider.cc",
-    "fuzzed_data_provider.h",
     "gtest_util.cc",
     "gtest_util.h",
     "gtest_xml_unittest_result_printer.cc",
@@ -40,15 +35,12 @@
     "gtest_xml_util.h",
     "histogram_tester.cc",
     "histogram_tester.h",
-    "icu_test_util.cc",
-    "icu_test_util.h",
     "ios/wait_util.h",
     "ios/wait_util.mm",
     "launcher/test_result.cc",
     "launcher/test_result.h",
     "launcher/test_results_tracker.h",
     "launcher/unit_test_launcher.h",
-    "mock_callback.h",
     "mock_chrome_application_mac.h",
     "mock_chrome_application_mac.mm",
     "mock_devices_changed_observer.cc",
@@ -57,9 +49,7 @@
     "mock_entropy_provider.h",
     "mock_log.cc",
     "mock_log.h",
-    "multiprocess_test.cc",
     "multiprocess_test.h",
-    "multiprocess_test_android.cc",
     "null_task_runner.cc",
     "null_task_runner.h",
     "opaque_ref_counted.cc",
@@ -72,20 +62,12 @@
     "perf_time_logger.h",
     "power_monitor_test_base.cc",
     "power_monitor_test_base.h",
-    "scoped_async_task_scheduler.cc",
-    "scoped_async_task_scheduler.h",
     "scoped_command_line.cc",
     "scoped_command_line.h",
-    "scoped_feature_list.cc",
-    "scoped_feature_list.h",
     "scoped_locale.cc",
     "scoped_locale.h",
-    "scoped_mock_time_message_loop_task_runner.cc",
-    "scoped_mock_time_message_loop_task_runner.h",
     "scoped_path_override.cc",
     "scoped_path_override.h",
-    "scoped_task_scheduler.cc",
-    "scoped_task_scheduler.h",
     "sequenced_task_runner_test_template.cc",
     "sequenced_task_runner_test_template.h",
     "sequenced_worker_pool_owner.cc",
@@ -151,6 +133,8 @@
       "launcher/test_launcher_tracer.h",
       "launcher/test_results_tracker.cc",
       "launcher/unit_test_launcher.cc",
+      "multiprocess_test.cc",
+      "multiprocess_test_android.cc",
     ]
   }
 
@@ -194,11 +178,7 @@
   }
 
   if (is_android) {
-    deps += [
-      ":base_unittests_jni_headers",
-      ":test_support_jni_headers",
-    ]
-    public_deps += [ ":test_support_java" ]
+    deps += [ ":base_unittests_jni_headers" ]
   }
 
   if (is_nacl_nonsfi) {
@@ -211,8 +191,6 @@
     sources -= [
       "gtest_xml_util.cc",
       "gtest_xml_util.h",
-      "icu_test_util.cc",
-      "icu_test_util.h",
       "perf_test_suite.cc",
       "perf_test_suite.h",
       "scoped_path_override.cc",
@@ -277,41 +255,6 @@
   ]
 }
 
-# These sources are linked into both the base_unittests binary and the test
-# shared library target below.
-source_set("native_library_test_utils") {
-  testonly = true
-  sources = [
-    "native_library_test_utils.cc",
-    "native_library_test_utils.h",
-  ]
-}
-
-# This shared library is dynamically loaded by NativeLibrary unittests.
-shared_library("test_shared_library") {
-  testonly = true
-  sources = [
-    "test_shared_library.cc",
-  ]
-
-  deps = [
-    ":native_library_test_utils",
-  ]
-}
-
-static_library("run_all_base_unittests") {
-  # Only targets in base should depend on this, targets outside base
-  # should depend on run_all_unittests above.
-  visibility = [ "//base/*" ]
-  testonly = true
-  sources = [
-    "run_all_base_unittests.cc",
-  ]
-  deps = [
-    ":test_support",
-  ]
-}
-
 if (is_linux) {
   shared_library("malloc_wrapper") {
     testonly = true
@@ -329,47 +272,8 @@
   generate_jni("base_unittests_jni_headers") {
     sources = [
       "android/java/src/org/chromium/base/ContentUriTestUtils.java",
-      "android/java/src/org/chromium/base/TestSystemMessageHandler.java",
       "android/java/src/org/chromium/base/TestUiThread.java",
     ]
     jni_package = "base"
   }
-
-  generate_jni("test_support_jni_headers") {
-    sources = [
-      "android/java/src/org/chromium/base/MainReturnCodeResult.java",
-      "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
-    ]
-    jni_package = "base"
-  }
-
-  android_library("test_support_java") {
-    testonly = true
-    deps = [
-      "//base:base_java",
-      "//testing/android/native_test:native_main_runner_java",
-      "//third_party/android_tools:android_support_annotations_java",
-      "//third_party/jsr-305:jsr_305_javalib",
-    ]
-    srcjar_deps = [ ":test_support_java_aidl" ]
-    java_files = [
-      "android/java/src/org/chromium/base/FileDescriptorInfo.java",
-      "android/java/src/org/chromium/base/MainReturnCodeResult.java",
-      "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
-      "android/java/src/org/chromium/base/MultiprocessTestClientService.java",
-      "android/java/src/org/chromium/base/MultiprocessTestClientService0.java",
-      "android/java/src/org/chromium/base/MultiprocessTestClientService1.java",
-      "android/java/src/org/chromium/base/MultiprocessTestClientService2.java",
-      "android/java/src/org/chromium/base/MultiprocessTestClientService3.java",
-      "android/java/src/org/chromium/base/MultiprocessTestClientService4.java",
-    ]
-  }
-
-  android_aidl("test_support_java_aidl") {
-    testonly = true
-    import_include = [ "android/java/src" ]
-    sources = [
-      "android/java/src/org/chromium/base/ITestClient.aidl",
-    ]
-  }
 }
diff --git a/base/test/gtest_util.cc b/base/test/gtest_util.cc
deleted file mode 100644
index 6da902d..0000000
--- a/base/test/gtest_util.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/gtest_util.h"
-
-#include <stddef.h>
-
-#include <memory>
-
-#include "base/files/file_path.h"
-#include "base/json/json_file_value_serializer.h"
-#include "base/strings/string_util.h"
-#include "base/values.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-TestIdentifier::TestIdentifier() {
-}
-
-TestIdentifier::TestIdentifier(const TestIdentifier& other) = default;
-
-std::string FormatFullTestName(const std::string& test_case_name,
-                               const std::string& test_name) {
-  return test_case_name + "." + test_name;
-}
-
-std::string TestNameWithoutDisabledPrefix(const std::string& full_test_name) {
-  std::string test_name_no_disabled(full_test_name);
-  ReplaceSubstringsAfterOffset(&test_name_no_disabled, 0, "DISABLED_", "");
-  return test_name_no_disabled;
-}
-
-std::vector<TestIdentifier> GetCompiledInTests() {
-  testing::UnitTest* const unit_test = testing::UnitTest::GetInstance();
-
-  std::vector<TestIdentifier> tests;
-  for (int i = 0; i < unit_test->total_test_case_count(); ++i) {
-    const testing::TestCase* test_case = unit_test->GetTestCase(i);
-    for (int j = 0; j < test_case->total_test_count(); ++j) {
-      const testing::TestInfo* test_info = test_case->GetTestInfo(j);
-      TestIdentifier test_data;
-      test_data.test_case_name = test_case->name();
-      test_data.test_name = test_info->name();
-      test_data.file = test_info->file();
-      test_data.line = test_info->line();
-      tests.push_back(test_data);
-    }
-  }
-  return tests;
-}
-
-bool WriteCompiledInTestsToFile(const FilePath& path) {
-  std::vector<TestIdentifier> tests(GetCompiledInTests());
-
-  ListValue root;
-  for (size_t i = 0; i < tests.size(); ++i) {
-    std::unique_ptr<DictionaryValue> test_info(new DictionaryValue);
-    test_info->SetString("test_case_name", tests[i].test_case_name);
-    test_info->SetString("test_name", tests[i].test_name);
-    test_info->SetString("file", tests[i].file);
-    test_info->SetInteger("line", tests[i].line);
-    root.Append(std::move(test_info));
-  }
-
-  JSONFileValueSerializer serializer(path);
-  return serializer.Serialize(root);
-}
-
-bool ReadTestNamesFromFile(const FilePath& path,
-                           std::vector<TestIdentifier>* output) {
-  JSONFileValueDeserializer deserializer(path);
-  int error_code = 0;
-  std::string error_message;
-  std::unique_ptr<base::Value> value =
-      deserializer.Deserialize(&error_code, &error_message);
-  if (!value.get())
-    return false;
-
-  base::ListValue* tests = nullptr;
-  if (!value->GetAsList(&tests))
-    return false;
-
-  std::vector<base::TestIdentifier> result;
-  for (base::ListValue::iterator i = tests->begin(); i != tests->end(); ++i) {
-    base::DictionaryValue* test = nullptr;
-    if (!(*i)->GetAsDictionary(&test))
-      return false;
-
-    TestIdentifier test_data;
-
-    if (!test->GetStringASCII("test_case_name", &test_data.test_case_name))
-      return false;
-
-    if (!test->GetStringASCII("test_name", &test_data.test_name))
-      return false;
-
-    if (!test->GetStringASCII("file", &test_data.file))
-      return false;
-
-    if (!test->GetInteger("line", &test_data.line))
-      return false;
-
-    result.push_back(test_data);
-  }
-
-  output->swap(result);
-  return true;
-}
-
-}  // namespace base
diff --git a/base/test/gtest_util.h b/base/test/gtest_util.h
deleted file mode 100644
index 8dfb1f2..0000000
--- a/base/test/gtest_util.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TEST_GTEST_UTIL_H_
-#define BASE_TEST_GTEST_UTIL_H_
-
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// EXPECT/ASSERT_DCHECK_DEATH is intended to replace EXPECT/ASSERT_DEBUG_DEATH
-// when the death is expected to be caused by a DCHECK. Contrary to
-// EXPECT/ASSERT_DEBUG_DEATH however, it doesn't execute the statement in non-
-// dcheck builds as DCHECKs are intended to catch things that should never
-// happen and as such executing the statement results in undefined behavior
-// (|statement| is compiled in unsupported configurations nonetheless).
-// Death tests misbehave on Android.
-#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-
-// EXPECT/ASSERT_DCHECK_DEATH tests verify that a DCHECK is hit ("Check failed"
-// is part of the error message), but intentionally do not expose the gtest
-// death test's full |regex| parameter to avoid users having to verify the exact
-// syntax of the error message produced by the DCHECK.
-#define EXPECT_DCHECK_DEATH(statement) EXPECT_DEATH(statement, "Check failed")
-#define ASSERT_DCHECK_DEATH(statement) ASSERT_DEATH(statement, "Check failed")
-
-#else
-// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-
-// Macro copied from gtest-death-test-internal.h as it's (1) internal for now
-// and (2) only defined if !GTEST_HAS_DEATH_TEST which is only a subset of the
-// conditions in which it's needed here.
-// TODO(gab): Expose macro in upstream gtest repo for consumers like us that
-// want more specific death tests and remove this hack.
-# define GTEST_UNSUPPORTED_DEATH_TEST(statement, regex, terminator) \
-    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
-    if (::testing::internal::AlwaysTrue()) { \
-      GTEST_LOG_(WARNING) \
-          << "Death tests are not supported on this platform.\n" \
-          << "Statement '" #statement "' cannot be verified."; \
-    } else if (::testing::internal::AlwaysFalse()) { \
-      ::testing::internal::RE::PartialMatch(".*", (regex)); \
-      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
-      terminator; \
-    } else \
-      ::testing::Message()
-
-#define EXPECT_DCHECK_DEATH(statement) \
-    GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", )
-#define ASSERT_DCHECK_DEATH(statement) \
-    GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", return)
-
-#endif
-// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-
-namespace base {
-
-class FilePath;
-
-struct TestIdentifier {
-  TestIdentifier();
-  TestIdentifier(const TestIdentifier& other);
-
-  std::string test_case_name;
-  std::string test_name;
-  std::string file;
-  int line;
-};
-
-// Constructs a full test name given a test case name and a test name,
-// e.g. for test case "A" and test name "B" returns "A.B".
-std::string FormatFullTestName(const std::string& test_case_name,
-                               const std::string& test_name);
-
-// Returns the full test name with the "DISABLED_" prefix stripped out.
-// e.g. for the full test names "A.DISABLED_B", "DISABLED_A.B", and
-// "DISABLED_A.DISABLED_B", returns "A.B".
-std::string TestNameWithoutDisabledPrefix(const std::string& full_test_name);
-
-// Returns a vector of gtest-based tests compiled into
-// current executable.
-std::vector<TestIdentifier> GetCompiledInTests();
-
-// Writes the list of gtest-based tests compiled into
-// current executable as a JSON file. Returns true on success.
-bool WriteCompiledInTestsToFile(const FilePath& path) WARN_UNUSED_RESULT;
-
-// Reads the list of gtest-based tests from |path| into |output|.
-// Returns true on success.
-bool ReadTestNamesFromFile(
-    const FilePath& path,
-    std::vector<TestIdentifier>* output) WARN_UNUSED_RESULT;
-
-}  // namespace base
-
-#endif  // BASE_TEST_GTEST_UTIL_H_
diff --git a/base/test/mock_entropy_provider.cc b/base/test/mock_entropy_provider.cc
deleted file mode 100644
index 5ebf19a..0000000
--- a/base/test/mock_entropy_provider.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/mock_entropy_provider.h"
-
-namespace base {
-
-MockEntropyProvider::MockEntropyProvider() : entropy_value_(0.5) {}
-MockEntropyProvider::MockEntropyProvider(double entropy_value)
-    : entropy_value_(entropy_value) {}
-MockEntropyProvider::~MockEntropyProvider() {}
-
-double MockEntropyProvider::GetEntropyForTrial(
-    const std::string& trial_name,
-    uint32_t randomization_seed) const {
-  return entropy_value_;
-}
-
-}  // namespace base
diff --git a/base/test/mock_entropy_provider.h b/base/test/mock_entropy_provider.h
deleted file mode 100644
index ca2b4bc..0000000
--- a/base/test/mock_entropy_provider.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
-#define BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
-
-#include <stdint.h>
-
-#include "base/metrics/field_trial.h"
-
-namespace base {
-
-class MockEntropyProvider : public base::FieldTrial::EntropyProvider {
- public:
-  MockEntropyProvider();
-  explicit MockEntropyProvider(double entropy_value);
-  ~MockEntropyProvider() override;
-
-  // base::FieldTrial::EntropyProvider:
-  double GetEntropyForTrial(const std::string& trial_name,
-                            uint32_t randomization_seed) const override;
-
- private:
-  double entropy_value_;
-
-  DISALLOW_COPY_AND_ASSIGN(MockEntropyProvider);
-};
-
-}  // namespace base
-
-#endif  // BASE_TEST_MOCK_ENTROPY_PROVIDER_H_
diff --git a/base/test/multiprocess_test.cc b/base/test/multiprocess_test.cc
index fcc4d12..de56e7f 100644
--- a/base/test/multiprocess_test.cc
+++ b/base/test/multiprocess_test.cc
@@ -26,19 +26,6 @@
 
   return LaunchProcess(command_line, options);
 }
-
-bool WaitForMultiprocessTestChildExit(const Process& process,
-                                      TimeDelta timeout,
-                                      int* exit_code) {
-  return process.WaitForExitWithTimeout(timeout, exit_code);
-}
-
-bool TerminateMultiProcessTestChild(const Process& process,
-                                    int exit_code,
-                                    bool wait) {
-  return process.Terminate(exit_code, wait);
-}
-
 #endif  // !OS_ANDROID && !__ANDROID__ && !__ANDROID_HOST__
 
 CommandLine GetMultiProcessTestChildBaseCommandLine() {
@@ -52,8 +39,6 @@
 MultiProcessTest::MultiProcessTest() {
 }
 
-// Don't compile on Arc++.
-#if 0
 Process MultiProcessTest::SpawnChild(const std::string& procname) {
   LaunchOptions options;
 #if defined(OS_WIN)
@@ -67,7 +52,6 @@
     const LaunchOptions& options) {
   return SpawnMultiProcessTestChild(procname, MakeCmdLine(procname), options);
 }
-#endif
 
 CommandLine MultiProcessTest::MakeCmdLine(const std::string& procname) {
   CommandLine command_line = GetMultiProcessTestChildBaseCommandLine();
diff --git a/base/test/multiprocess_test.h b/base/test/multiprocess_test.h
index bf96637..ae4c3eb 100644
--- a/base/test/multiprocess_test.h
+++ b/base/test/multiprocess_test.h
@@ -40,7 +40,7 @@
 //     // Do stuff involving |test_child_process| and the child process....
 //
 //     int rv = -1;
-//     ASSERT_TRUE(base::WaitForMultiprocessTestChildExit(test_child_process,
+//     ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
 //         TestTimeouts::action_timeout(), &rv));
 //     EXPECT_EQ(0, rv);
 //   }
@@ -51,10 +51,6 @@
 //     // Code here runs in a child process....
 //     return 0;
 //   }
-//
-// If you need to terminate the child process, use the
-// TerminateMultiProcessTestChild method to ensure that test will work on
-// Android.
 
 // Spawns a child process and executes the function |procname| declared using
 // |MULTIPROCESS_TEST_MAIN()| or |MULTIPROCESS_TEST_MAIN_WITH_SETUP()|.
@@ -70,17 +66,24 @@
 // may add any flags needed for your child process.
 CommandLine GetMultiProcessTestChildBaseCommandLine();
 
-// Waits for the child process to exit. Returns true if the process exited
-// within |timeout| and sets |exit_code| if non null.
-bool WaitForMultiprocessTestChildExit(const Process& process,
-                                      TimeDelta timeout,
-                                      int* exit_code);
+#if defined(OS_ANDROID)
 
-// Terminates |process| with |exit_code|. If |wait| is true, this call blocks
-// until the process actually terminates.
-bool TerminateMultiProcessTestChild(const Process& process,
-                                    int exit_code,
-                                    bool wait);
+// Enable the alternate test child implementation which support spawning a child
+// after threads have been created. If used, this MUST be the first line of
+// main(). The main function is passed in to avoid a link-time dependency in
+// component builds.
+void InitAndroidMultiProcessTestHelper(int (*main)(int, char**));
+
+// Returns true if the current process is a test child.
+bool AndroidIsChildProcess();
+
+// Wait for a test child to exit if the alternate test child implementation is
+// being used.
+bool AndroidWaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code)
+    WARN_UNUSED_RESULT;
+
+#endif  // defined(OS_ANDROID)
 
 // MultiProcessTest ------------------------------------------------------------
 
diff --git a/base/test/multiprocess_test_android.cc b/base/test/multiprocess_test_android.cc
index c74f013..f58b452 100644
--- a/base/test/multiprocess_test_android.cc
+++ b/base/test/multiprocess_test_android.cc
@@ -4,87 +4,451 @@
 
 #include "base/test/multiprocess_test.h"
 
+#include <errno.h>
 #include <string.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <memory>
+#include <utility>
 #include <vector>
 
-#include "base/android/context_utils.h"
-#include "base/android/jni_android.h"
-#include "base/android/jni_array.h"
-#include "base/android/scoped_java_ref.h"
 #include "base/base_switches.h"
 #include "base/command_line.h"
+#include "base/containers/hash_tables.h"
+#include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "jni/MainReturnCodeResult_jni.h"
-#include "jni/MultiprocessTestClientLauncher_jni.h"
+#include "base/macros.h"
+#include "base/pickle.h"
+#include "base/posix/global_descriptors.h"
+#include "base/posix/unix_domain_socket_linux.h"
+#include "testing/multiprocess_func_list.h"
 
 namespace base {
 
+namespace {
+
+const int kMaxMessageSize = 1024 * 1024;
+const int kFragmentSize = 4096;
+
+// Message sent between parent process and helper child process.
+enum class MessageType : uint32_t {
+  START_REQUEST,
+  START_RESPONSE,
+  WAIT_REQUEST,
+  WAIT_RESPONSE,
+};
+
+struct MessageHeader {
+  uint32_t size;
+  MessageType type;
+};
+
+struct StartProcessRequest {
+  MessageHeader header =
+      {sizeof(StartProcessRequest), MessageType::START_REQUEST};
+
+  uint32_t num_args = 0;
+  uint32_t num_fds = 0;
+};
+
+struct StartProcessResponse {
+  MessageHeader header =
+      {sizeof(StartProcessResponse), MessageType::START_RESPONSE};
+
+  pid_t child_pid;
+};
+
+struct WaitProcessRequest {
+  MessageHeader header =
+      {sizeof(WaitProcessRequest), MessageType::WAIT_REQUEST};
+
+  pid_t pid;
+  uint64_t timeout_ms;
+};
+
+struct WaitProcessResponse {
+  MessageHeader header =
+      {sizeof(WaitProcessResponse), MessageType::WAIT_RESPONSE};
+
+  bool success = false;
+  int32_t exit_code = 0;
+};
+
+// Helper class that implements an alternate test child launcher for
+// multi-process tests. The default implementation doesn't work if the child is
+// launched after starting threads. However, for some tests (i.e. Mojo), this
+// is necessary. This implementation works around that issue by forking a helper
+// process very early in main(), before any real work is done. Then, when a
+// child needs to be spawned, a message is sent to that helper process, which
+// then forks and returns the result to the parent. The forked child then calls
+// main() and things look as though a brand new process has been fork/exec'd.
+class LaunchHelper {
+ public:
+  using MainFunction = int (*)(int, char**);
+
+  LaunchHelper() {}
+
+  // Initialise the alternate test child implementation.
+  void Init(MainFunction main);
+
+  // Starts a child test helper process.
+  Process StartChildTestHelper(const std::string& procname,
+                               const CommandLine& base_command_line,
+                               const LaunchOptions& options);
+
+  // Waits for a child test helper process.
+  bool WaitForChildExitWithTimeout(const Process& process, TimeDelta timeout,
+                                   int* exit_code);
+
+  bool IsReady() const { return child_fd_ != -1; }
+  bool IsChild() const { return is_child_; }
+
+ private:
+  // Wrappers around sendmsg/recvmsg that supports message fragmentation.
+  void Send(int fd, const MessageHeader* msg, const std::vector<int>& fds);
+  ssize_t Recv(int fd, void* buf, std::vector<ScopedFD>* fds);
+
+  // Parent process implementation.
+  void DoParent(int fd);
+  // Helper process implementation.
+  void DoHelper(int fd);
+
+  void StartProcessInHelper(const StartProcessRequest* request,
+                           std::vector<ScopedFD> fds);
+  void WaitForChildInHelper(const WaitProcessRequest* request);
+
+  bool is_child_ = false;
+
+  // Parent vars.
+  int child_fd_ = -1;
+
+  // Helper vars.
+  int parent_fd_ = -1;
+  MainFunction main_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(LaunchHelper);
+};
+
+void LaunchHelper::Init(MainFunction main) {
+  main_ = main;
+
+  // Create a communication channel between the parent and child launch helper.
+  // fd[0] belongs to the parent, fd[1] belongs to the child.
+  int fds[2] = {-1, -1};
+  int rv = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds);
+  PCHECK(rv == 0);
+  CHECK_NE(-1, fds[0]);
+  CHECK_NE(-1, fds[1]);
+
+  pid_t pid = fork();
+  PCHECK(pid >= 0) << "Fork failed";
+  if (pid) {
+    // Parent.
+    rv = close(fds[1]);
+    PCHECK(rv == 0);
+    DoParent(fds[0]);
+  } else {
+    // Helper.
+    rv = close(fds[0]);
+    PCHECK(rv == 0);
+    DoHelper(fds[1]);
+    NOTREACHED();
+    _exit(0);
+  }
+}
+
+void LaunchHelper::Send(
+    int fd, const MessageHeader* msg, const std::vector<int>& fds) {
+  uint32_t bytes_remaining = msg->size;
+  const char* buf = reinterpret_cast<const char*>(msg);
+  while (bytes_remaining) {
+    size_t send_size =
+        (bytes_remaining > kFragmentSize) ? kFragmentSize : bytes_remaining;
+    bool success = UnixDomainSocket::SendMsg(
+        fd, buf, send_size,
+        (bytes_remaining == msg->size) ? fds : std::vector<int>());
+    CHECK(success);
+    bytes_remaining -= send_size;
+    buf += send_size;
+  }
+}
+
+ssize_t LaunchHelper::Recv(int fd, void* buf, std::vector<ScopedFD>* fds) {
+  ssize_t size = UnixDomainSocket::RecvMsg(fd, buf, kFragmentSize, fds);
+  if (size <= 0)
+    return size;
+
+  const MessageHeader* header = reinterpret_cast<const MessageHeader*>(buf);
+  CHECK(header->size < kMaxMessageSize);
+  uint32_t bytes_remaining = header->size - size;
+  char* buffer = reinterpret_cast<char*>(buf);
+  buffer += size;
+  while (bytes_remaining) {
+    std::vector<ScopedFD> dummy_fds;
+    size = UnixDomainSocket::RecvMsg(fd, buffer, kFragmentSize, &dummy_fds);
+    if (size <= 0)
+      return size;
+
+    CHECK(dummy_fds.empty());
+    CHECK(size == kFragmentSize ||
+          static_cast<size_t>(size) == bytes_remaining);
+    bytes_remaining -= size;
+    buffer += size;
+  }
+  return header->size;
+}
+
+void LaunchHelper::DoParent(int fd) {
+  child_fd_ = fd;
+}
+
+void LaunchHelper::DoHelper(int fd) {
+  parent_fd_ = fd;
+  is_child_ = true;
+  std::unique_ptr<char[]> buf(new char[kMaxMessageSize]);
+  while (true) {
+    // Wait for a message from the parent.
+    std::vector<ScopedFD> fds;
+    ssize_t size = Recv(parent_fd_, buf.get(), &fds);
+    if (size == 0 || (size < 0 && errno == ECONNRESET)) {
+      _exit(0);
+    }
+    PCHECK(size > 0);
+
+    const MessageHeader* header =
+        reinterpret_cast<const MessageHeader*>(buf.get());
+    CHECK_EQ(static_cast<ssize_t>(header->size), size);
+    switch (header->type) {
+      case MessageType::START_REQUEST:
+        StartProcessInHelper(
+            reinterpret_cast<const StartProcessRequest*>(buf.get()),
+            std::move(fds));
+        break;
+      case MessageType::WAIT_REQUEST:
+        WaitForChildInHelper(
+            reinterpret_cast<const WaitProcessRequest*>(buf.get()));
+        break;
+      default:
+        LOG(FATAL) << "Unsupported message type: "
+                   << static_cast<uint32_t>(header->type);
+    }
+  }
+}
+
+void LaunchHelper::StartProcessInHelper(const StartProcessRequest* request,
+                                        std::vector<ScopedFD> fds) {
+  pid_t pid = fork();
+  PCHECK(pid >= 0) << "Fork failed";
+  if (pid) {
+    // Helper.
+    StartProcessResponse resp;
+    resp.child_pid = pid;
+    Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
+         std::vector<int>());
+  } else {
+    // Child.
+    PCHECK(close(parent_fd_) == 0);
+    parent_fd_ = -1;
+    CommandLine::Reset();
+
+    Pickle serialised_extra(reinterpret_cast<const char*>(request + 1),
+                            request->header.size - sizeof(StartProcessRequest));
+    PickleIterator iter(serialised_extra);
+    std::vector<std::string> args;
+    for (size_t i = 0; i < request->num_args; i++) {
+      std::string arg;
+      CHECK(iter.ReadString(&arg));
+      args.push_back(std::move(arg));
+    }
+
+    CHECK_EQ(request->num_fds, fds.size());
+    for (size_t i = 0; i < request->num_fds; i++) {
+      int new_fd;
+      CHECK(iter.ReadInt(&new_fd));
+      int old_fd = fds[i].release();
+      if (new_fd != old_fd) {
+        if (dup2(old_fd, new_fd) < 0) {
+          PLOG(FATAL) << "dup2";
+        }
+        PCHECK(close(old_fd) == 0);
+      }
+    }
+
+    // argv has argc+1 elements, where the last element is NULL.
+    std::unique_ptr<char*[]> argv(new char*[args.size() + 1]);
+    for (size_t i = 0; i < args.size(); i++) {
+      argv[i] = const_cast<char*>(args[i].c_str());
+    }
+    argv[args.size()] = nullptr;
+    _exit(main_(args.size(), argv.get()));
+    NOTREACHED();
+  }
+}
+
+void LaunchHelper::WaitForChildInHelper(const WaitProcessRequest* request) {
+  Process process(request->pid);
+  TimeDelta timeout = TimeDelta::FromMilliseconds(request->timeout_ms);
+  int exit_code = -1;
+  bool success = process.WaitForExitWithTimeout(timeout, &exit_code);
+
+  WaitProcessResponse resp;
+  resp.exit_code = exit_code;
+  resp.success = success;
+  Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
+       std::vector<int>());
+}
+
+Process LaunchHelper::StartChildTestHelper(const std::string& procname,
+                                           const CommandLine& base_command_line,
+                                           const LaunchOptions& options) {
+
+  CommandLine command_line(base_command_line);
+  if (!command_line.HasSwitch(switches::kTestChildProcess))
+    command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+
+  StartProcessRequest request;
+  Pickle serialised_extra;
+  const CommandLine::StringVector& argv = command_line.argv();
+  for (const auto& arg : argv)
+    CHECK(serialised_extra.WriteString(arg));
+  request.num_args = argv.size();
+
+  std::vector<int> fds_to_send;
+  if (options.fds_to_remap) {
+    for (auto p : *options.fds_to_remap) {
+      CHECK(serialised_extra.WriteInt(p.second));
+      fds_to_send.push_back(p.first);
+    }
+    request.num_fds = options.fds_to_remap->size();
+  }
+
+  size_t buf_size = sizeof(StartProcessRequest) + serialised_extra.size();
+  request.header.size = buf_size;
+  std::unique_ptr<char[]> buffer(new char[buf_size]);
+  memcpy(buffer.get(), &request, sizeof(StartProcessRequest));
+  memcpy(buffer.get() + sizeof(StartProcessRequest), serialised_extra.data(),
+         serialised_extra.size());
+
+  // Send start message.
+  Send(child_fd_, reinterpret_cast<const MessageHeader*>(buffer.get()),
+       fds_to_send);
+
+  // Synchronously get response.
+  StartProcessResponse response;
+  std::vector<ScopedFD> recv_fds;
+  ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
+  PCHECK(resp_size == sizeof(StartProcessResponse));
+
+  return Process(response.child_pid);
+}
+
+bool LaunchHelper::WaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code) {
+
+  WaitProcessRequest request;
+  request.pid = process.Handle();
+  request.timeout_ms = timeout.InMilliseconds();
+
+  Send(child_fd_, reinterpret_cast<const MessageHeader*>(&request),
+       std::vector<int>());
+
+  WaitProcessResponse response;
+  std::vector<ScopedFD> recv_fds;
+  ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
+  PCHECK(resp_size == sizeof(WaitProcessResponse));
+
+  if (!response.success)
+    return false;
+
+  *exit_code = response.exit_code;
+  return true;
+}
+
+LazyInstance<LaunchHelper>::Leaky g_launch_helper;
+
+}  // namespace
+
+void InitAndroidMultiProcessTestHelper(int (*main)(int, char**)) {
+  DCHECK(main);
+  // Don't allow child processes to themselves create new child processes.
+  if (g_launch_helper.Get().IsChild())
+    return;
+  g_launch_helper.Get().Init(main);
+}
+
+bool AndroidIsChildProcess() {
+  return g_launch_helper.Get().IsChild();
+}
+
+bool AndroidWaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code) {
+  CHECK(g_launch_helper.Get().IsReady());
+  return g_launch_helper.Get().WaitForChildExitWithTimeout(
+      process, timeout, exit_code);
+}
+
 // A very basic implementation for Android. On Android tests can run in an APK
 // and we don't have an executable to exec*. This implementation does the bare
 // minimum to execute the method specified by procname (in the child process).
 //  - All options except |fds_to_remap| are ignored.
-//
-// NOTE: This MUST NOT run on the main thread of the NativeTest application.
 Process SpawnMultiProcessTestChild(const std::string& procname,
                                    const CommandLine& base_command_line,
                                    const LaunchOptions& options) {
-  JNIEnv* env = android::AttachCurrentThread();
-  DCHECK(env);
+  if (g_launch_helper.Get().IsReady()) {
+    return g_launch_helper.Get().StartChildTestHelper(
+        procname, base_command_line, options);
+  }
 
-  std::vector<int> fd_keys;
-  std::vector<int> fd_fds;
-  if (options.fds_to_remap) {
-    for (auto& iter : *options.fds_to_remap) {
-      fd_keys.push_back(iter.second);
-      fd_fds.push_back(iter.first);
+  // TODO(viettrungluu): The FD-remapping done below is wrong in the presence of
+  // cycles (e.g., fd1 -> fd2, fd2 -> fd1). crbug.com/326576
+  FileHandleMappingVector empty;
+  const FileHandleMappingVector* fds_to_remap =
+      options.fds_to_remap ? options.fds_to_remap : &empty;
+
+  pid_t pid = fork();
+
+  if (pid < 0) {
+    PLOG(ERROR) << "fork";
+    return Process();
+  }
+  if (pid > 0) {
+    // Parent process.
+    return Process(pid);
+  }
+  // Child process.
+  base::hash_set<int> fds_to_keep_open;
+  for (FileHandleMappingVector::const_iterator it = fds_to_remap->begin();
+       it != fds_to_remap->end(); ++it) {
+    fds_to_keep_open.insert(it->first);
+  }
+  // Keep standard FDs (stdin, stdout, stderr, etc.) open since this
+  // is not meant to spawn a daemon.
+  int base = GlobalDescriptors::kBaseDescriptor;
+  for (int fd = base; fd < sysconf(_SC_OPEN_MAX); ++fd) {
+    if (fds_to_keep_open.find(fd) == fds_to_keep_open.end()) {
+      close(fd);
     }
   }
-
-  android::ScopedJavaLocalRef<jobjectArray> fds =
-      android::Java_MultiprocessTestClientLauncher_makeFdInfoArray(
-          env, base::android::ToJavaIntArray(env, fd_keys),
-          base::android::ToJavaIntArray(env, fd_fds));
-
-  CommandLine command_line(base_command_line);
-  if (!command_line.HasSwitch(switches::kTestChildProcess)) {
-    command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+  for (FileHandleMappingVector::const_iterator it = fds_to_remap->begin();
+       it != fds_to_remap->end(); ++it) {
+    int old_fd = it->first;
+    int new_fd = it->second;
+    if (dup2(old_fd, new_fd) < 0) {
+      PLOG(FATAL) << "dup2";
+    }
+    close(old_fd);
   }
+  CommandLine::Reset();
+  CommandLine::Init(0, nullptr);
+  CommandLine* command_line = CommandLine::ForCurrentProcess();
+  command_line->InitFromArgv(base_command_line.argv());
+  if (!command_line->HasSwitch(switches::kTestChildProcess))
+    command_line->AppendSwitchASCII(switches::kTestChildProcess, procname);
 
-  android::ScopedJavaLocalRef<jobjectArray> j_argv =
-      android::ToJavaArrayOfStrings(env, command_line.argv());
-  jint pid = android::Java_MultiprocessTestClientLauncher_launchClient(
-      env, android::GetApplicationContext(), j_argv, fds);
-  return Process(pid);
-}
-
-bool WaitForMultiprocessTestChildExit(const Process& process,
-                                      TimeDelta timeout,
-                                      int* exit_code) {
-  JNIEnv* env = android::AttachCurrentThread();
-  DCHECK(env);
-
-  base::android::ScopedJavaLocalRef<jobject> result_code =
-      android::Java_MultiprocessTestClientLauncher_waitForMainToReturn(
-          env, android::GetApplicationContext(), process.Pid(),
-          static_cast<int32_t>(timeout.InMilliseconds()));
-  if (result_code.is_null() ||
-      Java_MainReturnCodeResult_hasTimedOut(env, result_code)) {
-    return false;
-  }
-  if (exit_code) {
-    *exit_code = Java_MainReturnCodeResult_getReturnCode(env, result_code);
-  }
-  return true;
-}
-
-bool TerminateMultiProcessTestChild(const Process& process,
-                                    int exit_code,
-                                    bool wait) {
-  JNIEnv* env = android::AttachCurrentThread();
-  DCHECK(env);
-
-  return android::Java_MultiprocessTestClientLauncher_terminate(
-      env, android::GetApplicationContext(), process.Pid(), exit_code, wait);
+  _exit(multi_process_function_list::InvokeChildProcessTest(procname));
+  return Process();
 }
 
 }  // namespace base
diff --git a/base/test/opaque_ref_counted.cc b/base/test/opaque_ref_counted.cc
index 36253e5..ed6c36f 100644
--- a/base/test/opaque_ref_counted.cc
+++ b/base/test/opaque_ref_counted.cc
@@ -11,31 +11,17 @@
 
 class OpaqueRefCounted : public RefCounted<OpaqueRefCounted> {
  public:
-  OpaqueRefCounted() = default;
+  OpaqueRefCounted() {}
 
   int Return42() { return 42; }
 
  private:
-  friend class RefCounted<OpaqueRefCounted>;
-  ~OpaqueRefCounted() = default;
+  virtual ~OpaqueRefCounted() {}
 
+  friend RefCounted<OpaqueRefCounted>;
   DISALLOW_COPY_AND_ASSIGN(OpaqueRefCounted);
 };
 
-class OpaqueRefCountedThreadSafe
-    : public RefCounted<OpaqueRefCountedThreadSafe> {
- public:
-  OpaqueRefCountedThreadSafe() = default;
-
-  int Return42() { return 42; }
-
- private:
-  friend class RefCounted<OpaqueRefCountedThreadSafe>;
-  ~OpaqueRefCountedThreadSafe() = default;
-
-  DISALLOW_COPY_AND_ASSIGN(OpaqueRefCountedThreadSafe);
-};
-
 scoped_refptr<OpaqueRefCounted> MakeOpaqueRefCounted() {
   return new OpaqueRefCounted();
 }
@@ -44,16 +30,6 @@
   EXPECT_EQ(42, p->Return42());
 }
 
-scoped_refptr<OpaqueRefCountedThreadSafe> MakeOpaqueRefCountedThreadSafe() {
-  return new OpaqueRefCountedThreadSafe();
-}
-
-void TestOpaqueRefCountedThreadSafe(
-    scoped_refptr<OpaqueRefCountedThreadSafe> p) {
-  EXPECT_EQ(42, p->Return42());
-}
-
 }  // namespace base
 
 template class scoped_refptr<base::OpaqueRefCounted>;
-template class scoped_refptr<base::OpaqueRefCountedThreadSafe>;
diff --git a/base/test/opaque_ref_counted.h b/base/test/opaque_ref_counted.h
index c0ddc87..faf6a65 100644
--- a/base/test/opaque_ref_counted.h
+++ b/base/test/opaque_ref_counted.h
@@ -12,18 +12,13 @@
 // OpaqueRefCounted is a test class for scoped_refptr to ensure it still works
 // when the pointed-to type is opaque (i.e., incomplete).
 class OpaqueRefCounted;
-class OpaqueRefCountedThreadSafe;
 
 // Test functions that return and accept scoped_refptr<OpaqueRefCounted> values.
 scoped_refptr<OpaqueRefCounted> MakeOpaqueRefCounted();
 void TestOpaqueRefCounted(scoped_refptr<OpaqueRefCounted> p);
-scoped_refptr<OpaqueRefCountedThreadSafe> MakeOpaqueRefCountedThreadSafe();
-void TestOpaqueRefCountedThreadSafe(
-    scoped_refptr<OpaqueRefCountedThreadSafe> p);
 
 }  // namespace base
 
 extern template class scoped_refptr<base::OpaqueRefCounted>;
-extern template class scoped_refptr<base::OpaqueRefCountedThreadSafe>;
 
 #endif  // BASE_TEST_OPAQUE_REF_COUNTED_H_
diff --git a/base/test/scoped_feature_list.cc b/base/test/scoped_feature_list.cc
deleted file mode 100644
index f0f3f4e..0000000
--- a/base/test/scoped_feature_list.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/scoped_feature_list.h"
-
-#include <string>
-
-namespace base {
-namespace test {
-
-namespace {
-
-static std::string GetFeatureString(
-    const std::initializer_list<base::Feature>& features) {
-  std::string output;
-  for (const base::Feature& feature : features) {
-    if (!output.empty())
-      output += ",";
-    output += feature.name;
-  }
-  return output;
-}
-
-}  // namespace
-
-ScopedFeatureList::ScopedFeatureList() {}
-
-ScopedFeatureList::~ScopedFeatureList() {
-  if (original_feature_list_) {
-    base::FeatureList::ClearInstanceForTesting();
-    base::FeatureList::RestoreInstanceForTesting(
-        std::move(original_feature_list_));
-  }
-}
-
-void ScopedFeatureList::Init() {
-  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
-  feature_list->InitializeFromCommandLine(std::string(), std::string());
-  InitWithFeatureList(std::move(feature_list));
-}
-
-void ScopedFeatureList::InitWithFeatures(
-    const std::initializer_list<base::Feature>& enabled_features,
-    const std::initializer_list<base::Feature>& disabled_features) {
-  InitFromCommandLine(GetFeatureString(enabled_features),
-                      GetFeatureString(disabled_features));
-}
-
-void ScopedFeatureList::InitWithFeatureList(
-    std::unique_ptr<FeatureList> feature_list) {
-  DCHECK(!original_feature_list_);
-  original_feature_list_ = base::FeatureList::ClearInstanceForTesting();
-  base::FeatureList::SetInstance(std::move(feature_list));
-}
-
-void ScopedFeatureList::InitFromCommandLine(
-    const std::string& enable_features,
-    const std::string& disable_features) {
-  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
-  feature_list->InitializeFromCommandLine(enable_features, disable_features);
-  InitWithFeatureList(std::move(feature_list));
-}
-
-void ScopedFeatureList::InitAndEnableFeature(const base::Feature& feature) {
-  InitFromCommandLine(feature.name, std::string());
-}
-
-void ScopedFeatureList::InitAndDisableFeature(const base::Feature& feature) {
-  InitFromCommandLine(std::string(), feature.name);
-}
-
-}  // namespace test
-}  // namespace base
diff --git a/base/test/scoped_feature_list.h b/base/test/scoped_feature_list.h
deleted file mode 100644
index 99e07f5..0000000
--- a/base/test/scoped_feature_list.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TEST_SCOPED_FEATURE_LIST_H_
-#define BASE_TEST_SCOPED_FEATURE_LIST_H_
-
-#include <initializer_list>
-
-#include "base/feature_list.h"
-
-namespace base {
-namespace test {
-
-// ScopedFeatureList resets the global FeatureList instance to a new empty
-// instance and restores the original instance upon destruction.
-// Note: Re-using the same object is not allowed. To reset the feature
-// list and initialize it anew, destroy an existing scoped list and init
-// a new one.
-class ScopedFeatureList final {
- public:
-  ScopedFeatureList();
-  ~ScopedFeatureList();
-
-  // Initializes and registers a FeatureList instance with no overrides.
-  void Init();
-
-  // Initializes and registers the given FeatureList instance.
-  void InitWithFeatureList(std::unique_ptr<FeatureList> feature_list);
-
-  // Initializes and registers a FeatureList instance with the given enabled
-  // and disabled features.
-  void InitWithFeatures(
-      const std::initializer_list<base::Feature>& enabled_features,
-      const std::initializer_list<base::Feature>& disabled_features);
-
-  // Initializes and registers a FeatureList instance with the given
-  // enabled and disabled features (comma-separated names).
-  void InitFromCommandLine(const std::string& enable_features,
-                           const std::string& disable_features);
-
-  // Initializes and registers a FeatureList instance enabling a single
-  // feature.
-  void InitAndEnableFeature(const base::Feature& feature);
-
-  // Initializes and registers a FeatureList instance disabling a single
-  // feature.
-  void InitAndDisableFeature(const base::Feature& feature);
-
- private:
-  std::unique_ptr<FeatureList> original_feature_list_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedFeatureList);
-};
-
-}  // namespace test
-}  // namespace base
-
-#endif  // BASE_TEST_SCOPED_FEATURE_LIST_H_
diff --git a/base/test/sequenced_worker_pool_owner.cc b/base/test/sequenced_worker_pool_owner.cc
index 324d071..8781495 100644
--- a/base/test/sequenced_worker_pool_owner.cc
+++ b/base/test/sequenced_worker_pool_owner.cc
@@ -14,10 +14,7 @@
     size_t max_threads,
     const std::string& thread_name_prefix)
     : constructor_message_loop_(MessageLoop::current()),
-      pool_(new SequencedWorkerPool(max_threads,
-                                    thread_name_prefix,
-                                    TaskPriority::USER_VISIBLE,
-                                    this)),
+      pool_(new SequencedWorkerPool(max_threads, thread_name_prefix, this)),
       has_work_call_count_(0) {}
 
 SequencedWorkerPoolOwner::~SequencedWorkerPoolOwner() {
@@ -28,8 +25,7 @@
   exit_loop_.Run();
 }
 
-const scoped_refptr<SequencedWorkerPool>& SequencedWorkerPoolOwner::pool()
-    const {
+const scoped_refptr<SequencedWorkerPool>& SequencedWorkerPoolOwner::pool() {
   return pool_;
 }
 
diff --git a/base/test/sequenced_worker_pool_owner.h b/base/test/sequenced_worker_pool_owner.h
index 28a6cf0..05fc750 100644
--- a/base/test/sequenced_worker_pool_owner.h
+++ b/base/test/sequenced_worker_pool_owner.h
@@ -39,7 +39,7 @@
   ~SequencedWorkerPoolOwner() override;
 
   // Don't change the returned pool's testing observer.
-  const scoped_refptr<SequencedWorkerPool>& pool() const;
+  const scoped_refptr<SequencedWorkerPool>& pool();
 
   // The given callback will be called on WillWaitForShutdown().
   void SetWillWaitForShutdownCallback(const Closure& callback);
diff --git a/base/test/test_file_util.h b/base/test/test_file_util.h
index d9172d7..7042e48 100644
--- a/base/test/test_file_util.h
+++ b/base/test/test_file_util.h
@@ -20,10 +20,6 @@
 #include <jni.h>
 #endif
 
-#if defined(OS_WIN)
-#include <windows.h>
-#endif
-
 namespace base {
 
 class FilePath;
@@ -44,12 +40,14 @@
 bool EvictFileFromSystemCache(const FilePath& file);
 
 #if defined(OS_WIN)
-// Deny |permission| on the file |path| for the current user. |permission| is an
-// ACCESS_MASK structure which is defined in
-// https://msdn.microsoft.com/en-us/library/windows/desktop/aa374892.aspx
-// Refer to https://msdn.microsoft.com/en-us/library/aa822867.aspx for a list of
-// possible values.
-bool DenyFilePermission(const FilePath& path, DWORD permission);
+// Returns true if the volume supports Alternate Data Streams.
+bool VolumeSupportsADS(const FilePath& path);
+
+// Returns true if the ZoneIdentifier is correctly set to "Internet" (3).
+// Note that this function must be called from the same process as
+// the one that set the zone identifier.  I.e. don't use it in UI/automation
+// based tests.
+bool HasInternetZoneIdentifier(const FilePath& full_path);
 #endif  // defined(OS_WIN)
 
 // For testing, make the file unreadable or unwritable.
@@ -72,6 +70,9 @@
 };
 
 #if defined(OS_ANDROID)
+// Register the ContentUriTestUrils JNI bindings.
+bool RegisterContentUriTestUtils(JNIEnv* env);
+
 // Insert an image file into the MediaStore, and retrieve the content URI for
 // testing purpose.
 FilePath InsertImageIntoMediaStore(const FilePath& path);
diff --git a/base/test/test_io_thread.cc b/base/test/test_io_thread.cc
index ce4a8d1..1fa0412 100644
--- a/base/test/test_io_thread.cc
+++ b/base/test/test_io_thread.cc
@@ -4,7 +4,19 @@
 
 #include "base/test/test_io_thread.h"
 
-#include "base/logging.h"
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/synchronization/waitable_event.h"
+
+namespace {
+
+void PostTaskAndWaitHelper(base::WaitableEvent* event,
+                           const base::Closure& task) {
+  task.Run();
+  event->Signal();
+}
+
+}  // namespace
 
 namespace base {
 
@@ -42,4 +54,13 @@
   task_runner()->PostTask(from_here, task);
 }
 
+void TestIOThread::PostTaskAndWait(const tracked_objects::Location& from_here,
+                                   const base::Closure& task) {
+  base::WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                            WaitableEvent::InitialState::NOT_SIGNALED);
+  task_runner()->PostTask(from_here,
+                          base::Bind(&PostTaskAndWaitHelper, &event, task));
+  event.Wait();
+}
+
 }  // namespace base
diff --git a/base/test/test_io_thread.h b/base/test/test_io_thread.h
index 5d3885e..c2ed187 100644
--- a/base/test/test_io_thread.h
+++ b/base/test/test_io_thread.h
@@ -18,13 +18,6 @@
 // Create and run an IO thread with a MessageLoop, and
 // making the MessageLoop accessible from its client.
 // It also provides some ideomatic API like PostTaskAndWait().
-//
-// This API is not thread-safe:
-//   - Start()/Stop() should only be called from the main (creation) thread.
-//   - PostTask()/message_loop()/task_runner() are also safe to call from the
-//     underlying thread itself (to post tasks from other threads: get the
-//     task_runner() from the main thread first, it is then safe to pass _it_
-//     around).
 class TestIOThread {
  public:
   enum Mode { kAutoStart, kManualStart };
@@ -32,14 +25,19 @@
   // Stops the I/O thread if necessary.
   ~TestIOThread();
 
-  // After Stop(), Start() may be called again to start a new I/O thread.
-  // Stop() may be called even when the I/O thread is not started.
+  // |Start()|/|Stop()| should only be called from the main (creation) thread.
+  // After |Stop()|, |Start()| may be called again to start a new I/O thread.
+  // |Stop()| may be called even when the I/O thread is not started.
   void Start();
   void Stop();
 
   // Post |task| to the IO thread.
   void PostTask(const tracked_objects::Location& from_here,
                 const base::Closure& task);
+  // Posts |task| to the IO-thread with an WaitableEvent associated blocks on
+  // it until the posted |task| is executed, then returns.
+  void PostTaskAndWait(const tracked_objects::Location& from_here,
+                       const base::Closure& task);
 
   base::MessageLoopForIO* message_loop() {
     return static_cast<base::MessageLoopForIO*>(io_thread_.message_loop());
diff --git a/base/test/test_mock_time_task_runner.cc b/base/test/test_mock_time_task_runner.cc
deleted file mode 100644
index f4bd724..0000000
--- a/base/test/test_mock_time_task_runner.cc
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/test_mock_time_task_runner.h"
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/memory/ref_counted.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/time/clock.h"
-#include "base/time/tick_clock.h"
-
-namespace base {
-
-namespace {
-
-// MockTickClock --------------------------------------------------------------
-
-// TickClock that always returns the then-current mock time ticks of
-// |task_runner| as the current time ticks.
-class MockTickClock : public TickClock {
- public:
-  explicit MockTickClock(
-      scoped_refptr<const TestMockTimeTaskRunner> task_runner);
-
-  // TickClock:
-  TimeTicks NowTicks() override;
-
- private:
-  scoped_refptr<const TestMockTimeTaskRunner> task_runner_;
-
-  DISALLOW_COPY_AND_ASSIGN(MockTickClock);
-};
-
-MockTickClock::MockTickClock(
-    scoped_refptr<const TestMockTimeTaskRunner> task_runner)
-    : task_runner_(task_runner) {
-}
-
-TimeTicks MockTickClock::NowTicks() {
-  return task_runner_->NowTicks();
-}
-
-// MockClock ------------------------------------------------------------------
-
-// Clock that always returns the then-current mock time of |task_runner| as the
-// current time.
-class MockClock : public Clock {
- public:
-  explicit MockClock(scoped_refptr<const TestMockTimeTaskRunner> task_runner);
-
-  // Clock:
-  Time Now() override;
-
- private:
-  scoped_refptr<const TestMockTimeTaskRunner> task_runner_;
-
-  DISALLOW_COPY_AND_ASSIGN(MockClock);
-};
-
-MockClock::MockClock(scoped_refptr<const TestMockTimeTaskRunner> task_runner)
-    : task_runner_(task_runner) {
-}
-
-Time MockClock::Now() {
-  return task_runner_->Now();
-}
-
-}  // namespace
-
-// TestMockTimeTaskRunner::TestOrderedPendingTask -----------------------------
-
-// Subclass of TestPendingTask which has a strictly monotonically increasing ID
-// for every task, so that tasks posted with the same 'time to run' can be run
-// in the order of being posted.
-struct TestMockTimeTaskRunner::TestOrderedPendingTask
-    : public base::TestPendingTask {
-  TestOrderedPendingTask();
-  TestOrderedPendingTask(const tracked_objects::Location& location,
-                         const Closure& task,
-                         TimeTicks post_time,
-                         TimeDelta delay,
-                         size_t ordinal,
-                         TestNestability nestability);
-  TestOrderedPendingTask(TestOrderedPendingTask&&);
-  ~TestOrderedPendingTask();
-
-  TestOrderedPendingTask& operator=(TestOrderedPendingTask&&);
-
-  size_t ordinal;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(TestOrderedPendingTask);
-};
-
-TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask()
-    : ordinal(0) {
-}
-
-TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
-    TestOrderedPendingTask&&) = default;
-
-TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask(
-    const tracked_objects::Location& location,
-    const Closure& task,
-    TimeTicks post_time,
-    TimeDelta delay,
-    size_t ordinal,
-    TestNestability nestability)
-    : base::TestPendingTask(location, task, post_time, delay, nestability),
-      ordinal(ordinal) {}
-
-TestMockTimeTaskRunner::TestOrderedPendingTask::~TestOrderedPendingTask() {
-}
-
-TestMockTimeTaskRunner::TestOrderedPendingTask&
-TestMockTimeTaskRunner::TestOrderedPendingTask::operator=(
-    TestOrderedPendingTask&&) = default;
-
-// TestMockTimeTaskRunner -----------------------------------------------------
-
-// TODO(gab): This should also set the SequenceToken for the current thread.
-// Ref. TestMockTimeTaskRunner::RunsTasksOnCurrentThread().
-TestMockTimeTaskRunner::ScopedContext::ScopedContext(
-    scoped_refptr<TestMockTimeTaskRunner> scope)
-    : on_destroy_(ThreadTaskRunnerHandle::OverrideForTesting(scope)) {
-  scope->RunUntilIdle();
-}
-
-TestMockTimeTaskRunner::ScopedContext::~ScopedContext() = default;
-
-bool TestMockTimeTaskRunner::TemporalOrder::operator()(
-    const TestOrderedPendingTask& first_task,
-    const TestOrderedPendingTask& second_task) const {
-  if (first_task.GetTimeToRun() == second_task.GetTimeToRun())
-    return first_task.ordinal > second_task.ordinal;
-  return first_task.GetTimeToRun() > second_task.GetTimeToRun();
-}
-
-TestMockTimeTaskRunner::TestMockTimeTaskRunner()
-    : now_(Time::UnixEpoch()), next_task_ordinal_(0) {
-}
-
-TestMockTimeTaskRunner::TestMockTimeTaskRunner(Time start_time,
-                                               TimeTicks start_ticks)
-    : now_(Time::UnixEpoch()), now_ticks_(start_ticks), next_task_ordinal_(0) {}
-
-TestMockTimeTaskRunner::~TestMockTimeTaskRunner() {
-}
-
-void TestMockTimeTaskRunner::FastForwardBy(TimeDelta delta) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK_GE(delta, TimeDelta());
-
-  const TimeTicks original_now_ticks = now_ticks_;
-  ProcessAllTasksNoLaterThan(delta);
-  ForwardClocksUntilTickTime(original_now_ticks + delta);
-}
-
-void TestMockTimeTaskRunner::RunUntilIdle() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  ProcessAllTasksNoLaterThan(TimeDelta());
-}
-
-void TestMockTimeTaskRunner::FastForwardUntilNoTasksRemain() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  ProcessAllTasksNoLaterThan(TimeDelta::Max());
-}
-
-void TestMockTimeTaskRunner::ClearPendingTasks() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  AutoLock scoped_lock(tasks_lock_);
-  while (!tasks_.empty())
-    tasks_.pop();
-}
-
-Time TestMockTimeTaskRunner::Now() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return now_;
-}
-
-TimeTicks TestMockTimeTaskRunner::NowTicks() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return now_ticks_;
-}
-
-std::unique_ptr<Clock> TestMockTimeTaskRunner::GetMockClock() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return MakeUnique<MockClock>(this);
-}
-
-std::unique_ptr<TickClock> TestMockTimeTaskRunner::GetMockTickClock() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return MakeUnique<MockTickClock>(this);
-}
-
-std::deque<TestPendingTask> TestMockTimeTaskRunner::TakePendingTasks() {
-  AutoLock scoped_lock(tasks_lock_);
-  std::deque<TestPendingTask> tasks;
-  while (!tasks_.empty()) {
-    // It's safe to remove const and consume |task| here, since |task| is not
-    // used for ordering the item.
-    tasks.push_back(
-        std::move(const_cast<TestOrderedPendingTask&>(tasks_.top())));
-    tasks_.pop();
-  }
-  return tasks;
-}
-
-bool TestMockTimeTaskRunner::HasPendingTask() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return !tasks_.empty();
-}
-
-size_t TestMockTimeTaskRunner::GetPendingTaskCount() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return tasks_.size();
-}
-
-TimeDelta TestMockTimeTaskRunner::NextPendingTaskDelay() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return tasks_.empty() ? TimeDelta::Max()
-                        : tasks_.top().GetTimeToRun() - now_ticks_;
-}
-
-// TODO(gab): Combine |thread_checker_| with a SequenceToken to differentiate
-// between tasks running in the scope of this TestMockTimeTaskRunner and other
-// task runners sharing this thread. http://crbug.com/631186
-bool TestMockTimeTaskRunner::RunsTasksOnCurrentThread() const {
-  return thread_checker_.CalledOnValidThread();
-}
-
-bool TestMockTimeTaskRunner::PostDelayedTask(
-    const tracked_objects::Location& from_here,
-    const Closure& task,
-    TimeDelta delay) {
-  AutoLock scoped_lock(tasks_lock_);
-  tasks_.push(TestOrderedPendingTask(from_here, task, now_ticks_, delay,
-                                     next_task_ordinal_++,
-                                     TestPendingTask::NESTABLE));
-  return true;
-}
-
-bool TestMockTimeTaskRunner::PostNonNestableDelayedTask(
-    const tracked_objects::Location& from_here,
-    const Closure& task,
-    TimeDelta delay) {
-  return PostDelayedTask(from_here, task, delay);
-}
-
-bool TestMockTimeTaskRunner::IsElapsingStopped() {
-  return false;
-}
-
-void TestMockTimeTaskRunner::OnBeforeSelectingTask() {
-  // Empty default implementation.
-}
-
-void TestMockTimeTaskRunner::OnAfterTimePassed() {
-  // Empty default implementation.
-}
-
-void TestMockTimeTaskRunner::OnAfterTaskRun() {
-  // Empty default implementation.
-}
-
-void TestMockTimeTaskRunner::ProcessAllTasksNoLaterThan(TimeDelta max_delta) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK_GE(max_delta, TimeDelta());
-
-  // Multiple test task runners can share the same thread for determinism in
-  // unit tests. Make sure this TestMockTimeTaskRunner's tasks run in its scope.
-  ScopedClosureRunner undo_override;
-  if (!ThreadTaskRunnerHandle::IsSet() ||
-      ThreadTaskRunnerHandle::Get() != this) {
-    undo_override = ThreadTaskRunnerHandle::OverrideForTesting(this);
-  }
-
-  const TimeTicks original_now_ticks = now_ticks_;
-  while (!IsElapsingStopped()) {
-    OnBeforeSelectingTask();
-    TestPendingTask task_info;
-    if (!DequeueNextTask(original_now_ticks, max_delta, &task_info))
-      break;
-    // If tasks were posted with a negative delay, task_info.GetTimeToRun() will
-    // be less than |now_ticks_|. ForwardClocksUntilTickTime() takes care of not
-    // moving the clock backwards in this case.
-    ForwardClocksUntilTickTime(task_info.GetTimeToRun());
-    std::move(task_info.task).Run();
-    OnAfterTaskRun();
-  }
-}
-
-void TestMockTimeTaskRunner::ForwardClocksUntilTickTime(TimeTicks later_ticks) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  if (later_ticks <= now_ticks_)
-    return;
-
-  now_ += later_ticks - now_ticks_;
-  now_ticks_ = later_ticks;
-  OnAfterTimePassed();
-}
-
-bool TestMockTimeTaskRunner::DequeueNextTask(const TimeTicks& reference,
-                                             const TimeDelta& max_delta,
-                                             TestPendingTask* next_task) {
-  AutoLock scoped_lock(tasks_lock_);
-  if (!tasks_.empty() &&
-      (tasks_.top().GetTimeToRun() - reference) <= max_delta) {
-    // It's safe to remove const and consume |task| here, since |task| is not
-    // used for ordering the item.
-    *next_task = std::move(const_cast<TestOrderedPendingTask&>(tasks_.top()));
-    tasks_.pop();
-    return true;
-  }
-  return false;
-}
-
-}  // namespace base
diff --git a/base/test/test_mock_time_task_runner.h b/base/test/test_mock_time_task_runner.h
deleted file mode 100644
index 54ebbdb..0000000
--- a/base/test/test_mock_time_task_runner.h
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
-#define BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
-
-#include <stddef.h>
-
-#include <deque>
-#include <memory>
-#include <queue>
-#include <vector>
-
-#include "base/callback_helpers.h"
-#include "base/macros.h"
-#include "base/single_thread_task_runner.h"
-#include "base/synchronization/lock.h"
-#include "base/test/test_pending_task.h"
-#include "base/threading/thread_checker_impl.h"
-#include "base/time/time.h"
-
-namespace base {
-
-class Clock;
-class TickClock;
-
-// Runs pending tasks in the order of the tasks' post time + delay, and keeps
-// track of a mock (virtual) tick clock time that can be fast-forwarded.
-//
-// TestMockTimeTaskRunner has the following properties:
-//
-//   - Methods RunsTasksOnCurrentThread() and Post[Delayed]Task() can be called
-//     from any thread, but the rest of the methods must be called on the same
-//     thread the TaskRunner was created on.
-//   - It allows for reentrancy, in that it handles the running of tasks that in
-//     turn call back into it (e.g., to post more tasks).
-//   - Tasks are stored in a priority queue, and executed in the increasing
-//     order of post time + delay, but ignoring nestability.
-//   - It does not check for overflow when doing time arithmetic. A sufficient
-//     condition for preventing overflows is to make sure that the sum of all
-//     posted task delays and fast-forward increments is still representable by
-//     a TimeDelta, and that adding this delta to the starting values of Time
-//     and TickTime is still within their respective range.
-//   - Tasks aren't guaranteed to be destroyed immediately after they're run.
-//
-// This is a slightly more sophisticated version of TestSimpleTaskRunner, in
-// that it supports running delayed tasks in the correct temporal order.
-class TestMockTimeTaskRunner : public SingleThreadTaskRunner {
- public:
-  // Everything that is executed in the scope of a ScopedContext will behave as
-  // though it ran under |scope| (i.e. ThreadTaskRunnerHandle,
-  // RunsTasksOnCurrentThread, etc.). This allows the test body to be all in one
-  // block when multiple TestMockTimeTaskRunners share the main thread. For
-  // example:
-  //
-  //   class ExampleFixture {
-  //    protected:
-  //     DoBarOnFoo() {
-  //       DCHECK(foo_task_runner_->RunsOnCurrentThread());
-  //       EXPECT_EQ(foo_task_runner_, ThreadTaskRunnerHandle::Get());
-  //       DoBar();
-  //     }
-  //
-  //     // Mock main task runner.
-  //     base::MessageLoop message_loop_;
-  //     base::ScopedMockTimeMessageLoopTaskRunner main_task_runner_;
-  //
-  //     // Mock foo task runner.
-  //     scoped_refptr<TestMockTimeTaskRunner> foo_task_runner_ =
-  //         new TestMockTimeTaskRunner();
-  //   };
-  //
-  //   TEST_F(ExampleFixture, DoBarOnFoo) {
-  //     DoThingsOnMain();
-  //     {
-  //       TestMockTimeTaskRunner::ScopedContext scoped_context(
-  //           foo_task_runner_.get());
-  //       DoBarOnFoo();
-  //     }
-  //     DoMoreThingsOnMain();
-  //   }
-  //
-  class ScopedContext {
-   public:
-    // Note: |scope| is ran until idle as part of this constructor to ensure
-    // that anything which runs in the underlying scope runs after any already
-    // pending tasks (the contrary would break the SequencedTraskRunner
-    // contract).
-    explicit ScopedContext(scoped_refptr<TestMockTimeTaskRunner> scope);
-    ~ScopedContext();
-
-   private:
-    ScopedClosureRunner on_destroy_;
-    DISALLOW_COPY_AND_ASSIGN(ScopedContext);
-  };
-
-  // Constructs an instance whose virtual time will start at the Unix epoch, and
-  // whose time ticks will start at zero.
-  TestMockTimeTaskRunner();
-
-  // Constructs an instance starting at the given virtual time and time ticks.
-  TestMockTimeTaskRunner(Time start_time, TimeTicks start_ticks);
-
-  // Fast-forwards virtual time by |delta|, causing all tasks with a remaining
-  // delay less than or equal to |delta| to be executed. |delta| must be
-  // non-negative.
-  void FastForwardBy(TimeDelta delta);
-
-  // Fast-forwards virtual time just until all tasks are executed.
-  void FastForwardUntilNoTasksRemain();
-
-  // Executes all tasks that have no remaining delay. Tasks with a remaining
-  // delay greater than zero will remain enqueued, and no virtual time will
-  // elapse.
-  void RunUntilIdle();
-
-  // Clears the queue of pending tasks without running them.
-  void ClearPendingTasks();
-
-  // Returns the current virtual time (initially starting at the Unix epoch).
-  Time Now() const;
-
-  // Returns the current virtual tick time (initially starting at 0).
-  TimeTicks NowTicks() const;
-
-  // Returns a Clock that uses the virtual time of |this| as its time source.
-  // The returned Clock will hold a reference to |this|.
-  std::unique_ptr<Clock> GetMockClock() const;
-
-  // Returns a TickClock that uses the virtual time ticks of |this| as its tick
-  // source. The returned TickClock will hold a reference to |this|.
-  std::unique_ptr<TickClock> GetMockTickClock() const;
-
-  std::deque<TestPendingTask> TakePendingTasks();
-  bool HasPendingTask() const;
-  size_t GetPendingTaskCount() const;
-  TimeDelta NextPendingTaskDelay() const;
-
-  // SingleThreadTaskRunner:
-  bool RunsTasksOnCurrentThread() const override;
-  bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       const Closure& task,
-                       TimeDelta delay) override;
-  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  const Closure& task,
-                                  TimeDelta delay) override;
-
- protected:
-  ~TestMockTimeTaskRunner() override;
-
-  // Whether the elapsing of virtual time is stopped or not. Subclasses can
-  // override this method to perform early exits from a running task runner.
-  // Defaults to always return false.
-  virtual bool IsElapsingStopped();
-
-  // Called before the next task to run is selected, so that subclasses have a
-  // last chance to make sure all tasks are posted.
-  virtual void OnBeforeSelectingTask();
-
-  // Called after the current mock time has been incremented so that subclasses
-  // can react to the passing of time.
-  virtual void OnAfterTimePassed();
-
-  // Called after each task is run so that subclasses may perform additional
-  // activities, e.g., pump additional task runners.
-  virtual void OnAfterTaskRun();
-
- private:
-  struct TestOrderedPendingTask;
-
-  // Predicate that defines a strict weak temporal ordering of tasks.
-  class TemporalOrder {
-   public:
-    bool operator()(const TestOrderedPendingTask& first_task,
-                    const TestOrderedPendingTask& second_task) const;
-  };
-
-  typedef std::priority_queue<TestOrderedPendingTask,
-                              std::vector<TestOrderedPendingTask>,
-                              TemporalOrder> TaskPriorityQueue;
-
-  // Core of the implementation for all flavors of fast-forward methods. Given a
-  // non-negative |max_delta|, runs all tasks with a remaining delay less than
-  // or equal to |max_delta|, and moves virtual time forward as needed for each
-  // processed task. Pass in TimeDelta::Max() as |max_delta| to run all tasks.
-  void ProcessAllTasksNoLaterThan(TimeDelta max_delta);
-
-  // Forwards |now_ticks_| until it equals |later_ticks|, and forwards |now_| by
-  // the same amount. Calls OnAfterTimePassed() if |later_ticks| > |now_ticks_|.
-  // Does nothing if |later_ticks| <= |now_ticks_|.
-  void ForwardClocksUntilTickTime(TimeTicks later_ticks);
-
-  // Returns the |next_task| to run if there is any with a running time that is
-  // at most |reference| + |max_delta|. This additional complexity is required
-  // so that |max_delta| == TimeDelta::Max() can be supported.
-  bool DequeueNextTask(const TimeTicks& reference,
-                       const TimeDelta& max_delta,
-                       TestPendingTask* next_task);
-
-  // Also used for non-dcheck logic (RunsTasksOnCurrentThread()) and as such
-  // needs to be a ThreadCheckerImpl.
-  ThreadCheckerImpl thread_checker_;
-
-  Time now_;
-  TimeTicks now_ticks_;
-
-  // Temporally ordered heap of pending tasks. Must only be accessed while the
-  // |tasks_lock_| is held.
-  TaskPriorityQueue tasks_;
-
-  // The ordinal to use for the next task. Must only be accessed while the
-  // |tasks_lock_| is held.
-  size_t next_task_ordinal_;
-
-  Lock tasks_lock_;
-
-  DISALLOW_COPY_AND_ASSIGN(TestMockTimeTaskRunner);
-};
-
-}  // namespace base
-
-#endif  // BASE_TEST_TEST_MOCK_TIME_TASK_RUNNER_H_
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index 98bc017..87b107e 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -22,9 +22,7 @@
       delay(delay),
       nestability(nestability) {}
 
-TestPendingTask::TestPendingTask(TestPendingTask&& other) = default;
-
-TestPendingTask& TestPendingTask::operator=(TestPendingTask&& other) = default;
+TestPendingTask::TestPendingTask(const TestPendingTask& other) = default;
 
 TimeTicks TestPendingTask::GetTimeToRun() const {
   return post_time + delay;
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 42f3f42..2dbdb7e 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -21,7 +21,7 @@
   enum TestNestability { NESTABLE, NON_NESTABLE };
 
   TestPendingTask();
-  TestPendingTask(TestPendingTask&& other);
+  TestPendingTask(const TestPendingTask& other);
   TestPendingTask(const tracked_objects::Location& location,
                   const Closure& task,
                   TimeTicks post_time,
@@ -29,8 +29,6 @@
                   TestNestability nestability);
   ~TestPendingTask();
 
-  TestPendingTask& operator=(TestPendingTask&& other);
-
   // Returns post_time + delay.
   TimeTicks GetTimeToRun() const;
 
@@ -53,7 +51,7 @@
   bool ShouldRunBefore(const TestPendingTask& other) const;
 
   tracked_objects::Location location;
-  OnceClosure task;
+  Closure task;
   TimeTicks post_time;
   TimeDelta delay;
   TestNestability nestability;
@@ -63,9 +61,6 @@
   void AsValueInto(base::trace_event::TracedValue* state) const;
   std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
   std::string ToString() const;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(TestPendingTask);
 };
 
 // gtest helpers which allow pretty printing of the tasks, very useful in unit
diff --git a/base/test/test_simple_task_runner.cc b/base/test/test_simple_task_runner.cc
index 090a72e..cc39fab 100644
--- a/base/test/test_simple_task_runner.cc
+++ b/base/test/test_simple_task_runner.cc
@@ -5,20 +5,20 @@
 #include "base/test/test_simple_task_runner.h"
 
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
-TestSimpleTaskRunner::TestSimpleTaskRunner() = default;
+TestSimpleTaskRunner::TestSimpleTaskRunner() {}
 
-TestSimpleTaskRunner::~TestSimpleTaskRunner() = default;
+TestSimpleTaskRunner::~TestSimpleTaskRunner() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+}
 
 bool TestSimpleTaskRunner::PostDelayedTask(
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  AutoLock auto_lock(lock_);
+  DCHECK(thread_checker_.CalledOnValidThread());
   pending_tasks_.push_back(
       TestPendingTask(from_here, task, TimeTicks(), delay,
                       TestPendingTask::NESTABLE));
@@ -29,70 +29,48 @@
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  AutoLock auto_lock(lock_);
+  DCHECK(thread_checker_.CalledOnValidThread());
   pending_tasks_.push_back(
       TestPendingTask(from_here, task, TimeTicks(), delay,
                       TestPendingTask::NON_NESTABLE));
   return true;
 }
 
-// TODO(gab): Use SequenceToken here to differentiate between tasks running in
-// the scope of this TestSimpleTaskRunner and other task runners sharing this
-// thread. http://crbug.com/631186
 bool TestSimpleTaskRunner::RunsTasksOnCurrentThread() const {
-  return thread_ref_ == PlatformThread::CurrentRef();
+  DCHECK(thread_checker_.CalledOnValidThread());
+  return true;
 }
 
-std::deque<TestPendingTask> TestSimpleTaskRunner::TakePendingTasks() {
-  AutoLock auto_lock(lock_);
-  return std::move(pending_tasks_);
-}
-
-size_t TestSimpleTaskRunner::NumPendingTasks() const {
-  AutoLock auto_lock(lock_);
-  return pending_tasks_.size();
+const std::deque<TestPendingTask>&
+TestSimpleTaskRunner::GetPendingTasks() const {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  return pending_tasks_;
 }
 
 bool TestSimpleTaskRunner::HasPendingTask() const {
-  AutoLock auto_lock(lock_);
+  DCHECK(thread_checker_.CalledOnValidThread());
   return !pending_tasks_.empty();
 }
 
 base::TimeDelta TestSimpleTaskRunner::NextPendingTaskDelay() const {
-  AutoLock auto_lock(lock_);
+  DCHECK(thread_checker_.CalledOnValidThread());
   return pending_tasks_.front().GetTimeToRun() - base::TimeTicks();
 }
 
-base::TimeDelta TestSimpleTaskRunner::FinalPendingTaskDelay() const {
-  AutoLock auto_lock(lock_);
-  return pending_tasks_.back().GetTimeToRun() - base::TimeTicks();
-}
-
 void TestSimpleTaskRunner::ClearPendingTasks() {
-  AutoLock auto_lock(lock_);
+  DCHECK(thread_checker_.CalledOnValidThread());
   pending_tasks_.clear();
 }
 
 void TestSimpleTaskRunner::RunPendingTasks() {
-  DCHECK(RunsTasksOnCurrentThread());
-
+  DCHECK(thread_checker_.CalledOnValidThread());
   // Swap with a local variable to avoid re-entrancy problems.
   std::deque<TestPendingTask> tasks_to_run;
-  {
-    AutoLock auto_lock(lock_);
-    tasks_to_run.swap(pending_tasks_);
+  tasks_to_run.swap(pending_tasks_);
+  for (std::deque<TestPendingTask>::iterator it = tasks_to_run.begin();
+       it != tasks_to_run.end(); ++it) {
+    it->task.Run();
   }
-
-  // Multiple test task runners can share the same thread for determinism in
-  // unit tests. Make sure this TestSimpleTaskRunner's tasks run in its scope.
-  ScopedClosureRunner undo_override;
-  if (!ThreadTaskRunnerHandle::IsSet() ||
-      ThreadTaskRunnerHandle::Get() != this) {
-    undo_override = ThreadTaskRunnerHandle::OverrideForTesting(this);
-  }
-
-  for (auto& task : tasks_to_run)
-    std::move(task.task).Run();
 }
 
 void TestSimpleTaskRunner::RunUntilIdle() {
diff --git a/base/test/test_simple_task_runner.h b/base/test/test_simple_task_runner.h
index d089ba8..338c634 100644
--- a/base/test/test_simple_task_runner.h
+++ b/base/test/test_simple_task_runner.h
@@ -10,9 +10,8 @@
 #include "base/compiler_specific.h"
 #include "base/macros.h"
 #include "base/single_thread_task_runner.h"
-#include "base/synchronization/lock.h"
 #include "base/test/test_pending_task.h"
-#include "base/threading/platform_thread.h"
+#include "base/threading/thread_checker.h"
 
 namespace base {
 
@@ -26,6 +25,8 @@
 //
 // TestSimpleTaskRunner has the following properties which make it simple:
 //
+//   - It is non-thread safe; all member functions must be called on
+//     the same thread.
 //   - Tasks are simply stored in a queue in FIFO order, ignoring delay
 //     and nestability.
 //   - Tasks aren't guaranteed to be destroyed immediately after
@@ -35,6 +36,10 @@
 // handles the running of tasks that in turn call back into itself
 // (e.g., to post more tasks).
 //
+// If you need more complicated properties, consider using this class
+// as a template for writing a test TaskRunner implementation using
+// TestPendingTask.
+//
 // Note that, like any TaskRunner, TestSimpleTaskRunner is
 // ref-counted.
 class TestSimpleTaskRunner : public SingleThreadTaskRunner {
@@ -51,36 +56,27 @@
 
   bool RunsTasksOnCurrentThread() const override;
 
-  std::deque<TestPendingTask> TakePendingTasks();
-  size_t NumPendingTasks() const;
+  const std::deque<TestPendingTask>& GetPendingTasks() const;
   bool HasPendingTask() const;
   base::TimeDelta NextPendingTaskDelay() const;
-  base::TimeDelta FinalPendingTaskDelay() const;
 
   // Clears the queue of pending tasks without running them.
   void ClearPendingTasks();
 
-  // Runs each current pending task in order and clears the queue. Tasks posted
-  // by the tasks that run within this call do not run within this call. Can
-  // only be called on the thread that created this TestSimpleTaskRunner.
-  void RunPendingTasks();
+  // Runs each current pending task in order and clears the queue.
+  // Any tasks posted by the tasks are not run.
+  virtual void RunPendingTasks();
 
-  // Runs pending tasks until the queue is empty. Can only be called on the
-  // thread that created this TestSimpleTaskRunner.
+  // Runs pending tasks until the queue is empty.
   void RunUntilIdle();
 
  protected:
   ~TestSimpleTaskRunner() override;
 
- private:
-  // Thread on which this was instantiated.
-  const PlatformThreadRef thread_ref_ = PlatformThread::CurrentRef();
-
-  // Synchronizes access to |pending_tasks_|.
-  mutable Lock lock_;
-
   std::deque<TestPendingTask> pending_tasks_;
+  ThreadChecker thread_checker_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(TestSimpleTaskRunner);
 };
 
diff --git a/base/test/test_timeouts.cc b/base/test/test_timeouts.cc
index 0dc0f49..55e9a79 100644
--- a/base/test/test_timeouts.cc
+++ b/base/test/test_timeouts.cc
@@ -46,10 +46,7 @@
     std::string string_value(base::CommandLine::ForCurrentProcess()->
          GetSwitchValueASCII(switch_name));
     int timeout;
-    if (string_value == TestTimeouts::kNoTimeoutSwitchValue)
-      timeout = kAlmostInfiniteTimeoutMs;
-    else
-      base::StringToInt(string_value, &timeout);
+    base::StringToInt(string_value, &timeout);
     *value = std::max(*value, timeout);
   }
   *value *= kTimeoutMultiplier;
@@ -68,9 +65,6 @@
 }  // namespace
 
 // static
-constexpr const char TestTimeouts::kNoTimeoutSwitchValue[];
-
-// static
 bool TestTimeouts::initialized_ = false;
 
 // The timeout values should increase in the order they appear in this block.
diff --git a/base/test/test_timeouts.h b/base/test/test_timeouts.h
index 9d42eb9..ddaf05b 100644
--- a/base/test/test_timeouts.h
+++ b/base/test/test_timeouts.h
@@ -13,9 +13,6 @@
 // the timeouts for different environments (like Valgrind).
 class TestTimeouts {
  public:
-  // Argument that can be passed on the command line to indicate "no timeout".
-  static constexpr const char kNoTimeoutSwitchValue[] = "-1";
-
   // Initializes the timeouts. Non thread-safe. Should be called exactly once
   // by the test suite.
   static void Initialize();
diff --git a/base/test/trace_event_analyzer.cc b/base/test/trace_event_analyzer.cc
index e61337c..55a349a 100644
--- a/base/test/trace_event_analyzer.cc
+++ b/base/test/trace_event_analyzer.cc
@@ -34,8 +34,8 @@
 TraceEvent& TraceEvent::operator=(TraceEvent&& rhs) = default;
 
 bool TraceEvent::SetFromJSON(const base::Value* event_value) {
-  if (event_value->GetType() != base::Value::Type::DICTIONARY) {
-    LOG(ERROR) << "Value must be Type::DICTIONARY";
+  if (event_value->GetType() != base::Value::TYPE_DICTIONARY) {
+    LOG(ERROR) << "Value must be TYPE_DICTIONARY";
     return false;
   }
   const base::DictionaryValue* dictionary =
diff --git a/base/test/trace_event_analyzer_unittest.cc b/base/test/trace_event_analyzer_unittest.cc
index ce7bce2..b4f0950 100644
--- a/base/test/trace_event_analyzer_unittest.cc
+++ b/base/test/trace_event_analyzer_unittest.cc
@@ -123,7 +123,7 @@
 
   std::unique_ptr<base::Value> arg;
   EXPECT_TRUE(event.GetArgAsValue("dict", &arg));
-  EXPECT_EQ(base::Value::Type::DICTIONARY, arg->GetType());
+  EXPECT_EQ(base::Value::TYPE_DICTIONARY, arg->GetType());
 }
 
 TEST_F(TraceEventAnalyzerTest, QueryEventMember) {
diff --git a/base/third_party/dynamic_annotations/dynamic_annotations.c b/base/third_party/dynamic_annotations/dynamic_annotations.c
deleted file mode 100644
index 4313ecc..0000000
--- a/base/third_party/dynamic_annotations/dynamic_annotations.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/* Copyright (c) 2011, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef _MSC_VER
-# include <windows.h>
-#endif
-
-#ifdef __cplusplus
-# error "This file should be built as pure C to avoid name mangling"
-#endif
-
-#include <stdlib.h>
-#include <string.h>
-
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
-
-#ifdef __GNUC__
-/* valgrind.h uses gcc extensions so it won't build with other compilers */
-# include "base/third_party/valgrind/valgrind.h"
-#endif
-
-/* Compiler-based ThreadSanitizer defines
-   DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1
-   and provides its own definitions of the functions. */
-
-#ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL
-# define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0
-#endif
-
-/* Each function is empty and called (via a macro) only in debug mode.
-   The arguments are captured by dynamic tools at runtime. */
-
-#if DYNAMIC_ANNOTATIONS_ENABLED == 1 \
-    && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
-
-/* Identical code folding(-Wl,--icf=all) countermeasures.
-   This makes all Annotate* functions different, which prevents the linker from
-   folding them. */
-#ifdef __COUNTER__
-#define DYNAMIC_ANNOTATIONS_IMPL \
-  volatile unsigned short lineno = (__LINE__ << 8) + __COUNTER__; (void)lineno;
-#else
-#define DYNAMIC_ANNOTATIONS_IMPL \
-  volatile unsigned short lineno = (__LINE__ << 8); (void)lineno;
-#endif
-
-/* WARNING: always add new annotations to the end of the list.
-   Otherwise, lineno (see above) numbers for different Annotate* functions may
-   conflict. */
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
-    const char *file, int line, const volatile void *lock)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
-    const char *file, int line, const volatile void *lock)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
-    const char *file, int line, const volatile void *lock, long is_w)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
-    const char *file, int line, const volatile void *lock, long is_w)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
-    const char *file, int line, const volatile void *barrier, long count,
-    long reinitialization_allowed)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
-    const char *file, int line, const volatile void *barrier)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
-    const char *file, int line, const volatile void *barrier)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
-    const char *file, int line, const volatile void *barrier)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
-    const char *file, int line, const volatile void *cv,
-    const volatile void *lock)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
-    const char *file, int line, const volatile void *cv)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
-    const char *file, int line, const volatile void *cv)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(
-    const char *file, int line, const volatile void *obj)
-{DYNAMIC_ANNOTATIONS_IMPL};
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(
-    const char *file, int line, const volatile void *obj)
-{DYNAMIC_ANNOTATIONS_IMPL};
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
-    const char *file, int line, const volatile void *address, long size)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
-    const char *file, int line, const volatile void *address, long size)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
-    const char *file, int line, const volatile void *pcq)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
-    const char *file, int line, const volatile void *pcq)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
-    const char *file, int line, const volatile void *pcq)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
-    const char *file, int line, const volatile void *pcq)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
-    const char *file, int line, const volatile void *mem, long size)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
-    const char *file, int line, const volatile void *mem,
-    const char *description)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
-    const char *file, int line)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
-    const char *file, int line, const volatile void *mem,
-    const char *description)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
-    const char *file, int line, const volatile void *mem, long size,
-    const char *description)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
-    const char *file, int line, const volatile void *mu)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
-    const char *file, int line, const volatile void *mu)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
-    const char *file, int line, const volatile void *arg)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
-    const char *file, int line, const char *name)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
-    const char *file, int line)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
-    const char *file, int line)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
-    const char *file, int line)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
-    const char *file, int line)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
-    const char *file, int line)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
-    const char *file, int line)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
-    const char *file, int line, int enable)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
-    const char *file, int line, const volatile void *arg)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
-    const char *file, int line)
-{DYNAMIC_ANNOTATIONS_IMPL}
-
-#endif  /* DYNAMIC_ANNOTATIONS_ENABLED == 1
-    && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
-
-#if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 \
-    && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0
-static int GetRunningOnValgrind(void) {
-#ifdef RUNNING_ON_VALGRIND
-  if (RUNNING_ON_VALGRIND) return 1;
-#endif
-
-#ifndef _MSC_VER
-  char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
-  if (running_on_valgrind_str) {
-    return strcmp(running_on_valgrind_str, "0") != 0;
-  }
-#else
-  /* Visual Studio issues warnings if we use getenv,
-   * so we use GetEnvironmentVariableA instead.
-   */
-  char value[100] = "1";
-  int res = GetEnvironmentVariableA("RUNNING_ON_VALGRIND",
-                                    value, sizeof(value));
-  /* value will remain "1" if res == 0 or res >= sizeof(value). The latter
-   * can happen only if the given value is long, in this case it can't be "0".
-   */
-  if (res > 0 && strcmp(value, "0") != 0)
-    return 1;
-#endif
-  return 0;
-}
-
-/* See the comments in dynamic_annotations.h */
-int RunningOnValgrind(void) {
-  static volatile int running_on_valgrind = -1;
-  /* C doesn't have thread-safe initialization of statics, and we
-     don't want to depend on pthread_once here, so hack it. */
-  int local_running_on_valgrind = running_on_valgrind;
-  if (local_running_on_valgrind == -1)
-    running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
-  return local_running_on_valgrind;
-}
-
-#endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1
-    && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */
diff --git a/base/threading/non_thread_safe.h b/base/threading/non_thread_safe.h
index 64ae8e4..d41c086 100644
--- a/base/threading/non_thread_safe.h
+++ b/base/threading/non_thread_safe.h
@@ -10,7 +10,14 @@
 // There is a specific macro to do it: NON_EXPORTED_BASE(), defined in
 // compiler_specific.h
 #include "base/compiler_specific.h"
-#include "base/logging.h"
+
+// See comment at top of thread_checker.h
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_NON_THREAD_SAFE 1
+#else
+#define ENABLE_NON_THREAD_SAFE 0
+#endif
+
 #include "base/threading/non_thread_safe_impl.h"
 
 namespace base {
@@ -51,11 +58,13 @@
 // to have a base::ThreadChecker as a member, rather than inherit from
 // NonThreadSafe. For more details about when to choose one over the other, see
 // the documentation for base::ThreadChecker.
-#if DCHECK_IS_ON()
+#if ENABLE_NON_THREAD_SAFE
 typedef NonThreadSafeImpl NonThreadSafe;
 #else
 typedef NonThreadSafeDoNothing NonThreadSafe;
-#endif  // DCHECK_IS_ON()
+#endif  // ENABLE_NON_THREAD_SAFE
+
+#undef ENABLE_NON_THREAD_SAFE
 
 }  // namespace base
 
diff --git a/base/threading/non_thread_safe_unittest.cc b/base/threading/non_thread_safe_unittest.cc
index 5752d5f..d523fc5 100644
--- a/base/threading/non_thread_safe_unittest.cc
+++ b/base/threading/non_thread_safe_unittest.cc
@@ -8,7 +8,6 @@
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/test/gtest_util.h"
 #include "base/threading/simple_thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -107,6 +106,8 @@
   delete_on_thread.Join();
 }
 
+#if GTEST_HAS_DEATH_TEST || !ENABLE_NON_THREAD_SAFE
+
 void NonThreadSafeClass::MethodOnDifferentThreadImpl() {
   std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
@@ -119,15 +120,17 @@
   call_on_thread.Join();
 }
 
-#if DCHECK_IS_ON()
+#if ENABLE_NON_THREAD_SAFE
 TEST(NonThreadSafeDeathTest, MethodNotAllowedOnDifferentThreadInDebug) {
-  ASSERT_DCHECK_DEATH({ NonThreadSafeClass::MethodOnDifferentThreadImpl(); });
+  ASSERT_DEATH({
+      NonThreadSafeClass::MethodOnDifferentThreadImpl();
+    }, "");
 }
 #else
 TEST(NonThreadSafeTest, MethodAllowedOnDifferentThreadInRelease) {
   NonThreadSafeClass::MethodOnDifferentThreadImpl();
 }
-#endif  // DCHECK_IS_ON()
+#endif  // ENABLE_NON_THREAD_SAFE
 
 void NonThreadSafeClass::DestructorOnDifferentThreadImpl() {
   std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
@@ -142,15 +145,21 @@
   delete_on_thread.Join();
 }
 
-#if DCHECK_IS_ON()
+#if ENABLE_NON_THREAD_SAFE
 TEST(NonThreadSafeDeathTest, DestructorNotAllowedOnDifferentThreadInDebug) {
-  ASSERT_DCHECK_DEATH(
-      { NonThreadSafeClass::DestructorOnDifferentThreadImpl(); });
+  ASSERT_DEATH({
+      NonThreadSafeClass::DestructorOnDifferentThreadImpl();
+    }, "");
 }
 #else
 TEST(NonThreadSafeTest, DestructorAllowedOnDifferentThreadInRelease) {
   NonThreadSafeClass::DestructorOnDifferentThreadImpl();
 }
-#endif  // DCHECK_IS_ON()
+#endif  // ENABLE_NON_THREAD_SAFE
+
+#endif  // GTEST_HAS_DEATH_TEST || !ENABLE_NON_THREAD_SAFE
+
+// Just in case we ever get lumped together with other compilation units.
+#undef ENABLE_NON_THREAD_SAFE
 
 }  // namespace base
diff --git a/base/threading/platform_thread.h b/base/threading/platform_thread.h
index 8c0d8e4..9b217a9 100644
--- a/base/threading/platform_thread.h
+++ b/base/threading/platform_thread.h
@@ -18,8 +18,6 @@
 
 #if defined(OS_WIN)
 #include <windows.h>
-#elif defined(OS_MACOSX)
-#include <mach/mach_types.h>
 #elif defined(OS_POSIX)
 #include <pthread.h>
 #include <unistd.h>
@@ -30,8 +28,6 @@
 // Used for logging. Always an integer value.
 #if defined(OS_WIN)
 typedef DWORD PlatformThreadId;
-#elif defined(OS_MACOSX)
-typedef mach_port_t PlatformThreadId;
 #elif defined(OS_POSIX)
 typedef pid_t PlatformThreadId;
 #endif
@@ -63,8 +59,6 @@
     return id_ == other.id_;
   }
 
-  bool operator!=(PlatformThreadRef other) const { return id_ != other.id_; }
-
   bool is_null() const {
     return id_ == 0;
   }
@@ -181,12 +175,6 @@
   // PlatformThreadHandle.
   static bool CreateNonJoinable(size_t stack_size, Delegate* delegate);
 
-  // CreateNonJoinableWithPriority() does the same thing as CreateNonJoinable()
-  // except the priority of the thread is set based on |priority|.
-  static bool CreateNonJoinableWithPriority(size_t stack_size,
-                                            Delegate* delegate,
-                                            ThreadPriority priority);
-
   // Joins with a thread created via the Create function.  This function blocks
   // the caller until the designated thread exits.  This will invalidate
   // |thread_handle|.
@@ -196,10 +184,6 @@
   // and |thread_handle| is invalidated after this call.
   static void Detach(PlatformThreadHandle thread_handle);
 
-  // Returns true if SetCurrentThreadPriority() can be used to increase the
-  // priority of the current thread.
-  static bool CanIncreaseCurrentThreadPriority();
-
   // Toggles the current thread's priority at runtime. A thread may not be able
   // to raise its priority back up after lowering it if the process does not
   // have a proper permission, e.g. CAP_SYS_NICE on Linux. A thread may not be
@@ -211,20 +195,6 @@
 
   static ThreadPriority GetCurrentThreadPriority();
 
-#if defined(OS_LINUX)
-  // Toggles a specific thread's priority at runtime. This can be used to
-  // change the priority of a thread in a different process and will fail
-  // if the calling process does not have proper permissions. The
-  // SetCurrentThreadPriority() function above is preferred in favor of
-  // security but on platforms where sandboxed processes are not allowed to
-  // change priority this function exists to allow a non-sandboxed process
-  // to change the priority of sandboxed threads for improved performance.
-  // Warning: Don't use this for a main thread because that will change the
-  // whole thread group's (i.e. process) priority.
-  static void SetThreadPriority(PlatformThreadId thread_id,
-                                ThreadPriority priority);
-#endif
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformThread);
 };
diff --git a/base/threading/platform_thread_linux.cc b/base/threading/platform_thread_linux.cc
index 474410f..ab7c97e 100644
--- a/base/threading/platform_thread_linux.cc
+++ b/base/threading/platform_thread_linux.cc
@@ -8,10 +8,8 @@
 #include <sched.h>
 #include <stddef.h>
 
-#include "base/files/file_util.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/strings/string_number_conversions.h"
 #include "base/threading/platform_thread_internal_posix.h"
 #include "base/threading/thread_id_name_manager.h"
 #include "base/tracked_objects.h"
@@ -20,68 +18,11 @@
 #if !defined(OS_NACL)
 #include <pthread.h>
 #include <sys/prctl.h>
-#include <sys/resource.h>
-#include <sys/time.h>
 #include <sys/types.h>
 #include <unistd.h>
 #endif
 
 namespace base {
-namespace {
-#if !defined(OS_NACL)
-const FilePath::CharType kCgroupDirectory[] =
-    FILE_PATH_LITERAL("/sys/fs/cgroup");
-
-FilePath ThreadPriorityToCgroupDirectory(const FilePath& cgroup_filepath,
-                                         ThreadPriority priority) {
-  switch (priority) {
-    case ThreadPriority::NORMAL:
-      return cgroup_filepath;
-    case ThreadPriority::BACKGROUND:
-      return cgroup_filepath.Append(FILE_PATH_LITERAL("non-urgent"));
-    case ThreadPriority::DISPLAY:
-    case ThreadPriority::REALTIME_AUDIO:
-      return cgroup_filepath.Append(FILE_PATH_LITERAL("urgent"));
-  }
-  NOTREACHED();
-  return FilePath();
-}
-
-void SetThreadCgroup(PlatformThreadId thread_id,
-                     const FilePath& cgroup_directory) {
-  FilePath tasks_filepath = cgroup_directory.Append(FILE_PATH_LITERAL("tasks"));
-  std::string tid = IntToString(thread_id);
-  int bytes_written = WriteFile(tasks_filepath, tid.c_str(), tid.size());
-  if (bytes_written != static_cast<int>(tid.size())) {
-    DVLOG(1) << "Failed to add " << tid << " to " << tasks_filepath.value();
-  }
-}
-
-void SetThreadCgroupForThreadPriority(PlatformThreadId thread_id,
-                                      const FilePath& cgroup_filepath,
-                                      ThreadPriority priority) {
-  // Append "chrome" suffix.
-  FilePath cgroup_directory = ThreadPriorityToCgroupDirectory(
-      cgroup_filepath.Append(FILE_PATH_LITERAL("chrome")), priority);
-
-  // Silently ignore request if cgroup directory doesn't exist.
-  if (!DirectoryExists(cgroup_directory))
-    return;
-
-  SetThreadCgroup(thread_id, cgroup_directory);
-}
-
-void SetThreadCgroupsForThreadPriority(PlatformThreadId thread_id,
-                                       ThreadPriority priority) {
-  FilePath cgroup_filepath(kCgroupDirectory);
-  SetThreadCgroupForThreadPriority(
-      thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("cpuset")), priority);
-  SetThreadCgroupForThreadPriority(
-      thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("schedtune")),
-      priority);
-}
-#endif
-}  // namespace
 
 namespace internal {
 
@@ -100,7 +41,6 @@
 
 bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
 #if !defined(OS_NACL)
-  SetThreadCgroupsForThreadPriority(PlatformThread::CurrentId(), priority);
   return priority == ThreadPriority::REALTIME_AUDIO &&
          pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
 #else
@@ -150,25 +90,6 @@
 #endif  //  !defined(OS_NACL)
 }
 
-#if !defined(OS_NACL)
-// static
-void PlatformThread::SetThreadPriority(PlatformThreadId thread_id,
-                                       ThreadPriority priority) {
-  // Changing current main threads' priority is not permitted in favor of
-  // security, this interface is restricted to change only non-main thread
-  // priority.
-  CHECK_NE(thread_id, getpid());
-
-  SetThreadCgroupsForThreadPriority(thread_id, priority);
-
-  const int nice_setting = internal::ThreadPriorityToNiceValue(priority);
-  if (setpriority(PRIO_PROCESS, thread_id, nice_setting)) {
-    DVPLOG(1) << "Failed to set nice value of thread (" << thread_id << ") to "
-              << nice_setting;
-  }
-}
-#endif  //  !defined(OS_NACL)
-
 void InitThreading() {}
 
 void TerminateOnThread() {}
diff --git a/base/threading/platform_thread_mac.mm b/base/threading/platform_thread_mac.mm
index e743044..51f3621 100644
--- a/base/threading/platform_thread_mac.mm
+++ b/base/threading/platform_thread_mac.mm
@@ -162,11 +162,6 @@
 }  // anonymous namespace
 
 // static
-bool PlatformThread::CanIncreaseCurrentThreadPriority() {
-  return true;
-}
-
-// static
 void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
   // Convert from pthread_t to mach thread identifier.
   mach_port_t mach_thread_id =
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
index 9a6a2bb..2321b3c 100644
--- a/base/threading/platform_thread_posix.cc
+++ b/base/threading/platform_thread_posix.cc
@@ -11,12 +11,9 @@
 #include <stdint.h>
 #include <sys/resource.h>
 #include <sys/time.h>
-#include <sys/types.h>
-#include <unistd.h>
 
 #include <memory>
 
-#include "base/debug/activity_tracker.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/threading/platform_thread_internal_posix.h"
@@ -26,6 +23,8 @@
 
 #if defined(OS_LINUX)
 #include <sys/syscall.h>
+#elif defined(OS_ANDROID)
+#include <sys/types.h>
 #endif
 
 namespace base {
@@ -188,32 +187,21 @@
 bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
                                         PlatformThreadHandle* thread_handle,
                                         ThreadPriority priority) {
-  return CreateThread(stack_size, true /* joinable thread */, delegate,
-                      thread_handle, priority);
+  return CreateThread(stack_size, true,  // joinable thread
+                      delegate, thread_handle, priority);
 }
 
 // static
 bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
-  return CreateNonJoinableWithPriority(stack_size, delegate,
-                                       ThreadPriority::NORMAL);
-}
-
-// static
-bool PlatformThread::CreateNonJoinableWithPriority(size_t stack_size,
-                                                   Delegate* delegate,
-                                                   ThreadPriority priority) {
   PlatformThreadHandle unused;
 
   bool result = CreateThread(stack_size, false /* non-joinable thread */,
-                             delegate, &unused, priority);
+                             delegate, &unused, ThreadPriority::NORMAL);
   return result;
 }
 
 // static
 void PlatformThread::Join(PlatformThreadHandle thread_handle) {
-  // Record the event that this thread is blocking upon (for hang diagnosis).
-  base::debug::ScopedThreadJoinActivity thread_activity(&thread_handle);
-
   // Joining another thread may block the current thread for a long time, since
   // the thread referred to by |thread_handle| may still be running long-lived /
   // blocking tasks.
@@ -230,18 +218,6 @@
 #if !defined(OS_MACOSX)
 
 // static
-bool PlatformThread::CanIncreaseCurrentThreadPriority() {
-#if defined(OS_NACL)
-  return false;
-#else
-  // Only root can raise thread priority on POSIX environment. On Linux, users
-  // who have CAP_SYS_NICE permission also can raise the thread priority, but
-  // libcap.so would be needed to check the capability.
-  return geteuid() == 0;
-#endif  // defined(OS_NACL)
-}
-
-// static
 void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
 #if defined(OS_NACL)
   NOTIMPLEMENTED();
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
index 0febf8b..2d99ed8 100644
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -12,6 +12,8 @@
 #include "testing/gtest/include/gtest/gtest.h"
 
 #if defined(OS_POSIX)
+#include <sys/types.h>
+#include <unistd.h>
 #include "base/threading/platform_thread_internal_posix.h"
 #elif defined(OS_WIN)
 #include <windows.h>
@@ -233,6 +235,17 @@
     ThreadPriority::NORMAL,
     ThreadPriority::BACKGROUND};
 
+bool IsBumpingPriorityAllowed() {
+#if defined(OS_POSIX)
+  // Only root can raise thread priority on POSIX environment. On Linux, users
+  // who have CAP_SYS_NICE permission also can raise the thread priority, but
+  // libcap.so would be needed to check the capability.
+  return geteuid() == 0;
+#else
+  return true;
+#endif
+}
+
 class ThreadPriorityTestThread : public FunctionTestThread {
  public:
   explicit ThreadPriorityTestThread(ThreadPriority priority)
@@ -260,9 +273,8 @@
 // Test changing a created thread's priority (which has different semantics on
 // some platforms).
 TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
-  const bool increase_priority_allowed =
-      PlatformThread::CanIncreaseCurrentThreadPriority();
-  if (increase_priority_allowed) {
+  const bool bumping_priority_allowed = IsBumpingPriorityAllowed();
+  if (bumping_priority_allowed) {
     // Bump the priority in order to verify that new threads are started with
     // normal priority.
     PlatformThread::SetCurrentThreadPriority(ThreadPriority::DISPLAY);
@@ -270,7 +282,7 @@
 
   // Toggle each supported priority on the thread and confirm it affects it.
   for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
-    if (!increase_priority_allowed &&
+    if (!bumping_priority_allowed &&
         kThreadPriorityTestValues[i] >
             PlatformThread::GetCurrentThreadPriority()) {
       continue;
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
index d16f8bd..c906866 100644
--- a/base/threading/post_task_and_reply_impl.cc
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -4,46 +4,42 @@
 
 #include "base/threading/post_task_and_reply_impl.h"
 
-#include <utility>
-
 #include "base/bind.h"
-#include "base/debug/leak_annotations.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "base/sequence_checker.h"
-#include "base/sequenced_task_runner.h"
-#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
 namespace {
 
-// This relay class remembers the sequence that it was created on, and ensures
-// that both the |task| and |reply| Closures are deleted on this same sequence.
-// Also, |task| is guaranteed to be deleted before |reply| is run or deleted.
+// This relay class remembers the MessageLoop that it was created on, and
+// ensures that both the |task| and |reply| Closures are deleted on this same
+// thread. Also, |task| is guaranteed to be deleted before |reply| is run or
+// deleted.
 //
-// If RunReplyAndSelfDestruct() doesn't run because the originating execution
-// context is no longer available, then the |task| and |reply| Closures are
-// leaked. Leaking is considered preferable to having a thread-safetey
-// violations caused by invoking the Closure destructor on the wrong sequence.
+// If this is not possible because the originating MessageLoop is no longer
+// available, the the |task| and |reply| Closures are leaked.  Leaking is
+// considered preferable to having a thread-safetey violations caused by
+// invoking the Closure destructor on the wrong thread.
 class PostTaskAndReplyRelay {
  public:
   PostTaskAndReplyRelay(const tracked_objects::Location& from_here,
-                        Closure task,
-                        Closure reply)
-      : sequence_checker_(),
-        from_here_(from_here),
-        origin_task_runner_(SequencedTaskRunnerHandle::Get()),
-        reply_(std::move(reply)),
-        task_(std::move(task)) {}
+                        const Closure& task,
+                        const Closure& reply)
+      : from_here_(from_here),
+        origin_task_runner_(ThreadTaskRunnerHandle::Get()) {
+    task_ = task;
+    reply_ = reply;
+  }
 
   ~PostTaskAndReplyRelay() {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
+    DCHECK(origin_task_runner_->BelongsToCurrentThread());
     task_.Reset();
     reply_.Reset();
   }
 
-  void RunTaskAndPostReply() {
+  void Run() {
     task_.Run();
     origin_task_runner_->PostTask(
         from_here_, Bind(&PostTaskAndReplyRelay::RunReplyAndSelfDestruct,
@@ -52,7 +48,7 @@
 
  private:
   void RunReplyAndSelfDestruct() {
-    DCHECK(sequence_checker_.CalledOnValidSequence());
+    DCHECK(origin_task_runner_->BelongsToCurrentThread());
 
     // Force |task_| to be released before |reply_| is to ensure that no one
     // accidentally depends on |task_| keeping one of its arguments alive while
@@ -65,9 +61,8 @@
     delete this;
   }
 
-  const SequenceChecker sequence_checker_;
-  const tracked_objects::Location from_here_;
-  const scoped_refptr<SequencedTaskRunner> origin_task_runner_;
+  tracked_objects::Location from_here_;
+  scoped_refptr<SingleThreadTaskRunner> origin_task_runner_;
   Closure reply_;
   Closure task_;
 };
@@ -78,19 +73,14 @@
 
 bool PostTaskAndReplyImpl::PostTaskAndReply(
     const tracked_objects::Location& from_here,
-    Closure task,
-    Closure reply) {
-  DCHECK(!task.is_null()) << from_here.ToString();
-  DCHECK(!reply.is_null()) << from_here.ToString();
+    const Closure& task,
+    const Closure& reply) {
+  // TODO(tzik): Use DCHECK here once the crash is gone. http://crbug.com/541319
+  CHECK(!task.is_null()) << from_here.ToString();
+  CHECK(!reply.is_null()) << from_here.ToString();
   PostTaskAndReplyRelay* relay =
-      new PostTaskAndReplyRelay(from_here, std::move(task), std::move(reply));
-  // PostTaskAndReplyRelay self-destructs after executing |reply|. On the flip
-  // side though, it is intentionally leaked if the |task| doesn't complete
-  // before the origin sequence stops executing tasks. Annotate |relay| as leaky
-  // to avoid having to suppress every callsite which happens to flakily trigger
-  // this race.
-  ANNOTATE_LEAKING_OBJECT_PTR(relay);
-  if (!PostTask(from_here, Bind(&PostTaskAndReplyRelay::RunTaskAndPostReply,
+      new PostTaskAndReplyRelay(from_here, task, reply);
+  if (!PostTask(from_here, Bind(&PostTaskAndReplyRelay::Run,
                                 Unretained(relay)))) {
     delete relay;
     return false;
diff --git a/base/threading/post_task_and_reply_impl.h b/base/threading/post_task_and_reply_impl.h
index 696b668..d21ab78 100644
--- a/base/threading/post_task_and_reply_impl.h
+++ b/base/threading/post_task_and_reply_impl.h
@@ -8,29 +8,30 @@
 #ifndef BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
 #define BASE_THREADING_POST_TASK_AND_REPLY_IMPL_H_
 
-#include "base/base_export.h"
-#include "base/callback.h"
+#include "base/callback_forward.h"
 #include "base/location.h"
 
 namespace base {
 namespace internal {
 
-// Inherit from this in a class that implements PostTask to send a task to a
-// custom execution context.
+// Inherit from this in a class that implements PostTask appropriately
+// for sending to a destination thread.
 //
-// If you're looking for a concrete implementation of PostTaskAndReply, you
-// probably want base::TaskRunner, or you may want base::WorkerPool.
-class BASE_EXPORT PostTaskAndReplyImpl {
+// Note that 'reply' will always get posted back to your current
+// MessageLoop.
+//
+// If you're looking for a concrete implementation of
+// PostTaskAndReply, you probably want base::SingleThreadTaskRunner, or you
+// may want base::WorkerPool.
+class PostTaskAndReplyImpl {
  public:
   virtual ~PostTaskAndReplyImpl() = default;
 
-  // Posts |task| by calling PostTask(). On completion, |reply| is posted to the
-  // sequence or thread that called this. Can only be called when
-  // SequencedTaskRunnerHandle::IsSet(). Both |task| and |reply| are guaranteed
-  // to be deleted on the sequence or thread that called this.
+  // Implementation for TaskRunner::PostTaskAndReply and
+  // WorkerPool::PostTaskAndReply.
   bool PostTaskAndReply(const tracked_objects::Location& from_here,
-                        Closure task,
-                        Closure reply);
+                        const Closure& task,
+                        const Closure& reply);
 
  private:
   virtual bool PostTask(const tracked_objects::Location& from_here,
diff --git a/base/threading/sequenced_task_runner_handle.cc b/base/threading/sequenced_task_runner_handle.cc
index 90f68b3..88b36a8 100644
--- a/base/threading/sequenced_task_runner_handle.cc
+++ b/base/threading/sequenced_task_runner_handle.cc
@@ -16,56 +16,39 @@
 
 namespace {
 
-LazyInstance<ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
+base::LazyInstance<base::ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
     lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
 
 }  // namespace
 
 // static
 scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
-  // Return the registered SingleThreadTaskRunner, if any. This must be at the
-  // top so that a SingleThreadTaskRunner has priority over a
-  // SequencedTaskRunner (RLZ registers both on the same thread despite that
-  // being prevented by DCHECKs).
-  // TODO(fdoray): Move this to the bottom once RLZ stops registering a
-  // SingleThreadTaskRunner and a SequencedTaskRunner on the same thread.
-  // https://crbug.com/618530#c14
-  if (ThreadTaskRunnerHandle::IsSet()) {
-    // Various modes of setting SequencedTaskRunnerHandle don't combine.
-    DCHECK(!lazy_tls_ptr.Pointer()->Get());
-    DCHECK(!SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid());
-
-    return ThreadTaskRunnerHandle::Get();
-  }
-
   // Return the registered SequencedTaskRunner, if any.
   const SequencedTaskRunnerHandle* handle = lazy_tls_ptr.Pointer()->Get();
   if (handle) {
     // Various modes of setting SequencedTaskRunnerHandle don't combine.
-    DCHECK(!SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid());
-
+    DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
+    DCHECK(!SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread());
     return handle->task_runner_;
   }
 
-  // If we are on a worker thread for a SequencedBlockingPool that is running a
-  // sequenced task, return a SequencedTaskRunner for it.
-  scoped_refptr<SequencedWorkerPool> pool =
-      SequencedWorkerPool::GetWorkerPoolForCurrentThread();
-  DCHECK(pool);
-  SequencedWorkerPool::SequenceToken sequence_token =
-      SequencedWorkerPool::GetSequenceTokenForCurrentThread();
-  DCHECK(sequence_token.IsValid());
-  scoped_refptr<SequencedTaskRunner> sequenced_task_runner(
-      pool->GetSequencedTaskRunner(sequence_token));
-  DCHECK(sequenced_task_runner->RunsTasksOnCurrentThread());
-  return sequenced_task_runner;
+  // Return the SequencedTaskRunner obtained from SequencedWorkerPool, if any.
+  scoped_refptr<base::SequencedTaskRunner> task_runner =
+      SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread();
+  if (task_runner) {
+    DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
+    return task_runner;
+  }
+
+  // Return the SingleThreadTaskRunner for the current thread otherwise.
+  return base::ThreadTaskRunnerHandle::Get();
 }
 
 // static
 bool SequencedTaskRunnerHandle::IsSet() {
   return lazy_tls_ptr.Pointer()->Get() ||
-         SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid() ||
-         ThreadTaskRunnerHandle::IsSet();
+         SequencedWorkerPool::GetWorkerPoolForCurrentThread() ||
+         base::ThreadTaskRunnerHandle::IsSet();
 }
 
 SequencedTaskRunnerHandle::SequencedTaskRunnerHandle(
diff --git a/base/threading/sequenced_task_runner_handle.h b/base/threading/sequenced_task_runner_handle.h
index b7f4bae..e6dec1e 100644
--- a/base/threading/sequenced_task_runner_handle.h
+++ b/base/threading/sequenced_task_runner_handle.h
@@ -25,9 +25,8 @@
   //    instantiating a SequencedTaskRunnerHandle.
   // b) The current thread has a ThreadTaskRunnerHandle (which includes any
   //    thread that has a MessageLoop associated with it), or
-  // c) The current thread is a worker thread belonging to a SequencedWorkerPool
-  //    *and* is currently running a sequenced task (note: not supporting
-  //    unsequenced tasks is intentional: https://crbug.com/618043#c4).
+  // c) The current thread is a worker thread belonging to a
+  //    SequencedWorkerPool.
   static bool IsSet();
 
   // Binds |task_runner| to the current thread.
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index ce594cd..57961b5 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -10,7 +10,6 @@
 #include <map>
 #include <memory>
 #include <set>
-#include <unordered_map>
 #include <utility>
 #include <vector>
 
@@ -18,7 +17,6 @@
 #include "base/callback.h"
 #include "base/compiler_specific.h"
 #include "base/critical_closure.h"
-#include "base/debug/dump_without_crashing.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/macros.h"
@@ -27,21 +25,15 @@
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
-// Don't enable the redirect to TaskScheduler on Arc++ to avoid pulling a bunch
-// of dependencies. Some code also #ifdef'ed below.
-#if 0
-#include "base/task_scheduler/post_task.h"
-#include "base/task_scheduler/task_scheduler.h"
-#endif
 #include "base/threading/platform_thread.h"
-#include "base/threading/sequenced_task_runner_handle.h"
 #include "base/threading/simple_thread.h"
 #include "base/threading/thread_local.h"
 #include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/trace_event.h"
 #include "base/tracked_objects.h"
-#include "base/tracking_info.h"
 #include "build/build_config.h"
 
 #if defined(OS_MACOSX)
@@ -51,36 +43,13 @@
 #endif
 
 #if !defined(OS_NACL)
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram.h"
 #endif
 
 namespace base {
 
 namespace {
 
-// An enum representing the state of all pools. A non-test process should only
-// ever transition from POST_TASK_DISABLED to one of the active states. A test
-// process may transition from one of the active states to POST_TASK_DISABLED
-// when DisableForProcessForTesting() is called.
-//
-// External memory synchronization is required to call a method that reads
-// |g_all_pools_state| after calling a method that modifies it.
-//
-// TODO(gab): Remove this if http://crbug.com/622400 fails (SequencedWorkerPool
-// will be phased out completely otherwise).
-enum class AllPoolsState {
-  POST_TASK_DISABLED,
-  USE_WORKER_POOL,
-  REDIRECTED_TO_TASK_SCHEDULER,
-};
-
-// TODO(fdoray): Change the initial state to POST_TASK_DISABLED. It is initially
-// USE_WORKER_POOL to avoid a revert of the CL that adds
-// debug::DumpWithoutCrashing() in case of waterfall failures.
-AllPoolsState g_all_pools_state = AllPoolsState::USE_WORKER_POOL;
-
-TaskPriority g_max_task_priority = TaskPriority::HIGHEST;
-
 struct SequencedTask : public TrackingInfo  {
   SequencedTask()
       : sequence_token_id(0),
@@ -123,14 +92,6 @@
   }
 };
 
-// Create a process-wide unique ID to represent this task in trace events. This
-// will be mangled with a Process ID hash to reduce the likelyhood of colliding
-// with MessageLoop pointers on other processes.
-uint64_t GetTaskTraceID(const SequencedTask& task, void* pool) {
-  return (static_cast<uint64_t>(task.trace_id) << 32) |
-         static_cast<uint64_t>(reinterpret_cast<intptr_t>(pool));
-}
-
 // SequencedWorkerPoolTaskRunner ---------------------------------------------
 // A TaskRunner which posts tasks to a SequencedWorkerPool with a
 // fixed ShutdownBehavior.
@@ -181,17 +142,14 @@
   return pool_->RunsTasksOnCurrentThread();
 }
 
-}  // namespace
-
-// SequencedWorkerPool::PoolSequencedTaskRunner ------------------------------
+// SequencedWorkerPoolSequencedTaskRunner ------------------------------------
 // A SequencedTaskRunner which posts tasks to a SequencedWorkerPool with a
 // fixed sequence token.
 //
 // Note that this class is RefCountedThreadSafe (inherited from TaskRunner).
-class SequencedWorkerPool::PoolSequencedTaskRunner
-    : public SequencedTaskRunner {
+class SequencedWorkerPoolSequencedTaskRunner : public SequencedTaskRunner {
  public:
-  PoolSequencedTaskRunner(
+  SequencedWorkerPoolSequencedTaskRunner(
       scoped_refptr<SequencedWorkerPool> pool,
       SequencedWorkerPool::SequenceToken token,
       SequencedWorkerPool::WorkerShutdown shutdown_behavior);
@@ -208,7 +166,7 @@
                                   TimeDelta delay) override;
 
  private:
-  ~PoolSequencedTaskRunner() override;
+  ~SequencedWorkerPoolSequencedTaskRunner() override;
 
   const scoped_refptr<SequencedWorkerPool> pool_;
 
@@ -216,25 +174,25 @@
 
   const SequencedWorkerPool::WorkerShutdown shutdown_behavior_;
 
-  DISALLOW_COPY_AND_ASSIGN(PoolSequencedTaskRunner);
+  DISALLOW_COPY_AND_ASSIGN(SequencedWorkerPoolSequencedTaskRunner);
 };
 
-SequencedWorkerPool::PoolSequencedTaskRunner::
-    PoolSequencedTaskRunner(
-        scoped_refptr<SequencedWorkerPool> pool,
-        SequencedWorkerPool::SequenceToken token,
-        SequencedWorkerPool::WorkerShutdown shutdown_behavior)
+SequencedWorkerPoolSequencedTaskRunner::SequencedWorkerPoolSequencedTaskRunner(
+    scoped_refptr<SequencedWorkerPool> pool,
+    SequencedWorkerPool::SequenceToken token,
+    SequencedWorkerPool::WorkerShutdown shutdown_behavior)
     : pool_(std::move(pool)),
       token_(token),
       shutdown_behavior_(shutdown_behavior) {}
 
-SequencedWorkerPool::PoolSequencedTaskRunner::
-    ~PoolSequencedTaskRunner() = default;
+SequencedWorkerPoolSequencedTaskRunner::
+~SequencedWorkerPoolSequencedTaskRunner() {
+}
 
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
-    PostDelayedTask(const tracked_objects::Location& from_here,
-                    const Closure& task,
-                    TimeDelta delay) {
+bool SequencedWorkerPoolSequencedTaskRunner::PostDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
   if (delay.is_zero()) {
     return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
         token_, from_here, task, shutdown_behavior_);
@@ -242,20 +200,29 @@
   return pool_->PostDelayedSequencedWorkerTask(token_, from_here, task, delay);
 }
 
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
-    RunsTasksOnCurrentThread() const {
+bool SequencedWorkerPoolSequencedTaskRunner::RunsTasksOnCurrentThread() const {
   return pool_->IsRunningSequenceOnCurrentThread(token_);
 }
 
-bool SequencedWorkerPool::PoolSequencedTaskRunner::
-    PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                               const Closure& task,
-                               TimeDelta delay) {
+bool SequencedWorkerPoolSequencedTaskRunner::PostNonNestableDelayedTask(
+    const tracked_objects::Location& from_here,
+    const Closure& task,
+    TimeDelta delay) {
   // There's no way to run nested tasks, so simply forward to
   // PostDelayedTask.
   return PostDelayedTask(from_here, task, delay);
 }
 
+// Create a process-wide unique ID to represent this task in trace events. This
+// will be mangled with a Process ID hash to reduce the likelyhood of colliding
+// with MessageLoop pointers on other processes.
+uint64_t GetTaskTraceID(const SequencedTask& task, void* pool) {
+  return (static_cast<uint64_t>(task.trace_id) << 32) |
+         static_cast<uint64_t>(reinterpret_cast<intptr_t>(pool));
+}
+
+}  // namespace
+
 // Worker ---------------------------------------------------------------------
 
 class SequencedWorkerPool::Worker : public SimpleThread {
@@ -280,14 +247,6 @@
     is_processing_task_ = true;
     task_sequence_token_ = token;
     task_shutdown_behavior_ = shutdown_behavior;
-
-    // It is dangerous for tasks with CONTINUE_ON_SHUTDOWN to access a class
-    // that implements a non-leaky base::Singleton because they are generally
-    // destroyed before the process terminates via an AtExitManager
-    // registration. This will trigger a DCHECK to warn of such cases. See the
-    // comment about CONTINUE_ON_SHUTDOWN for more details.
-    ThreadRestrictions::SetSingletonAllowed(task_shutdown_behavior_ !=
-                                            CONTINUE_ON_SHUTDOWN);
   }
 
   // Indicates that the task has finished running.
@@ -333,10 +292,8 @@
  public:
   // Take a raw pointer to |worker| to avoid cycles (since we're owned
   // by it).
-  Inner(SequencedWorkerPool* worker_pool,
-        size_t max_threads,
+  Inner(SequencedWorkerPool* worker_pool, size_t max_threads,
         const std::string& thread_name_prefix,
-        base::TaskPriority task_priority,
         TestingObserver* observer);
 
   ~Inner();
@@ -359,6 +316,11 @@
 
   bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
 
+  bool IsRunningSequence(SequenceToken sequence_token) const;
+
+  void SetRunningTaskInfoForCurrentThread(SequenceToken sequence_token,
+                                          WorkerShutdown shutdown_behavior);
+
   void CleanupForTesting();
 
   void SignalHasWorkForTesting();
@@ -387,25 +349,6 @@
     CLEANUP_DONE,
   };
 
-  // Clears ScheduledTasks in |tasks_to_delete| while ensuring that
-  // |this_worker| has the desired task info context during ~ScheduledTask() to
-  // allow sequence-checking.
-  void DeleteWithoutLock(std::vector<SequencedTask>* tasks_to_delete,
-                         Worker* this_worker);
-
-  // Helper used by PostTask() to complete the work when redirection is on.
-  // Returns true if the task may run at some point in the future and false if
-  // it will definitely not run.
-  // Coalesce upon resolution of http://crbug.com/622400.
-  bool PostTaskToTaskScheduler(const SequencedTask& sequenced,
-                               const TimeDelta& delay);
-
-  // Returns the TaskScheduler TaskRunner for the specified |sequence_token_id|
-  // and |traits|.
-  scoped_refptr<TaskRunner> GetTaskSchedulerTaskRunner(
-      int sequence_token_id,
-      const TaskTraits& traits);
-
   // Called from within the lock, this converts the given token name into a
   // token ID, creating a new one if necessary.
   int LockedGetNamedTokenID(const std::string& name);
@@ -430,7 +373,7 @@
   // See the implementation for a more detailed description.
   GetWorkStatus GetWork(SequencedTask* task,
                         TimeDelta* wait_time,
-                        std::vector<SequencedTask>* delete_these_outside_lock);
+                        std::vector<Closure>* delete_these_outside_lock);
 
   void HandleCleanup();
 
@@ -454,7 +397,7 @@
   // 0 or more. The caller should then call FinishStartingAdditionalThread to
   // complete initialization once the lock is released.
   //
-  // If another thread is not necessary, return 0;
+  // If another thread is not necessary, returne 0;
   //
   // See the implementedion for more.
   int PrepareToStartAdditionalThreadIfHelpful();
@@ -554,27 +497,6 @@
 
   TestingObserver* const testing_observer_;
 
-  // Members below are used for the experimental redirection to TaskScheduler.
-  // TODO(gab): Remove these if http://crbug.com/622400 fails
-  // (SequencedWorkerPool will be phased out completely otherwise).
-
-  // The TaskPriority to be used for SequencedWorkerPool tasks redirected to the
-  // TaskScheduler as an experiment (unused otherwise).
-  const base::TaskPriority task_priority_;
-
-  // A map of SequenceToken IDs to TaskScheduler TaskRunners used to redirect
-  // sequenced tasks to the TaskScheduler.
-  std::unordered_map<int, scoped_refptr<TaskRunner>> sequenced_task_runner_map_;
-
-  // TaskScheduler TaskRunners to redirect unsequenced tasks to the
-  // TaskScheduler. Indexed by TaskShutdownBehavior.
-  scoped_refptr<TaskRunner> unsequenced_task_runners_[3];
-
-  // A dummy TaskRunner obtained from TaskScheduler with the same TaskTraits as
-  // used by this SequencedWorkerPool to query for RunsTasksOnCurrentThread().
-  // Mutable so it can be lazily instantiated from RunsTasksOnCurrentThread().
-  mutable scoped_refptr<TaskRunner> runs_tasks_on_verifier_;
-
   DISALLOW_COPY_AND_ASSIGN(Inner);
 };
 
@@ -588,7 +510,6 @@
       worker_pool_(std::move(worker_pool)),
       task_shutdown_behavior_(BLOCK_SHUTDOWN),
       is_processing_task_(false) {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
   Start();
 }
 
@@ -596,8 +517,6 @@
 }
 
 void SequencedWorkerPool::Worker::Run() {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
-
 #if defined(OS_WIN)
   win::ScopedCOMInitializer com_initializer;
 #endif
@@ -633,11 +552,11 @@
 
 // Inner definitions ---------------------------------------------------------
 
-SequencedWorkerPool::Inner::Inner(SequencedWorkerPool* worker_pool,
-                                  size_t max_threads,
-                                  const std::string& thread_name_prefix,
-                                  base::TaskPriority task_priority,
-                                  TestingObserver* observer)
+SequencedWorkerPool::Inner::Inner(
+    SequencedWorkerPool* worker_pool,
+    size_t max_threads,
+    const std::string& thread_name_prefix,
+    TestingObserver* observer)
     : worker_pool_(worker_pool),
       lock_(),
       has_work_cv_(&lock_),
@@ -655,13 +574,7 @@
       cleanup_state_(CLEANUP_DONE),
       cleanup_idlers_(0),
       cleanup_cv_(&lock_),
-      testing_observer_(observer),
-      task_priority_(static_cast<int>(task_priority) <=
-                             static_cast<int>(g_max_task_priority)
-                         ? task_priority
-                         : g_max_task_priority) {
-  DCHECK_GT(max_threads_, 1U);
-}
+      testing_observer_(observer) {}
 
 SequencedWorkerPool::Inner::~Inner() {
   // You must call Shutdown() before destroying the pool.
@@ -698,13 +611,6 @@
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  // TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a
-  // revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the
-  // waterfall. https://crbug.com/622400
-  // DCHECK_NE(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
-  if (g_all_pools_state == AllPoolsState::POST_TASK_DISABLED)
-    debug::DumpWithoutCrashing();
-
   DCHECK(delay.is_zero() || shutdown_behavior == SKIP_ON_SHUTDOWN);
   SequencedTask sequenced(from_here);
   sequenced.sequence_token_id = sequence_token.id_;
@@ -718,7 +624,6 @@
   int create_thread_id = 0;
   {
     AutoLock lock(lock_);
-
     if (shutdown_called_) {
       // Don't allow a new task to be posted if it doesn't block shutdown.
       if (shutdown_behavior != BLOCK_SHUTDOWN)
@@ -754,178 +659,64 @@
     if (optional_token_name)
       sequenced.sequence_token_id = LockedGetNamedTokenID(*optional_token_name);
 
-    // See on top of the file why we don't compile this on Arc++.
-#if 0
-    if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
-      if (!PostTaskToTaskScheduler(sequenced, delay))
-        return false;
-    } else {
-#endif
-      pending_tasks_.insert(sequenced);
+    pending_tasks_.insert(sequenced);
+    if (shutdown_behavior == BLOCK_SHUTDOWN)
+      blocking_shutdown_pending_task_count_++;
 
-      if (sequenced.shutdown_behavior == BLOCK_SHUTDOWN)
-        blocking_shutdown_pending_task_count_++;
-
-      create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
-    }
-#if 0
-  }
-#endif
-
-  // Use != REDIRECTED_TO_TASK_SCHEDULER instead of == USE_WORKER_POOL to ensure
-  // correct behavior if a task is posted to a SequencedWorkerPool before
-  // Enable(WithRedirectionToTaskScheduler)ForProcess() in a non-DCHECK build.
-  if (g_all_pools_state != AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
-    // Actually start the additional thread or signal an existing one outside
-    // the lock.
-    if (create_thread_id)
-      FinishStartingAdditionalThread(create_thread_id);
-    else
-      SignalHasWork();
+    create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
   }
 
-#if DCHECK_IS_ON()
-  {
-    AutoLock lock_for_dcheck(lock_);
-    // Some variables are exposed in both modes for convenience but only really
-    // intended for one of them at runtime, confirm exclusive usage here.
-    if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
-      DCHECK(pending_tasks_.empty());
-      DCHECK_EQ(0, create_thread_id);
-    } else {
-      DCHECK(sequenced_task_runner_map_.empty());
-    }
-  }
-#endif  // DCHECK_IS_ON()
+  // Actually start the additional thread or signal an existing one now that
+  // we're outside the lock.
+  if (create_thread_id)
+    FinishStartingAdditionalThread(create_thread_id);
+  else
+    SignalHasWork();
 
   return true;
 }
 
-bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
-    const SequencedTask& sequenced,
-    const TimeDelta& delay) {
-#if 1
-  NOTREACHED();
-  ALLOW_UNUSED_PARAM(sequenced);
-  ALLOW_UNUSED_PARAM(delay);
-  return false;
-#else
-  DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
-
-  lock_.AssertAcquired();
-
-  // Confirm that the TaskScheduler's shutdown behaviors use the same
-  // underlying values as SequencedWorkerPool.
-  static_assert(
-      static_cast<int>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) ==
-          static_cast<int>(CONTINUE_ON_SHUTDOWN),
-      "TaskShutdownBehavior and WorkerShutdown enum mismatch for "
-      "CONTINUE_ON_SHUTDOWN.");
-  static_assert(static_cast<int>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) ==
-                    static_cast<int>(SKIP_ON_SHUTDOWN),
-                "TaskShutdownBehavior and WorkerShutdown enum mismatch for "
-                "SKIP_ON_SHUTDOWN.");
-  static_assert(static_cast<int>(TaskShutdownBehavior::BLOCK_SHUTDOWN) ==
-                    static_cast<int>(BLOCK_SHUTDOWN),
-                "TaskShutdownBehavior and WorkerShutdown enum mismatch for "
-                "BLOCK_SHUTDOWN.");
-
-  const TaskShutdownBehavior task_shutdown_behavior =
-      static_cast<TaskShutdownBehavior>(sequenced.shutdown_behavior);
-  const TaskTraits traits = TaskTraits()
-                                .MayBlock()
-                                .WithBaseSyncPrimitives()
-                                .WithPriority(task_priority_)
-                                .WithShutdownBehavior(task_shutdown_behavior);
-  return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits)
-      ->PostDelayedTask(sequenced.posted_from, sequenced.task, delay);
-#endif
-}
-
-scoped_refptr<TaskRunner>
-SequencedWorkerPool::Inner::GetTaskSchedulerTaskRunner(
-    int sequence_token_id,
-    const TaskTraits& traits) {
-#if 1
-  NOTREACHED();
-  ALLOW_UNUSED_PARAM(sequence_token_id);
-  ALLOW_UNUSED_PARAM(traits);
-  return scoped_refptr<TaskRunner>();
-#else
-  DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
-
-  lock_.AssertAcquired();
-
-  static_assert(
-      static_cast<int>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN) == 0,
-      "TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN must be equal to 0 to be "
-      "used as an index in |unsequenced_task_runners_|.");
-  static_assert(static_cast<int>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) == 1,
-                "TaskShutdownBehavior::SKIP_ON_SHUTDOWN must be equal to 1 to "
-                "be used as an index in |unsequenced_task_runners_|.");
-  static_assert(static_cast<int>(TaskShutdownBehavior::BLOCK_SHUTDOWN) == 2,
-                "TaskShutdownBehavior::BLOCK_SHUTDOWN must be equal to 2 to be "
-                "used as an index in |unsequenced_task_runners_|.");
-  static_assert(arraysize(unsequenced_task_runners_) == 3,
-                "The size of |unsequenced_task_runners_| doesn't match the "
-                "number of shutdown behaviors.");
-
-  scoped_refptr<TaskRunner>& task_runner =
-      sequence_token_id ? sequenced_task_runner_map_[sequence_token_id]
-                        : unsequenced_task_runners_[static_cast<int>(
-                              traits.shutdown_behavior())];
-
-  // TODO(fdoray): DCHECK that all tasks posted to the same sequence have the
-  // same shutdown behavior.
-
-  if (!task_runner) {
-    task_runner = sequence_token_id
-                      ? CreateSequencedTaskRunnerWithTraits(traits)
-                      : CreateTaskRunnerWithTraits(traits);
-  }
-
-  return task_runner;
-#endif
-}
-
 bool SequencedWorkerPool::Inner::RunsTasksOnCurrentThread() const {
   AutoLock lock(lock_);
-  if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
-#if 0
-    if (!runs_tasks_on_verifier_) {
-      runs_tasks_on_verifier_ = CreateTaskRunnerWithTraits(
-          TaskTraits().MayBlock().WithBaseSyncPrimitives().WithPriority(
-              task_priority_));
-    }
-#endif
-    return runs_tasks_on_verifier_->RunsTasksOnCurrentThread();
-  } else {
-    return ContainsKey(threads_, PlatformThread::CurrentId());
-  }
+  return ContainsKey(threads_, PlatformThread::CurrentId());
 }
 
 bool SequencedWorkerPool::Inner::IsRunningSequenceOnCurrentThread(
     SequenceToken sequence_token) const {
-  DCHECK(sequence_token.IsValid());
-
   AutoLock lock(lock_);
+  ThreadMap::const_iterator found = threads_.find(PlatformThread::CurrentId());
+  if (found == threads_.end())
+    return false;
+  return found->second->is_processing_task() &&
+         sequence_token.Equals(found->second->task_sequence_token());
+}
 
-  if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
-    const auto sequenced_task_runner_it =
-        sequenced_task_runner_map_.find(sequence_token.id_);
-    return sequenced_task_runner_it != sequenced_task_runner_map_.end() &&
-           sequenced_task_runner_it->second->RunsTasksOnCurrentThread();
-  } else {
-    ThreadMap::const_iterator found =
-        threads_.find(PlatformThread::CurrentId());
-    return found != threads_.end() && found->second->is_processing_task() &&
-           sequence_token.Equals(found->second->task_sequence_token());
-  }
+bool SequencedWorkerPool::Inner::IsRunningSequence(
+    SequenceToken sequence_token) const {
+  DCHECK(sequence_token.IsValid());
+  AutoLock lock(lock_);
+  return !IsSequenceTokenRunnable(sequence_token.id_);
+}
+
+void SequencedWorkerPool::Inner::SetRunningTaskInfoForCurrentThread(
+    SequenceToken sequence_token,
+    WorkerShutdown shutdown_behavior) {
+  AutoLock lock(lock_);
+  ThreadMap::const_iterator found = threads_.find(PlatformThread::CurrentId());
+  DCHECK(found != threads_.end());
+  DCHECK(found->second->is_processing_task());
+  DCHECK(!found->second->task_sequence_token().IsValid());
+  found->second->set_running_task_info(sequence_token, shutdown_behavior);
+
+  // Mark the sequence token as in use.
+  bool success = current_sequences_.insert(sequence_token.id_).second;
+  DCHECK(success);
 }
 
 // See https://code.google.com/p/chromium/issues/detail?id=168415
 void SequencedWorkerPool::Inner::CleanupForTesting() {
-  DCHECK_NE(g_all_pools_state, AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER);
+  DCHECK(!RunsTasksOnCurrentThread());
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
   AutoLock lock(lock_);
   CHECK_EQ(CLEANUP_DONE, cleanup_state_);
   if (shutdown_called_)
@@ -953,12 +744,8 @@
     if (shutdown_called_)
       return;
     shutdown_called_ = true;
-
     max_blocking_tasks_after_shutdown_ = max_new_blocking_tasks_after_shutdown;
 
-    if (g_all_pools_state != AllPoolsState::USE_WORKER_POOL)
-      return;
-
     // Tickle the threads. This will wake up a waiting one so it will know that
     // it can exit, which in turn will wake up any other waiting ones.
     SignalHasWork();
@@ -996,7 +783,6 @@
 }
 
 void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
   {
     AutoLock lock(lock_);
     DCHECK(thread_being_created_);
@@ -1015,15 +801,18 @@
       // See GetWork for what delete_these_outside_lock is doing.
       SequencedTask task;
       TimeDelta wait_time;
-      std::vector<SequencedTask> delete_these_outside_lock;
+      std::vector<Closure> delete_these_outside_lock;
       GetWorkStatus status =
           GetWork(&task, &wait_time, &delete_these_outside_lock);
       if (status == GET_WORK_FOUND) {
-        TRACE_TASK_EXECUTION("SequencedWorkerPool::Inner::ThreadLoop", task);
-        TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
-            "SequencedWorkerPool::Inner::PostTask",
+        TRACE_EVENT_WITH_FLOW2(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+            "SequencedWorkerPool::Inner::ThreadLoop",
             TRACE_ID_MANGLE(GetTaskTraceID(task, static_cast<void*>(this))),
-            TRACE_EVENT_FLAG_FLOW_IN);
+            TRACE_EVENT_FLAG_FLOW_IN,
+            "src_file", task.posted_from.file_name(),
+            "src_func", task.posted_from.function_name());
+        TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION task_event(
+            task.posted_from.file_name());
         int new_thread_id = WillRunWorkerTask(task);
         {
           AutoUnlock unlock(lock_);
@@ -1032,7 +821,7 @@
           // already get a signal for each new task, but it doesn't
           // hurt.)
           SignalHasWork();
-          DeleteWithoutLock(&delete_these_outside_lock, this_worker);
+          delete_these_outside_lock.clear();
 
           // Complete thread creation outside the lock if necessary.
           if (new_thread_id)
@@ -1049,6 +838,11 @@
           tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
               task, stopwatch);
 
+          // Update the sequence token in case it has been set from within the
+          // task, so it can be removed from the set of currently running
+          // sequences in DidRunWorkerTask() below.
+          task.sequence_token_id = this_worker->task_sequence_token().id_;
+
           // Make sure our task is erased outside the lock for the
           // same reason we do this with delete_these_oustide_lock.
           // Also, do it before calling reset_running_task_info() so
@@ -1063,7 +857,7 @@
         switch (status) {
           case GET_WORK_WAIT: {
               AutoUnlock unlock(lock_);
-              DeleteWithoutLock(&delete_these_outside_lock, this_worker);
+              delete_these_outside_lock.clear();
             }
             break;
           case GET_WORK_NOT_FOUND:
@@ -1085,7 +879,7 @@
         // help this case.
         if (shutdown_called_ && blocking_shutdown_pending_task_count_ == 0) {
           AutoUnlock unlock(lock_);
-          DeleteWithoutLock(&delete_these_outside_lock, this_worker);
+          delete_these_outside_lock.clear();
           break;
         }
 
@@ -1093,7 +887,7 @@
         // deletion must happen outside of the lock.
         if (delete_these_outside_lock.size()) {
           AutoUnlock unlock(lock_);
-          DeleteWithoutLock(&delete_these_outside_lock, this_worker);
+          delete_these_outside_lock.clear();
 
           // Since the lock has been released, |status| may no longer be
           // accurate. It might read GET_WORK_WAIT even if there are tasks
@@ -1116,9 +910,6 @@
         }
         waiting_thread_count_--;
       }
-      // |delete_these_outside_lock| should have been cleared via
-      // DeleteWithoutLock() above already.
-      DCHECK(delete_these_outside_lock.empty());
     }
   }  // Release lock_.
 
@@ -1130,22 +921,7 @@
   can_shutdown_cv_.Signal();
 }
 
-void SequencedWorkerPool::Inner::DeleteWithoutLock(
-    std::vector<SequencedTask>* tasks_to_delete,
-    Worker* this_worker) {
-  while (!tasks_to_delete->empty()) {
-    const SequencedTask& deleted_task = tasks_to_delete->back();
-    this_worker->set_running_task_info(
-        SequenceToken(deleted_task.sequence_token_id),
-        deleted_task.shutdown_behavior);
-    tasks_to_delete->pop_back();
-  }
-  this_worker->reset_running_task_info();
-}
-
 void SequencedWorkerPool::Inner::HandleCleanup() {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
-
   lock_.AssertAcquired();
   if (cleanup_state_ == CLEANUP_DONE)
     return;
@@ -1210,9 +986,7 @@
 SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
     SequencedTask* task,
     TimeDelta* wait_time,
-    std::vector<SequencedTask>* delete_these_outside_lock) {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
-
+    std::vector<Closure>* delete_these_outside_lock) {
   lock_.AssertAcquired();
 
   // Find the next task with a sequence token that's not currently in use.
@@ -1256,17 +1030,18 @@
       // shutdown. Delete it and get more work.
       //
       // Note that we do not want to delete unrunnable tasks. Deleting a task
-      // can have side effects (like freeing some objects) and deleting a task
-      // that's supposed to run after one that's currently running could cause
-      // an obscure crash.
+      // can have side effects (like freeing some objects) and deleting a
+      // task that's supposed to run after one that's currently running could
+      // cause an obscure crash.
       //
       // We really want to delete these tasks outside the lock in case the
-      // closures are holding refs to objects that want to post work from their
-      // destructors (which would deadlock). The closures are internally
-      // refcounted, so we just need to keep a copy of them alive until the lock
-      // is exited. The calling code can just clear() the vector they passed to
-      // us once the lock is exited to make this happen.
-      delete_these_outside_lock->push_back(*i);
+      // closures are holding refs to objects that want to post work from
+      // their destructorss (which would deadlock). The closures are
+      // internally refcounted, so we just need to keep a copy of them alive
+      // until the lock is exited. The calling code can just clear() the
+      // vector they passed to us once the lock is exited to make this
+      // happen.
+      delete_these_outside_lock->push_back(i->task);
       pending_tasks_.erase(i++);
       continue;
     }
@@ -1277,7 +1052,7 @@
       status = GET_WORK_WAIT;
       if (cleanup_state_ == CLEANUP_RUNNING) {
         // Deferred tasks are deleted when cleaning up, see Inner::ThreadLoop.
-        delete_these_outside_lock->push_back(*i);
+        delete_these_outside_lock->push_back(i->task);
         pending_tasks_.erase(i);
       }
       break;
@@ -1298,8 +1073,6 @@
 }
 
 int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
-
   lock_.AssertAcquired();
 
   // Mark the task's sequence number as in use.
@@ -1331,8 +1104,6 @@
 }
 
 void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
-
   lock_.AssertAcquired();
 
   if (task.shutdown_behavior != CONTINUE_ON_SHUTDOWN) {
@@ -1346,8 +1117,6 @@
 
 bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable(
     int sequence_token_id) const {
-  DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
-
   lock_.AssertAcquired();
   return !sequence_token_id ||
       current_sequences_.find(sequence_token_id) ==
@@ -1355,8 +1124,6 @@
 }
 
 int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() {
-  DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
-
   lock_.AssertAcquired();
   // How thread creation works:
   //
@@ -1407,8 +1174,6 @@
 
 void SequencedWorkerPool::Inner::FinishStartingAdditionalThread(
     int thread_number) {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
-
   // Called outside of the lock.
   DCHECK_GT(thread_number, 0);
 
@@ -1418,8 +1183,6 @@
 }
 
 void SequencedWorkerPool::Inner::SignalHasWork() {
-  DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
-
   has_work_cv_.Signal();
   if (testing_observer_) {
     testing_observer_->OnHasWork();
@@ -1427,7 +1190,6 @@
 }
 
 bool SequencedWorkerPool::Inner::CanShutdown() const {
-  DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
   lock_.AssertAcquired();
   // See PrepareToStartAdditionalThreadIfHelpful for how thread creation works.
   return !thread_being_created_ &&
@@ -1465,61 +1227,44 @@
 }
 
 // static
-void SequencedWorkerPool::EnableForProcess() {
-  // TODO(fdoray): Uncomment this line. It is initially commented to avoid a
-  // revert of the CL that adds debug::DumpWithoutCrashing() in case of
-  // waterfall failures.
-  // DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
-  g_all_pools_state = AllPoolsState::USE_WORKER_POOL;
+scoped_refptr<SequencedTaskRunner>
+SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread() {
+  Worker* worker = Worker::GetForCurrentThread();
+
+  // If there is no worker, this thread is not a worker thread. Otherwise, it is
+  // currently running a task (sequenced or unsequenced).
+  if (!worker)
+    return nullptr;
+
+  scoped_refptr<SequencedWorkerPool> pool = worker->worker_pool();
+  SequenceToken sequence_token = worker->task_sequence_token();
+  WorkerShutdown shutdown_behavior = worker->task_shutdown_behavior();
+  if (!sequence_token.IsValid()) {
+    // Create a new sequence token and bind this thread to it, to make sure that
+    // a task posted to the SequencedTaskRunner we are going to return is not
+    // immediately going to run on a different thread.
+    sequence_token = Inner::GetSequenceToken();
+    pool->inner_->SetRunningTaskInfoForCurrentThread(sequence_token,
+                                                     shutdown_behavior);
+  }
+
+  DCHECK(pool->IsRunningSequenceOnCurrentThread(sequence_token));
+  return new SequencedWorkerPoolSequencedTaskRunner(
+      std::move(pool), sequence_token, shutdown_behavior);
 }
 
-// static
-void SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess(
-    TaskPriority max_task_priority) {
-#if 1
-  NOTREACHED();
-  ALLOW_UNUSED_PARAM(max_task_priority);
-#else
-  // TODO(fdoray): Uncomment this line. It is initially commented to avoid a
-  // revert of the CL that adds debug::DumpWithoutCrashing() in case of
-  // waterfall failures.
-  // DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
-  DCHECK(TaskScheduler::GetInstance());
-  g_all_pools_state = AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER;
-  g_max_task_priority = max_task_priority;
-#endif
-}
-
-// static
-void SequencedWorkerPool::DisableForProcessForTesting() {
-  g_all_pools_state = AllPoolsState::POST_TASK_DISABLED;
-}
-
-// static
-bool SequencedWorkerPool::IsEnabled() {
-  return g_all_pools_state != AllPoolsState::POST_TASK_DISABLED;
+SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
+                                         const std::string& thread_name_prefix)
+    : constructor_task_runner_(ThreadTaskRunnerHandle::Get()),
+      inner_(new Inner(this, max_threads, thread_name_prefix, NULL)) {
 }
 
 SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
                                          const std::string& thread_name_prefix,
-                                         base::TaskPriority task_priority)
-    : constructor_task_runner_(SequencedTaskRunnerHandle::Get()),
-      inner_(new Inner(this,
-                       max_threads,
-                       thread_name_prefix,
-                       task_priority,
-                       NULL)) {}
-
-SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
-                                         const std::string& thread_name_prefix,
-                                         base::TaskPriority task_priority,
                                          TestingObserver* observer)
-    : constructor_task_runner_(SequencedTaskRunnerHandle::Get()),
-      inner_(new Inner(this,
-                       max_threads,
-                       thread_name_prefix,
-                       task_priority,
-                       observer)) {}
+    : constructor_task_runner_(ThreadTaskRunnerHandle::Get()),
+      inner_(new Inner(this, max_threads, thread_name_prefix, observer)) {
+}
 
 SequencedWorkerPool::~SequencedWorkerPool() {}
 
@@ -1550,7 +1295,7 @@
 scoped_refptr<SequencedTaskRunner>
 SequencedWorkerPool::GetSequencedTaskRunnerWithShutdownBehavior(
     SequenceToken token, WorkerShutdown shutdown_behavior) {
-  return new PoolSequencedTaskRunner(
+  return new SequencedWorkerPoolSequencedTaskRunner(
       this, token, shutdown_behavior);
 }
 
@@ -1633,19 +1378,18 @@
   return inner_->RunsTasksOnCurrentThread();
 }
 
+bool SequencedWorkerPool::IsRunningSequenceOnCurrentThread(
+    SequenceToken sequence_token) const {
+  return inner_->IsRunningSequenceOnCurrentThread(sequence_token);
+}
+
+bool SequencedWorkerPool::IsRunningSequence(
+    SequenceToken sequence_token) const {
+  return inner_->IsRunningSequence(sequence_token);
+}
+
 void SequencedWorkerPool::FlushForTesting() {
-  DCHECK(!RunsTasksOnCurrentThread());
-  base::ThreadRestrictions::ScopedAllowWait allow_wait;
-  if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
-#if 1
-    NOTREACHED();
-#else
-    // TODO(gab): Remove this if http://crbug.com/622400 fails.
-    TaskScheduler::GetInstance()->FlushForTesting();
-#endif
-  } else {
-    inner_->CleanupForTesting();
-  }
+  inner_->CleanupForTesting();
 }
 
 void SequencedWorkerPool::SignalHasWorkForTesting() {
@@ -1653,7 +1397,7 @@
 }
 
 void SequencedWorkerPool::Shutdown(int max_new_blocking_tasks_after_shutdown) {
-  DCHECK(constructor_task_runner_->RunsTasksOnCurrentThread());
+  DCHECK(constructor_task_runner_->BelongsToCurrentThread());
   inner_->Shutdown(max_new_blocking_tasks_after_shutdown);
 }
 
@@ -1661,9 +1405,4 @@
   return inner_->IsShutdownInProgress();
 }
 
-bool SequencedWorkerPool::IsRunningSequenceOnCurrentThread(
-    SequenceToken sequence_token) const {
-  return inner_->IsRunningSequenceOnCurrentThread(sequence_token);
-}
-
 }  // namespace base
diff --git a/base/threading/sequenced_worker_pool.h b/base/threading/sequenced_worker_pool.h
index 0d42de9..cbec395 100644
--- a/base/threading/sequenced_worker_pool.h
+++ b/base/threading/sequenced_worker_pool.h
@@ -13,11 +13,10 @@
 
 #include "base/base_export.h"
 #include "base/callback_forward.h"
-#include "base/compiler_specific.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
 #include "base/task_runner.h"
-#include "base/task_scheduler/task_traits.h"
 
 namespace tracked_objects {
 class Location;
@@ -25,10 +24,12 @@
 
 namespace base {
 
-class SequencedTaskRunner;
+class SingleThreadTaskRunner;
 
 template <class T> class DeleteHelper;
 
+class SequencedTaskRunner;
+
 // A worker thread pool that enforces ordering between sets of tasks. It also
 // allows you to specify what should happen to your tasks on shutdown.
 //
@@ -46,7 +47,8 @@
 //     destruction will be visible to T2.
 //
 // Example:
-//   SequencedWorkerPool::SequenceToken token = pool.GetSequenceToken();
+//   SequencedWorkerPool::SequenceToken token =
+//       SequencedWorkerPool::GetSequenceToken();
 //   pool.PostSequencedWorkerTask(token, SequencedWorkerPool::SKIP_ON_SHUTDOWN,
 //                                FROM_HERE, base::Bind(...));
 //   pool.PostSequencedWorkerTask(token, SequencedWorkerPool::SKIP_ON_SHUTDOWN,
@@ -59,10 +61,6 @@
 // These will be executed in an unspecified order. The order of execution
 // between tasks with different sequence tokens is also unspecified.
 //
-// You must call EnableForProcess() or
-// EnableWithRedirectionToTaskSchedulerForProcess() before starting to post
-// tasks to a process' SequencedWorkerPools.
-//
 // This class may be leaked on shutdown to facilitate fast shutdown. The
 // expected usage, however, is to call Shutdown(), which correctly accounts
 // for CONTINUE_ON_SHUTDOWN behavior and is required for BLOCK_SHUTDOWN
@@ -166,65 +164,36 @@
   // an unsequenced task, returns an invalid SequenceToken.
   static SequenceToken GetSequenceTokenForCurrentThread();
 
-  // Returns the SequencedWorkerPool that owns this thread, or null if the
-  // current thread is not a SequencedWorkerPool worker thread.
-  //
-  // Always returns nullptr when SequencedWorkerPool is redirected to
-  // TaskScheduler.
-  //
-  // DEPRECATED. Use SequencedTaskRunnerHandle::Get() instead. Consequentially
-  // the only remaining use case is in sequenced_task_runner_handle.cc to
-  // implement that and will soon be removed along with SequencedWorkerPool:
-  // http://crbug.com/622400.
-  static scoped_refptr<SequencedWorkerPool> GetWorkerPoolForCurrentThread();
+  // Gets a SequencedTaskRunner for the current thread. If the current thread is
+  // running an unsequenced task, a new SequenceToken will be generated and set,
+  // so that the returned SequencedTaskRunner is guaranteed to run tasks after
+  // the current task has finished running.
+  static scoped_refptr<SequencedTaskRunner>
+  GetSequencedTaskRunnerForCurrentThread();
 
   // Returns a unique token that can be used to sequence tasks posted to
   // PostSequencedWorkerTask(). Valid tokens are always nonzero.
+  // TODO(bauerb): Rename this to better differentiate from
+  // GetSequenceTokenForCurrentThread().
   static SequenceToken GetSequenceToken();
 
-  // Enables posting tasks to this process' SequencedWorkerPools. Cannot be
-  // called if already enabled. This is not thread-safe; proper synchronization
-  // is required to use any SequencedWorkerPool method after calling this.
-  static void EnableForProcess();
-
-  // Same as EnableForProcess(), but tasks are redirected to the registered
-  // TaskScheduler. All redirections' TaskPriority will be capped to
-  // |max_task_priority|. There must be a registered TaskScheduler when this is
-  // called.
-  // TODO(gab): Remove this if http://crbug.com/622400 fails
-  // (SequencedWorkerPool will be phased out completely otherwise).
-  static void EnableWithRedirectionToTaskSchedulerForProcess(
-      TaskPriority max_task_priority = TaskPriority::HIGHEST);
-
-  // Disables posting tasks to this process' SequencedWorkerPools. Calling this
-  // while there are active SequencedWorkerPools is not supported. This is not
-  // thread-safe; proper synchronization is required to use any
-  // SequencedWorkerPool method after calling this.
-  static void DisableForProcessForTesting();
-
-  // Returns true if posting tasks to this process' SequencedWorkerPool is
-  // enabled (with or without redirection to TaskScheduler).
-  static bool IsEnabled();
+  // Returns the SequencedWorkerPool that owns this thread, or null if the
+  // current thread is not a SequencedWorkerPool worker thread.
+  static scoped_refptr<SequencedWorkerPool> GetWorkerPoolForCurrentThread();
 
   // When constructing a SequencedWorkerPool, there must be a
   // ThreadTaskRunnerHandle on the current thread unless you plan to
   // deliberately leak it.
 
-  // Constructs a SequencedWorkerPool which will lazily create up to
-  // |max_threads| and a prefix for the thread name to aid in debugging.
-  // |max_threads| must be greater than 1. |task_priority| will be used to hint
-  // base::TaskScheduler for an experiment in which all SequencedWorkerPool
-  // tasks will be redirected to it in processes where a base::TaskScheduler was
-  // instantiated.
+  // Pass the maximum number of threads (they will be lazily created as needed)
+  // and a prefix for the thread name to aid in debugging.
   SequencedWorkerPool(size_t max_threads,
-                      const std::string& thread_name_prefix,
-                      base::TaskPriority task_priority);
+                      const std::string& thread_name_prefix);
 
   // Like above, but with |observer| for testing.  Does not take ownership of
   // |observer|.
   SequencedWorkerPool(size_t max_threads,
                       const std::string& thread_name_prefix,
-                      base::TaskPriority task_priority,
                       TestingObserver* observer);
 
   // Returns the sequence token associated with the given name. Calling this
@@ -238,7 +207,7 @@
   // delay are posted with SKIP_ON_SHUTDOWN behavior and tasks with zero delay
   // are posted with BLOCK_SHUTDOWN behavior.
   scoped_refptr<SequencedTaskRunner> GetSequencedTaskRunner(
-      SequenceToken token) WARN_UNUSED_RESULT;
+      SequenceToken token);
 
   // Returns a SequencedTaskRunner wrapper which posts to this
   // SequencedWorkerPool using the given sequence token. Tasks with nonzero
@@ -246,14 +215,14 @@
   // are posted with the given shutdown behavior.
   scoped_refptr<SequencedTaskRunner> GetSequencedTaskRunnerWithShutdownBehavior(
       SequenceToken token,
-      WorkerShutdown shutdown_behavior) WARN_UNUSED_RESULT;
+      WorkerShutdown shutdown_behavior);
 
   // Returns a TaskRunner wrapper which posts to this SequencedWorkerPool using
   // the given shutdown behavior. Tasks with nonzero delay are posted with
   // SKIP_ON_SHUTDOWN behavior and tasks with zero delay are posted with the
   // given shutdown behavior.
   scoped_refptr<TaskRunner> GetTaskRunnerWithShutdownBehavior(
-      WorkerShutdown shutdown_behavior) WARN_UNUSED_RESULT;
+      WorkerShutdown shutdown_behavior);
 
   // Posts the given task for execution in the worker pool. Tasks posted with
   // this function will execute in an unspecified order on a background thread.
@@ -347,21 +316,23 @@
                        TimeDelta delay) override;
   bool RunsTasksOnCurrentThread() const override;
 
+  // Returns true if the current thread is processing a task with the given
+  // sequence_token.
+  bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
+
+  // Returns true if any thread is currently processing a task with the given
+  // sequence token. Should only be called with a valid sequence token.
+  bool IsRunningSequence(SequenceToken sequence_token) const;
+
   // Blocks until all pending tasks are complete. This should only be called in
   // unit tests when you want to validate something that should have happened.
-  // Does not wait for delayed tasks. If redirection to TaskScheduler is
-  // disabled, delayed tasks are deleted. If redirection to TaskScheduler is
-  // enabled, this will wait for all tasks posted to TaskScheduler (not just
-  // tasks posted to this SequencedWorkerPool).
+  // This will not flush delayed tasks; delayed tasks get deleted.
   //
   // Note that calling this will not prevent other threads from posting work to
   // the queue while the calling thread is waiting on Flush(). In this case,
   // Flush will return only when there's no more work in the queue. Normally,
   // this doesn't come up since in a test, all the work is being posted from
   // the main thread.
-  //
-  // TODO(gab): Remove mentions of TaskScheduler in this comment if
-  // http://crbug.com/622400 fails.
   void FlushForTesting();
 
   // Spuriously signal that there is work to be done.
@@ -397,14 +368,9 @@
   friend class DeleteHelper<SequencedWorkerPool>;
 
   class Inner;
-  class PoolSequencedTaskRunner;
   class Worker;
 
-  // Returns true if the current thread is processing a task with the given
-  // sequence_token.
-  bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
-
-  const scoped_refptr<SequencedTaskRunner> constructor_task_runner_;
+  const scoped_refptr<SingleThreadTaskRunner> constructor_task_runner_;
 
   // Avoid pulling in too many headers by putting (almost) everything
   // into |inner_|.
diff --git a/base/threading/simple_thread.cc b/base/threading/simple_thread.cc
index 9eb443a..6c64a17 100644
--- a/base/threading/simple_thread.cc
+++ b/base/threading/simple_thread.cc
@@ -12,55 +12,62 @@
 namespace base {
 
 SimpleThread::SimpleThread(const std::string& name_prefix)
-    : SimpleThread(name_prefix, Options()) {}
+    : name_prefix_(name_prefix),
+      name_(name_prefix),
+      thread_(),
+      event_(WaitableEvent::ResetPolicy::MANUAL,
+             WaitableEvent::InitialState::NOT_SIGNALED),
+      tid_(0),
+      joined_(false) {}
 
 SimpleThread::SimpleThread(const std::string& name_prefix,
                            const Options& options)
     : name_prefix_(name_prefix),
+      name_(name_prefix),
       options_(options),
+      thread_(),
       event_(WaitableEvent::ResetPolicy::MANUAL,
-             WaitableEvent::InitialState::NOT_SIGNALED) {}
+             WaitableEvent::InitialState::NOT_SIGNALED),
+      tid_(0),
+      joined_(false) {}
 
 SimpleThread::~SimpleThread() {
   DCHECK(HasBeenStarted()) << "SimpleThread was never started.";
-  DCHECK(!options_.joinable || HasBeenJoined())
-      << "Joinable SimpleThread destroyed without being Join()ed.";
+  DCHECK(HasBeenJoined()) << "SimpleThread destroyed without being Join()ed.";
 }
 
 void SimpleThread::Start() {
   DCHECK(!HasBeenStarted()) << "Tried to Start a thread multiple times.";
-  bool success =
-      options_.joinable
-          ? PlatformThread::CreateWithPriority(options_.stack_size, this,
-                                               &thread_, options_.priority)
-          : PlatformThread::CreateNonJoinableWithPriority(
-                options_.stack_size, this, options_.priority);
+  bool success;
+  if (options_.priority() == ThreadPriority::NORMAL) {
+    success = PlatformThread::Create(options_.stack_size(), this, &thread_);
+  } else {
+    success = PlatformThread::CreateWithPriority(options_.stack_size(), this,
+                                                 &thread_, options_.priority());
+  }
   DCHECK(success);
-  ThreadRestrictions::ScopedAllowWait allow_wait;
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
   event_.Wait();  // Wait for the thread to complete initialization.
 }
 
 void SimpleThread::Join() {
-  DCHECK(options_.joinable) << "A non-joinable thread can't be joined.";
   DCHECK(HasBeenStarted()) << "Tried to Join a never-started thread.";
   DCHECK(!HasBeenJoined()) << "Tried to Join a thread multiple times.";
   PlatformThread::Join(thread_);
-  thread_ = PlatformThreadHandle();
   joined_ = true;
 }
 
 bool SimpleThread::HasBeenStarted() {
-  ThreadRestrictions::ScopedAllowWait allow_wait;
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
   return event_.IsSignaled();
 }
 
 void SimpleThread::ThreadMain() {
   tid_ = PlatformThread::CurrentId();
   // Construct our full name of the form "name_prefix_/TID".
-  std::string name(name_prefix_);
-  name.push_back('/');
-  name.append(IntToString(tid_));
-  PlatformThread::SetName(name);
+  name_.push_back('/');
+  name_.append(IntToString(tid_));
+  PlatformThread::SetName(name_);
 
   // We've initialized our new thread, signal that we're done to Start().
   event_.Signal();
@@ -70,26 +77,24 @@
 
 DelegateSimpleThread::DelegateSimpleThread(Delegate* delegate,
                                            const std::string& name_prefix)
-    : DelegateSimpleThread(delegate, name_prefix, Options()) {}
+    : SimpleThread(name_prefix),
+      delegate_(delegate) {
+}
 
 DelegateSimpleThread::DelegateSimpleThread(Delegate* delegate,
                                            const std::string& name_prefix,
                                            const Options& options)
     : SimpleThread(name_prefix, options),
       delegate_(delegate) {
-  DCHECK(delegate_);
 }
 
-DelegateSimpleThread::~DelegateSimpleThread() = default;
+DelegateSimpleThread::~DelegateSimpleThread() {
+}
 
 void DelegateSimpleThread::Run() {
   DCHECK(delegate_) << "Tried to call Run without a delegate (called twice?)";
-
-  // Non-joinable DelegateSimpleThreads are allowed to be deleted during Run().
-  // Member state must not be accessed after invoking Run().
-  Delegate* delegate = delegate_;
-  delegate_ = nullptr;
-  delegate->Run();
+  delegate_->Run();
+  delegate_ = NULL;
 }
 
 DelegateSimpleThreadPool::DelegateSimpleThreadPool(
diff --git a/base/threading/simple_thread.h b/base/threading/simple_thread.h
index f9f5e91..3deeb10 100644
--- a/base/threading/simple_thread.h
+++ b/base/threading/simple_thread.h
@@ -48,7 +48,6 @@
 
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
-#include "base/macros.h"
 #include "base/synchronization/lock.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/platform_thread.h"
@@ -59,26 +58,25 @@
 // virtual Run method, or you can use the DelegateSimpleThread interface.
 class BASE_EXPORT SimpleThread : public PlatformThread::Delegate {
  public:
-  struct BASE_EXPORT Options {
+  class BASE_EXPORT Options {
    public:
-    Options() = default;
-    explicit Options(ThreadPriority priority_in) : priority(priority_in) {}
-    ~Options() = default;
+    Options() : stack_size_(0), priority_(ThreadPriority::NORMAL) {}
+    explicit Options(ThreadPriority priority)
+        : stack_size_(0), priority_(priority) {}
+    ~Options() {}
 
-    // Allow copies.
-    Options(const Options& other) = default;
-    Options& operator=(const Options& other) = default;
+    // We use the standard compiler-supplied copy constructor.
 
     // A custom stack size, or 0 for the system default.
-    size_t stack_size = 0;
+    void set_stack_size(size_t size) { stack_size_ = size; }
+    size_t stack_size() const { return stack_size_; }
 
-    ThreadPriority priority = ThreadPriority::NORMAL;
-
-    // If false, the underlying thread's PlatformThreadHandle will not be kept
-    // around and as such the SimpleThread instance will not be Join()able and
-    // must not be deleted before Run() is invoked. After that, it's up to
-    // the subclass to determine when it is safe to delete itself.
-    bool joinable = true;
+    // A custom thread priority.
+    void set_priority(ThreadPriority priority) { priority_ = priority; }
+    ThreadPriority priority() const { return priority_; }
+   private:
+    size_t stack_size_;
+    ThreadPriority priority_;
   };
 
   // Create a SimpleThread.  |options| should be used to manage any specific
@@ -96,13 +94,19 @@
   // Subclasses should override the Run method.
   virtual void Run() = 0;
 
+  // Return the thread name prefix, or "unnamed" if none was supplied.
+  std::string name_prefix() { return name_prefix_; }
+
+  // Return the completed name including TID, only valid after Start().
+  std::string name() { return name_; }
+
   // Return the thread id, only valid after Start().
   PlatformThreadId tid() { return tid_; }
 
   // Return True if Start() has ever been called.
   bool HasBeenStarted();
 
-  // Return True if Join() has ever been called.
+  // Return True if Join() has evern been called.
   bool HasBeenJoined() { return joined_; }
 
   // Overridden from PlatformThread::Delegate:
@@ -112,24 +116,18 @@
   const std::string name_prefix_;
   std::string name_;
   const Options options_;
-  PlatformThreadHandle thread_;  // PlatformThread handle, reset after Join.
+  PlatformThreadHandle thread_;  // PlatformThread handle, invalid after Join!
   WaitableEvent event_;          // Signaled if Start() was ever called.
-  PlatformThreadId tid_ = kInvalidThreadId;  // The backing thread's id.
-  bool joined_ = false;                      // True if Join has been called.
-
-  DISALLOW_COPY_AND_ASSIGN(SimpleThread);
+  PlatformThreadId tid_;         // The backing thread's id.
+  bool joined_;                  // True if Join has been called.
 };
 
-// A SimpleThread which delegates Run() to its Delegate. Non-joinable
-// DelegateSimpleThread are safe to delete after Run() was invoked, their
-// Delegates are also safe to delete after that point from this class' point of
-// view (although implementations must of course make sure that Run() will not
-// use their Delegate's member state after its deletion).
 class BASE_EXPORT DelegateSimpleThread : public SimpleThread {
  public:
   class BASE_EXPORT Delegate {
    public:
-    virtual ~Delegate() = default;
+    Delegate() { }
+    virtual ~Delegate() { }
     virtual void Run() = 0;
   };
 
@@ -144,8 +142,6 @@
 
  private:
   Delegate* delegate_;
-
-  DISALLOW_COPY_AND_ASSIGN(DelegateSimpleThread);
 };
 
 // DelegateSimpleThreadPool allows you to start up a fixed number of threads,
@@ -190,8 +186,6 @@
   std::queue<Delegate*> delegates_;
   base::Lock lock_;            // Locks delegates_
   WaitableEvent dry_;    // Not signaled when there is no work to do.
-
-  DISALLOW_COPY_AND_ASSIGN(DelegateSimpleThreadPool);
 };
 
 }  // namespace base
diff --git a/base/threading/simple_thread_unittest.cc b/base/threading/simple_thread_unittest.cc
index 0e52500..14dd459 100644
--- a/base/threading/simple_thread_unittest.cc
+++ b/base/threading/simple_thread_unittest.cc
@@ -2,13 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include <memory>
-
 #include "base/atomic_sequence_num.h"
-#include "base/memory/ptr_util.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/test/gtest_util.h"
 #include "base/threading/simple_thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -21,49 +17,11 @@
   SetIntRunner(int* ptr, int val) : ptr_(ptr), val_(val) { }
   ~SetIntRunner() override {}
 
- private:
   void Run() override { *ptr_ = val_; }
 
+ private:
   int* ptr_;
   int val_;
-
-  DISALLOW_COPY_AND_ASSIGN(SetIntRunner);
-};
-
-// Signals |started_| when Run() is invoked and waits until |released_| is
-// signaled to return, signaling |done_| before doing so. Useful for tests that
-// care to control Run()'s flow.
-class ControlledRunner : public DelegateSimpleThread::Delegate {
- public:
-  ControlledRunner()
-      : started_(WaitableEvent::ResetPolicy::MANUAL,
-                 WaitableEvent::InitialState::NOT_SIGNALED),
-        released_(WaitableEvent::ResetPolicy::MANUAL,
-                  WaitableEvent::InitialState::NOT_SIGNALED),
-        done_(WaitableEvent::ResetPolicy::MANUAL,
-              WaitableEvent::InitialState::NOT_SIGNALED) {}
-
-  ~ControlledRunner() override { ReleaseAndWaitUntilDone(); }
-
-  void WaitUntilStarted() { started_.Wait(); }
-
-  void ReleaseAndWaitUntilDone() {
-    released_.Signal();
-    done_.Wait();
-  }
-
- private:
-  void Run() override {
-    started_.Signal();
-    released_.Wait();
-    done_.Signal();
-  }
-
-  WaitableEvent started_;
-  WaitableEvent released_;
-  WaitableEvent done_;
-
-  DISALLOW_COPY_AND_ASSIGN(ControlledRunner);
 };
 
 class WaitEventRunner : public DelegateSimpleThread::Delegate {
@@ -71,28 +29,22 @@
   explicit WaitEventRunner(WaitableEvent* event) : event_(event) { }
   ~WaitEventRunner() override {}
 
- private:
   void Run() override {
     EXPECT_FALSE(event_->IsSignaled());
     event_->Signal();
     EXPECT_TRUE(event_->IsSignaled());
   }
-
+ private:
   WaitableEvent* event_;
-
-  DISALLOW_COPY_AND_ASSIGN(WaitEventRunner);
 };
 
 class SeqRunner : public DelegateSimpleThread::Delegate {
  public:
   explicit SeqRunner(AtomicSequenceNumber* seq) : seq_(seq) { }
-
- private:
   void Run() override { seq_->GetNext(); }
 
+ private:
   AtomicSequenceNumber* seq_;
-
-  DISALLOW_COPY_AND_ASSIGN(SeqRunner);
 };
 
 // We count up on a sequence number, firing on the event when we've hit our
@@ -104,7 +56,6 @@
                    int total, WaitableEvent* event)
       : seq_(seq), total_(total), event_(event) { }
 
- private:
   void Run() override {
     if (seq_->GetNext() == total_) {
       event_->Signal();
@@ -113,11 +64,10 @@
     }
   }
 
+ private:
   AtomicSequenceNumber* seq_;
   int total_;
   WaitableEvent* event_;
-
-  DISALLOW_COPY_AND_ASSIGN(VerifyPoolRunner);
 };
 
 }  // namespace
@@ -158,44 +108,29 @@
   thread.Join();
 }
 
-TEST(SimpleThreadTest, NonJoinableStartAndDieOnJoin) {
-  ControlledRunner runner;
+TEST(SimpleThreadTest, NamedWithOptions) {
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
 
+  WaitEventRunner runner(&event);
   SimpleThread::Options options;
-  options.joinable = false;
-  DelegateSimpleThread thread(&runner, "non_joinable", options);
+  DelegateSimpleThread thread(&runner, "event_waiter", options);
+  EXPECT_EQ(thread.name_prefix(), "event_waiter");
+  EXPECT_FALSE(event.IsSignaled());
 
-  EXPECT_FALSE(thread.HasBeenStarted());
   thread.Start();
-  EXPECT_TRUE(thread.HasBeenStarted());
+  EXPECT_EQ(thread.name_prefix(), "event_waiter");
+  EXPECT_EQ(thread.name(),
+            std::string("event_waiter/") + IntToString(thread.tid()));
+  event.Wait();
 
-  // Note: this is not quite the same as |thread.HasBeenStarted()| which
-  // represents ThreadMain() getting ready to invoke Run() whereas
-  // |runner.WaitUntilStarted()| ensures Run() was actually invoked.
-  runner.WaitUntilStarted();
+  EXPECT_TRUE(event.IsSignaled());
+  thread.Join();
 
-  EXPECT_FALSE(thread.HasBeenJoined());
-  EXPECT_DCHECK_DEATH({ thread.Join(); });
-}
-
-TEST(SimpleThreadTest, NonJoinableInactiveDelegateDestructionIsOkay) {
-  std::unique_ptr<ControlledRunner> runner(new ControlledRunner);
-
-  SimpleThread::Options options;
-  options.joinable = false;
-  std::unique_ptr<DelegateSimpleThread> thread(
-      new DelegateSimpleThread(runner.get(), "non_joinable", options));
-
-  thread->Start();
-  runner->WaitUntilStarted();
-
-  // Deleting a non-joinable SimpleThread after Run() was invoked is okay.
-  thread.reset();
-
-  runner->WaitUntilStarted();
-  runner->ReleaseAndWaitUntilDone();
-  // It should be safe to destroy a Delegate after its Run() method completed.
-  runner.reset();
+  // We keep the name and tid, even after the thread is gone.
+  EXPECT_EQ(thread.name_prefix(), "event_waiter");
+  EXPECT_EQ(thread.name(),
+            std::string("event_waiter/") + IntToString(thread.tid()));
 }
 
 TEST(SimpleThreadTest, ThreadPool) {
diff --git a/base/threading/thread.cc b/base/threading/thread.cc
index c30320f..9cdc691 100644
--- a/base/threading/thread.cc
+++ b/base/threading/thread.cc
@@ -5,10 +5,8 @@
 #include "base/threading/thread.h"
 
 #include "base/bind.h"
-#include "base/bind_helpers.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
-#include "base/logging.h"
 #include "base/run_loop.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread_id_name_manager.h"
@@ -16,10 +14,6 @@
 #include "base/threading/thread_restrictions.h"
 #include "build/build_config.h"
 
-#if defined(OS_POSIX) && !defined(OS_NACL)
-#include "base/files/file_descriptor_watcher_posix.h"
-#endif
-
 #if defined(OS_WIN)
 #include "base/win/scoped_com_initializer.h"
 #endif
@@ -32,31 +26,53 @@
 // because its Stop method was called.  This allows us to catch cases where
 // MessageLoop::QuitWhenIdle() is called directly, which is unexpected when
 // using a Thread to setup and run a MessageLoop.
-base::LazyInstance<base::ThreadLocalBoolean>::Leaky lazy_tls_bool =
+base::LazyInstance<base::ThreadLocalBoolean> lazy_tls_bool =
     LAZY_INSTANCE_INITIALIZER;
 
 }  // namespace
 
-Thread::Options::Options() = default;
+// This is used to trigger the message loop to exit.
+void ThreadQuitHelper() {
+  MessageLoop::current()->QuitWhenIdle();
+  Thread::SetThreadWasQuitProperly(true);
+}
 
-Thread::Options::Options(MessageLoop::Type type, size_t size)
-    : message_loop_type(type), stack_size(size) {}
+Thread::Options::Options()
+    : message_loop_type(MessageLoop::TYPE_DEFAULT),
+      timer_slack(TIMER_SLACK_NONE),
+      stack_size(0),
+      priority(ThreadPriority::NORMAL) {
+}
+
+Thread::Options::Options(MessageLoop::Type type,
+                         size_t size)
+    : message_loop_type(type),
+      timer_slack(TIMER_SLACK_NONE),
+      stack_size(size),
+      priority(ThreadPriority::NORMAL) {
+}
 
 Thread::Options::Options(const Options& other) = default;
 
-Thread::Options::~Options() = default;
+Thread::Options::~Options() {
+}
 
 Thread::Thread(const std::string& name)
-    : id_event_(WaitableEvent::ResetPolicy::MANUAL,
+    :
+#if defined(OS_WIN)
+      com_status_(NONE),
+#endif
+      stopping_(false),
+      running_(false),
+      thread_(0),
+      id_(kInvalidThreadId),
+      id_event_(WaitableEvent::ResetPolicy::MANUAL,
                 WaitableEvent::InitialState::NOT_SIGNALED),
+      message_loop_(nullptr),
+      message_loop_timer_slack_(TIMER_SLACK_NONE),
       name_(name),
       start_event_(WaitableEvent::ResetPolicy::MANUAL,
                    WaitableEvent::InitialState::NOT_SIGNALED) {
-  // Only bind the sequence on Start(): the state is constant between
-  // construction and Start() and it's thus valid for Start() to be called on
-  // another sequence as long as every other operation is then performed on that
-  // sequence.
-  owning_sequence_checker_.DetachFromSequence();
 }
 
 Thread::~Thread() {
@@ -64,8 +80,6 @@
 }
 
 bool Thread::Start() {
-  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
-
   Options options;
 #if defined(OS_WIN)
   if (com_status_ == STA)
@@ -75,11 +89,7 @@
 }
 
 bool Thread::StartWithOptions(const Options& options) {
-  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
   DCHECK(!message_loop_);
-  DCHECK(!IsRunning());
-  DCHECK(!stopping_) << "Starting a non-joinable thread a second time? That's "
-                     << "not allowed!";
 #if defined(OS_WIN)
   DCHECK((com_status_ != STA) ||
       (options.message_loop_type == MessageLoop::TYPE_UI));
@@ -96,41 +106,32 @@
     type = MessageLoop::TYPE_CUSTOM;
 
   message_loop_timer_slack_ = options.timer_slack;
-  std::unique_ptr<MessageLoop> message_loop_owned =
+  std::unique_ptr<MessageLoop> message_loop =
       MessageLoop::CreateUnbound(type, options.message_pump_factory);
-  message_loop_ = message_loop_owned.get();
+  message_loop_ = message_loop.get();
   start_event_.Reset();
 
-  // Hold |thread_lock_| while starting the new thread to synchronize with
-  // Stop() while it's not guaranteed to be sequenced (until crbug/629139 is
-  // fixed).
+  // Hold the thread_lock_ while starting a new thread, so that we can make sure
+  // that thread_ is populated before the newly created thread accesses it.
   {
     AutoLock lock(thread_lock_);
-    bool success =
-        options.joinable
-            ? PlatformThread::CreateWithPriority(options.stack_size, this,
-                                                 &thread_, options.priority)
-            : PlatformThread::CreateNonJoinableWithPriority(
-                  options.stack_size, this, options.priority);
-    if (!success) {
+    if (!PlatformThread::CreateWithPriority(options.stack_size, this, &thread_,
+                                            options.priority)) {
       DLOG(ERROR) << "failed to create thread";
       message_loop_ = nullptr;
       return false;
     }
   }
 
-  joinable_ = options.joinable;
-
-  // The ownership of |message_loop_| is managed by the newly created thread
+  // The ownership of message_loop is managed by the newly created thread
   // within the ThreadMain.
-  ignore_result(message_loop_owned.release());
+  ignore_result(message_loop.release());
 
   DCHECK(message_loop_);
   return true;
 }
 
 bool Thread::StartAndWaitForTesting() {
-  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
   bool result = Start();
   if (!result)
     return false;
@@ -139,7 +140,6 @@
 }
 
 bool Thread::WaitUntilThreadStarted() const {
-  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
   if (!message_loop_)
     return false;
   base::ThreadRestrictions::ScopedAllowWait allow_wait;
@@ -147,74 +147,37 @@
   return true;
 }
 
-void Thread::FlushForTesting() {
-  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
-  if (!message_loop_)
-    return;
-
-  WaitableEvent done(WaitableEvent::ResetPolicy::AUTOMATIC,
-                     WaitableEvent::InitialState::NOT_SIGNALED);
-  task_runner()->PostTask(FROM_HERE,
-                          Bind(&WaitableEvent::Signal, Unretained(&done)));
-  done.Wait();
-}
-
 void Thread::Stop() {
-  DCHECK(joinable_);
-
-  // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
-  // enable this check, until then synchronization with Start() via
-  // |thread_lock_| is required...
-  // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
   AutoLock lock(thread_lock_);
-
-  StopSoon();
-
-  // Can't join if the |thread_| is either already gone or is non-joinable.
   if (thread_.is_null())
     return;
 
+  StopSoon();
+
   // Wait for the thread to exit.
   //
-  // TODO(darin): Unfortunately, we need to keep |message_loop_| around until
+  // TODO(darin): Unfortunately, we need to keep message_loop_ around until
   // the thread exits.  Some consumers are abusing the API.  Make them stop.
   //
   PlatformThread::Join(thread_);
   thread_ = base::PlatformThreadHandle();
 
-  // The thread should nullify |message_loop_| on exit (note: Join() adds an
-  // implicit memory barrier and no lock is thus required for this check).
+  // The thread should nullify message_loop_ on exit.
   DCHECK(!message_loop_);
 
   stopping_ = false;
 }
 
 void Thread::StopSoon() {
-  // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
-  // enable this check.
-  // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
+  // We should only be called on the same thread that started us.
+
+  DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
 
   if (stopping_ || !message_loop_)
     return;
 
   stopping_ = true;
-
-  if (using_external_message_loop_) {
-    // Setting |stopping_| to true above should have been sufficient for this
-    // thread to be considered "stopped" per it having never set its |running_|
-    // bit by lack of its own ThreadMain.
-    DCHECK(!IsRunning());
-    message_loop_ = nullptr;
-    return;
-  }
-
-  task_runner()->PostTask(
-      FROM_HERE, base::Bind(&Thread::ThreadQuitHelper, Unretained(this)));
-}
-
-void Thread::DetachFromSequence() {
-  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
-  owning_sequence_checker_.DetachFromSequence();
+  task_runner()->PostTask(FROM_HERE, base::Bind(&ThreadQuitHelper));
 }
 
 PlatformThreadId Thread::GetThreadId() const {
@@ -225,36 +188,26 @@
 }
 
 bool Thread::IsRunning() const {
-  // TODO(gab): Fix improper usage of this API (http://crbug.com/629139) and
-  // enable this check.
-  // DCHECK(owning_sequence_checker_.CalledOnValidSequence());
-
-  // If the thread's already started (i.e. |message_loop_| is non-null) and not
-  // yet requested to stop (i.e. |stopping_| is false) we can just return true.
-  // (Note that |stopping_| is touched only on the same sequence that starts /
-  // started the new thread so we need no locking here.)
+  // If the thread's already started (i.e. message_loop_ is non-null) and
+  // not yet requested to stop (i.e. stopping_ is false) we can just return
+  // true. (Note that stopping_ is touched only on the same thread that
+  // starts / started the new thread so we need no locking here.)
   if (message_loop_ && !stopping_)
     return true;
-  // Otherwise check the |running_| flag, which is set to true by the new thread
+  // Otherwise check the running_ flag, which is set to true by the new thread
   // only while it is inside Run().
   AutoLock lock(running_lock_);
   return running_;
 }
 
-void Thread::Run(RunLoop* run_loop) {
-  // Overridable protected method to be called from our |thread_| only.
-  DCHECK(id_event_.IsSignaled());
-  DCHECK_EQ(id_, PlatformThread::CurrentId());
-
-  run_loop->Run();
+void Thread::Run(MessageLoop*) {
+  RunLoop().Run();
 }
 
-// static
 void Thread::SetThreadWasQuitProperly(bool flag) {
   lazy_tls_bool.Pointer()->Set(flag);
 }
 
-// static
 bool Thread::GetThreadWasQuitProperly() {
   bool quit_properly = true;
 #ifndef NDEBUG
@@ -263,27 +216,9 @@
   return quit_properly;
 }
 
-void Thread::SetMessageLoop(MessageLoop* message_loop) {
-  DCHECK(owning_sequence_checker_.CalledOnValidSequence());
-  DCHECK(message_loop);
-
-  // Setting |message_loop_| should suffice for this thread to be considered
-  // as "running", until Stop() is invoked.
-  DCHECK(!IsRunning());
-  message_loop_ = message_loop;
-  DCHECK(IsRunning());
-
-  using_external_message_loop_ = true;
-}
-
 void Thread::ThreadMain() {
   // First, make GetThreadId() available to avoid deadlocks. It could be called
   // any place in the following thread initialization code.
-  DCHECK(!id_event_.IsSignaled());
-  // Note: this read of |id_| while |id_event_| isn't signaled is exceptionally
-  // okay because ThreadMain has a happens-after relationship with the other
-  // write in StartWithOptions().
-  DCHECK_EQ(kInvalidThreadId, id_);
   id_ = PlatformThread::CurrentId();
   DCHECK_NE(kInvalidThreadId, id_);
   id_event_.Signal();
@@ -291,22 +226,12 @@
   // Complete the initialization of our Thread object.
   PlatformThread::SetName(name_.c_str());
 
-  // Lazily initialize the |message_loop| so that it can run on this thread.
+  // Lazily initialize the message_loop so that it can run on this thread.
   DCHECK(message_loop_);
   std::unique_ptr<MessageLoop> message_loop(message_loop_);
   message_loop_->BindToCurrentThread();
   message_loop_->SetTimerSlack(message_loop_timer_slack_);
 
-#if defined(OS_POSIX) && !defined(OS_NACL)
-  // Allow threads running a MessageLoopForIO to use FileDescriptorWatcher API.
-  std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher;
-  if (MessageLoopForIO::IsCurrent()) {
-    DCHECK_EQ(message_loop_, MessageLoopForIO::current());
-    file_descriptor_watcher.reset(
-        new FileDescriptorWatcher(MessageLoopForIO::current()));
-  }
-#endif
-
 #if defined(OS_WIN)
   std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
   if (com_status_ != NONE) {
@@ -326,9 +251,7 @@
 
   start_event_.Signal();
 
-  RunLoop run_loop;
-  run_loop_ = &run_loop;
-  Run(run_loop_);
+  Run(message_loop_);
 
   {
     AutoLock lock(running_lock_);
@@ -343,22 +266,15 @@
 #endif
 
   if (message_loop->type() != MessageLoop::TYPE_CUSTOM) {
-    // Assert that RunLoop::QuitWhenIdle was called by ThreadQuitHelper. Don't
-    // check for custom message pumps, because their shutdown might not allow
-    // this.
+    // Assert that MessageLoop::QuitWhenIdle was called by ThreadQuitHelper.
+    // Don't check for custom message pumps, because their shutdown might not
+    // allow this.
     DCHECK(GetThreadWasQuitProperly());
   }
 
   // We can't receive messages anymore.
   // (The message loop is destructed at the end of this block)
   message_loop_ = nullptr;
-  run_loop_ = nullptr;
-}
-
-void Thread::ThreadQuitHelper() {
-  DCHECK(run_loop_);
-  run_loop_->QuitWhenIdle();
-  SetThreadWasQuitProperly(true);
 }
 
 }  // namespace base
diff --git a/base/threading/thread.h b/base/threading/thread.h
index 01f7d8e..c9a77d7 100644
--- a/base/threading/thread.h
+++ b/base/threading/thread.h
@@ -15,9 +15,7 @@
 #include "base/macros.h"
 #include "base/message_loop/message_loop.h"
 #include "base/message_loop/timer_slack.h"
-#include "base/sequence_checker.h"
 #include "base/single_thread_task_runner.h"
-#include "base/synchronization/atomic_flag.h"
 #include "base/synchronization/lock.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/platform_thread.h"
@@ -26,7 +24,6 @@
 namespace base {
 
 class MessagePump;
-class RunLoop;
 
 // A simple thread abstraction that establishes a MessageLoop on a new thread.
 // The consumer uses the MessageLoop of the thread to cause code to execute on
@@ -41,18 +38,6 @@
 //  (1) Thread::CleanUp()
 //  (2) MessageLoop::~MessageLoop
 //  (3.b)    MessageLoop::DestructionObserver::WillDestroyCurrentMessageLoop
-//
-// This API is not thread-safe: unless indicated otherwise its methods are only
-// valid from the owning sequence (which is the one from which Start() is
-// invoked -- should it differ from the one on which it was constructed).
-//
-// Sometimes it's useful to kick things off on the initial sequence (e.g.
-// construction, Start(), task_runner()), but to then hand the Thread over to a
-// pool of users for the last one of them to destroy it when done. For that use
-// case, Thread::DetachFromSequence() allows the owning sequence to give up
-// ownership. The caller is then responsible to ensure a happens-after
-// relationship between the DetachFromSequence() call and the next use of that
-// Thread object (including ~Thread()).
 class BASE_EXPORT Thread : PlatformThread::Delegate {
  public:
   struct BASE_EXPORT Options {
@@ -65,10 +50,10 @@
 
     // Specifies the type of message loop that will be allocated on the thread.
     // This is ignored if message_pump_factory.is_null() is false.
-    MessageLoop::Type message_loop_type = MessageLoop::TYPE_DEFAULT;
+    MessageLoop::Type message_loop_type;
 
     // Specifies timer slack for thread message loop.
-    TimerSlack timer_slack = TIMER_SLACK_NONE;
+    TimerSlack timer_slack;
 
     // Used to create the MessagePump for the MessageLoop. The callback is Run()
     // on the thread. If message_pump_factory.is_null(), then a MessagePump
@@ -79,18 +64,10 @@
     // Specifies the maximum stack size that the thread is allowed to use.
     // This does not necessarily correspond to the thread's initial stack size.
     // A value of 0 indicates that the default maximum should be used.
-    size_t stack_size = 0;
+    size_t stack_size;
 
     // Specifies the initial thread priority.
-    ThreadPriority priority = ThreadPriority::NORMAL;
-
-    // If false, the thread will not be joined on destruction. This is intended
-    // for threads that want TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN
-    // semantics. Non-joinable threads can't be joined (must be leaked and
-    // can't be destroyed or Stop()'ed).
-    // TODO(gab): allow non-joinable instances to be deleted without causing
-    // user-after-frees (proposal @ https://crbug.com/629139#c14)
-    bool joinable = true;
+    ThreadPriority priority;
   };
 
   // Constructor.
@@ -148,19 +125,12 @@
   // carefully for production code.
   bool WaitUntilThreadStarted() const;
 
-  // Blocks until all tasks previously posted to this thread have been executed.
-  void FlushForTesting();
-
-  // Signals the thread to exit and returns once the thread has exited. The
-  // Thread object is completely reset and may be used as if it were newly
-  // constructed (i.e., Start may be called again). Can only be called if
-  // |joinable_|.
+  // Signals the thread to exit and returns once the thread has exited.  After
+  // this method returns, the Thread object is completely reset and may be used
+  // as if it were newly constructed (i.e., Start may be called again).
   //
   // Stop may be called multiple times and is simply ignored if the thread is
-  // already stopped or currently stopping.
-  //
-  // Start/Stop are not thread-safe and callers that desire to invoke them from
-  // different threads must ensure mutual exclusion.
+  // already stopped.
   //
   // NOTE: If you are a consumer of Thread, it is not necessary to call this
   // before deleting your Thread objects, as the destructor will do it.
@@ -175,17 +145,11 @@
   // deadlock on Windows with printer worker thread. In any other case, Stop()
   // should be used.
   //
-  // Call Stop() to reset the thread object once it is known that the thread has
-  // quit.
+  // StopSoon should not be called multiple times as it is risky to do so. It
+  // could cause a timing issue in message_loop() access. Call Stop() to reset
+  // the thread object once it is known that the thread has quit.
   void StopSoon();
 
-  // Detaches the owning sequence, indicating that the next call to this API
-  // (including ~Thread()) can happen from a different sequence (to which it
-  // will be rebound). This call itself must happen on the current owning
-  // sequence and the caller must ensure the next API call has a happens-after
-  // relationship with this one.
-  void DetachFromSequence();
-
   // Returns the message loop for this thread.  Use the MessageLoop's
   // PostTask methods to execute code on the thread.  This only returns
   // non-null after a successful call to Start.  After Stop has been called,
@@ -194,52 +158,29 @@
   // NOTE: You must not call this MessageLoop's Quit method directly.  Use
   // the Thread's Stop method instead.
   //
-  // In addition to this Thread's owning sequence, this can also safely be
-  // called from the underlying thread itself.
-  MessageLoop* message_loop() const {
-    // This class doesn't provide synchronization around |message_loop_| and as
-    // such only the owner should access it (and the underlying thread which
-    // never sees it before it's set). In practice, many callers are coming from
-    // unrelated threads but provide their own implicit (e.g. memory barriers
-    // from task posting) or explicit (e.g. locks) synchronization making the
-    // access of |message_loop_| safe... Changing all of those callers is
-    // unfeasible; instead verify that they can reliably see
-    // |message_loop_ != nullptr| without synchronization as a proof that their
-    // external synchronization catches the unsynchronized effects of Start().
-    // TODO(gab): Despite all of the above this test has to be disabled for now
-    // per crbug.com/629139#c6.
-    // DCHECK(owning_sequence_checker_.CalledOnValidSequence() ||
-    //        (id_event_.IsSignaled() && id_ == PlatformThread::CurrentId()) ||
-    //        message_loop_);
-    return message_loop_;
-  }
+  MessageLoop* message_loop() const { return message_loop_; }
 
   // Returns a TaskRunner for this thread. Use the TaskRunner's PostTask
   // methods to execute code on the thread. Returns nullptr if the thread is not
   // running (e.g. before Start or after Stop have been called). Callers can
   // hold on to this even after the thread is gone; in this situation, attempts
   // to PostTask() will fail.
-  //
-  // In addition to this Thread's owning sequence, this can also safely be
-  // called from the underlying thread itself.
   scoped_refptr<SingleThreadTaskRunner> task_runner() const {
-    // Refer to the DCHECK and comment inside |message_loop()|.
-    DCHECK(owning_sequence_checker_.CalledOnValidSequence() ||
-           (id_event_.IsSignaled() && id_ == PlatformThread::CurrentId()) ||
-           message_loop_);
     return message_loop_ ? message_loop_->task_runner() : nullptr;
   }
 
   // Returns the name of this thread (for display in debugger too).
   const std::string& thread_name() const { return name_; }
 
+  // The native thread handle.
+  PlatformThreadHandle thread_handle() { return thread_; }
+
   // Returns the thread ID.  Should not be called before the first Start*()
   // call.  Keeps on returning the same ID even after a Stop() call. The next
   // Start*() call renews the ID.
   //
   // WARNING: This function will block if the thread hasn't started yet.
   //
-  // This method is thread-safe.
   PlatformThreadId GetThreadId() const;
 
   // Returns true if the thread has been started, and not yet stopped.
@@ -249,8 +190,8 @@
   // Called just prior to starting the message loop
   virtual void Init() {}
 
-  // Called to start the run loop
-  virtual void Run(RunLoop* run_loop);
+  // Called to start the message loop
+  virtual void Run(MessageLoop* message_loop);
 
   // Called just after the message loop ends
   virtual void CleanUp() {}
@@ -258,11 +199,8 @@
   static void SetThreadWasQuitProperly(bool flag);
   static bool GetThreadWasQuitProperly();
 
-  // Bind this Thread to an existing MessageLoop instead of starting a new one.
-  void SetMessageLoop(MessageLoop* message_loop);
-
-  bool using_external_message_loop() const {
-    return using_external_message_loop_;
+  void set_message_loop(MessageLoop* message_loop) {
+    message_loop_ = message_loop;
   }
 
  private:
@@ -277,25 +215,19 @@
   // PlatformThread::Delegate methods:
   void ThreadMain() override;
 
-  void ThreadQuitHelper();
-
 #if defined(OS_WIN)
   // Whether this thread needs to initialize COM, and if so, in what mode.
-  ComStatus com_status_ = NONE;
+  ComStatus com_status_;
 #endif
 
-  // Mirrors the Options::joinable field used to start this thread. Verified
-  // on Stop() -- non-joinable threads can't be joined (must be leaked).
-  bool joinable_ = true;
-
   // If true, we're in the middle of stopping, and shouldn't access
   // |message_loop_|. It may non-nullptr and invalid.
   // Should be written on the thread that created this thread. Also read data
   // could be wrong on other threads.
-  bool stopping_ = false;
+  bool stopping_;
 
   // True while inside of Run().
-  bool running_ = false;
+  bool running_;
   mutable base::Lock running_lock_;  // Protects |running_|.
 
   // The thread's handle.
@@ -303,35 +235,24 @@
   mutable base::Lock thread_lock_;  // Protects |thread_|.
 
   // The thread's id once it has started.
-  PlatformThreadId id_ = kInvalidThreadId;
-  // Protects |id_| which must only be read while it's signaled.
-  mutable WaitableEvent id_event_;
+  PlatformThreadId id_;
+  mutable WaitableEvent id_event_;  // Protects |id_|.
 
-  // The thread's MessageLoop and RunLoop. Valid only while the thread is alive.
-  // Set by the created thread.
-  MessageLoop* message_loop_ = nullptr;
-  RunLoop* run_loop_ = nullptr;
-
-  // True only if |message_loop_| was externally provided by |SetMessageLoop()|
-  // in which case this Thread has no underlying |thread_| and should merely
-  // drop |message_loop_| on Stop(). In that event, this remains true after
-  // Stop() was invoked so that subclasses can use this state to build their own
-  // cleanup logic as required.
-  bool using_external_message_loop_ = false;
+  // The thread's message loop.  Valid only while the thread is alive.  Set
+  // by the created thread.
+  MessageLoop* message_loop_;
 
   // Stores Options::timer_slack_ until the message loop has been bound to
   // a thread.
-  TimerSlack message_loop_timer_slack_ = TIMER_SLACK_NONE;
+  TimerSlack message_loop_timer_slack_;
 
   // The name of the thread.  Used for debugging purposes.
-  const std::string name_;
+  std::string name_;
 
   // Signaled when the created thread gets ready to use the message loop.
   mutable WaitableEvent start_event_;
 
-  // This class is not thread-safe, use this to verify access from the owning
-  // sequence of the Thread.
-  SequenceChecker owning_sequence_checker_;
+  friend void ThreadQuitHelper();
 
   DISALLOW_COPY_AND_ASSIGN(Thread);
 };
diff --git a/base/threading/thread_checker.h b/base/threading/thread_checker.h
index 1d4eb1c..1d970f0 100644
--- a/base/threading/thread_checker.h
+++ b/base/threading/thread_checker.h
@@ -8,6 +8,16 @@
 #include "base/logging.h"
 #include "base/threading/thread_checker_impl.h"
 
+// Apart from debug builds, we also enable the thread checker in
+// builds with DCHECK_ALWAYS_ON so that trybots and waterfall bots
+// with this define will get the same level of thread checking as
+// debug bots.
+#if DCHECK_IS_ON()
+#define ENABLE_THREAD_CHECKER 1
+#else
+#define ENABLE_THREAD_CHECKER 0
+#endif
+
 namespace base {
 
 // Do nothing implementation, for use in release mode.
@@ -53,20 +63,16 @@
 //   ThreadChecker thread_checker_;
 // }
 //
-// Note that, when enabled, CalledOnValidThread() returns false when called from
-// tasks posted to SingleThreadTaskRunners bound to different sequences, even if
-// the tasks happen to run on the same thread (e.g. two independent TaskRunners
-// with ExecutionMode::SINGLE_THREADED on the TaskScheduler that happen to share
-// a thread).
-//
 // In Release mode, CalledOnValidThread will always return true.
-#if DCHECK_IS_ON()
+#if ENABLE_THREAD_CHECKER
 class ThreadChecker : public ThreadCheckerImpl {
 };
 #else
 class ThreadChecker : public ThreadCheckerDoNothing {
 };
-#endif  // DCHECK_IS_ON()
+#endif  // ENABLE_THREAD_CHECKER
+
+#undef ENABLE_THREAD_CHECKER
 
 }  // namespace base
 
diff --git a/base/threading/thread_checker_impl.cc b/base/threading/thread_checker_impl.cc
index d5ccbdb..eb87bae 100644
--- a/base/threading/thread_checker_impl.cc
+++ b/base/threading/thread_checker_impl.cc
@@ -4,54 +4,31 @@
 
 #include "base/threading/thread_checker_impl.h"
 
-#include "base/threading/thread_task_runner_handle.h"
-
 namespace base {
 
-ThreadCheckerImpl::ThreadCheckerImpl() {
-  AutoLock auto_lock(lock_);
-  EnsureAssigned();
+ThreadCheckerImpl::ThreadCheckerImpl()
+    : valid_thread_id_() {
+  EnsureThreadIdAssigned();
 }
 
-ThreadCheckerImpl::~ThreadCheckerImpl() = default;
+ThreadCheckerImpl::~ThreadCheckerImpl() {}
 
 bool ThreadCheckerImpl::CalledOnValidThread() const {
+  EnsureThreadIdAssigned();
   AutoLock auto_lock(lock_);
-  EnsureAssigned();
-
-  // Always return true when called from the task from which this
-  // ThreadCheckerImpl was assigned to a thread.
-  if (task_token_ == TaskToken::GetForCurrentThread())
-    return true;
-
-  // If this ThreadCheckerImpl is bound to a valid SequenceToken, it must be
-  // equal to the current SequenceToken and there must be a registered
-  // ThreadTaskRunnerHandle. Otherwise, the fact that the current task runs on
-  // the thread to which this ThreadCheckerImpl is bound is fortuitous.
-  if (sequence_token_.IsValid() &&
-      (sequence_token_ != SequenceToken::GetForCurrentThread() ||
-       !ThreadTaskRunnerHandle::IsSet())) {
-    return false;
-  }
-
-  return thread_id_ == PlatformThread::CurrentRef();
+  return valid_thread_id_ == PlatformThread::CurrentRef();
 }
 
 void ThreadCheckerImpl::DetachFromThread() {
   AutoLock auto_lock(lock_);
-  thread_id_ = PlatformThreadRef();
-  task_token_ = TaskToken();
-  sequence_token_ = SequenceToken();
+  valid_thread_id_ = PlatformThreadRef();
 }
 
-void ThreadCheckerImpl::EnsureAssigned() const {
-  lock_.AssertAcquired();
-  if (!thread_id_.is_null())
-    return;
-
-  thread_id_ = PlatformThread::CurrentRef();
-  task_token_ = TaskToken::GetForCurrentThread();
-  sequence_token_ = SequenceToken::GetForCurrentThread();
+void ThreadCheckerImpl::EnsureThreadIdAssigned() const {
+  AutoLock auto_lock(lock_);
+  if (valid_thread_id_.is_null()) {
+    valid_thread_id_ = PlatformThread::CurrentRef();
+  }
 }
 
 }  // namespace base
diff --git a/base/threading/thread_checker_impl.h b/base/threading/thread_checker_impl.h
index 13193d1..c92e143 100644
--- a/base/threading/thread_checker_impl.h
+++ b/base/threading/thread_checker_impl.h
@@ -7,18 +7,17 @@
 
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
-#include "base/sequence_token.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/platform_thread.h"
 
 namespace base {
 
-// Real implementation of ThreadChecker, for use in debug mode, or for temporary
-// use in release mode (e.g. to CHECK on a threading issue seen only in the
-// wild).
+// Real implementation of ThreadChecker, for use in debug mode, or
+// for temporary use in release mode (e.g. to CHECK on a threading issue
+// seen only in the wild).
 //
-// Note: You should almost always use the ThreadChecker class to get the right
-// version for your build configuration.
+// Note: You should almost always use the ThreadChecker class to get the
+// right version for your build configuration.
 class BASE_EXPORT ThreadCheckerImpl {
  public:
   ThreadCheckerImpl();
@@ -32,29 +31,12 @@
   void DetachFromThread();
 
  private:
-  void EnsureAssigned() const;
+  void EnsureThreadIdAssigned() const;
 
-  // Members are mutable so that CalledOnValidThread() can set them.
-
-  // Synchronizes access to all members.
   mutable base::Lock lock_;
-
-  // Thread on which CalledOnValidThread() may return true.
-  mutable PlatformThreadRef thread_id_;
-
-  // TaskToken for which CalledOnValidThread() always returns true. This allows
-  // CalledOnValidThread() to return true when called multiple times from the
-  // same task, even if it's not running in a single-threaded context itself
-  // (allowing usage of ThreadChecker/NonThreadSafe objects on the stack in the
-  // scope of one-off tasks). Note: CalledOnValidThread() may return true even
-  // if the current TaskToken is not equal to this.
-  mutable TaskToken task_token_;
-
-  // SequenceToken for which CalledOnValidThread() may return true. Used to
-  // ensure that CalledOnValidThread() doesn't return true for TaskScheduler
-  // tasks that happen to run on the same thread but weren't posted to the same
-  // SingleThreadTaskRunner.
-  mutable SequenceToken sequence_token_;
+  // This is mutable so that CalledOnValidThread can set it.
+  // It's guarded by |lock_|.
+  mutable PlatformThreadRef valid_thread_id_;
 };
 
 }  // namespace base
diff --git a/base/threading/thread_checker_unittest.cc b/base/threading/thread_checker_unittest.cc
index 96455e6..bc5b1e4 100644
--- a/base/threading/thread_checker_unittest.cc
+++ b/base/threading/thread_checker_unittest.cc
@@ -2,194 +2,180 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/threading/thread_checker.h"
+
 #include <memory>
 
-#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/sequence_token.h"
-#include "base/test/test_simple_task_runner.h"
 #include "base/threading/simple_thread.h"
-#include "base/threading/thread_checker_impl.h"
-#include "base/threading/thread_task_runner_handle.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
+// Duplicated from base/threading/thread_checker.h so that we can be
+// good citizens there and undef the macro.
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#define ENABLE_THREAD_CHECKER 1
+#else
+#define ENABLE_THREAD_CHECKER 0
+#endif
+
 namespace base {
+
 namespace {
 
-// A thread that runs a callback.
-class RunCallbackThread : public SimpleThread {
+// Simple class to exercise the basics of ThreadChecker.
+// Both the destructor and DoStuff should verify that they were
+// called on the same thread as the constructor.
+class ThreadCheckerClass : public ThreadChecker {
  public:
-  explicit RunCallbackThread(const Closure& callback)
-      : SimpleThread("RunCallbackThread"), callback_(callback) {}
+  ThreadCheckerClass() {}
+
+  // Verifies that it was called on the same thread as the constructor.
+  void DoStuff() {
+    DCHECK(CalledOnValidThread());
+  }
+
+  void DetachFromThread() {
+    ThreadChecker::DetachFromThread();
+  }
+
+  static void MethodOnDifferentThreadImpl();
+  static void DetachThenCallFromDifferentThreadImpl();
 
  private:
-  // SimpleThread:
-  void Run() override { callback_.Run(); }
-
-  const Closure callback_;
-
-  DISALLOW_COPY_AND_ASSIGN(RunCallbackThread);
+  DISALLOW_COPY_AND_ASSIGN(ThreadCheckerClass);
 };
 
-// Runs a callback on a new thread synchronously.
-void RunCallbackOnNewThreadSynchronously(const Closure& callback) {
-  RunCallbackThread run_callback_thread(callback);
-  run_callback_thread.Start();
-  run_callback_thread.Join();
-}
+// Calls ThreadCheckerClass::DoStuff on another thread.
+class CallDoStuffOnThread : public base::SimpleThread {
+ public:
+  explicit CallDoStuffOnThread(ThreadCheckerClass* thread_checker_class)
+      : SimpleThread("call_do_stuff_on_thread"),
+        thread_checker_class_(thread_checker_class) {
+  }
 
-void ExpectCalledOnValidThread(ThreadCheckerImpl* thread_checker) {
-  ASSERT_TRUE(thread_checker);
+  void Run() override { thread_checker_class_->DoStuff(); }
 
-  // This should bind |thread_checker| to the current thread if it wasn't
-  // already bound to a thread.
-  EXPECT_TRUE(thread_checker->CalledOnValidThread());
+ private:
+  ThreadCheckerClass* thread_checker_class_;
 
-  // Since |thread_checker| is now bound to the current thread, another call to
-  // CalledOnValidThread() should return true.
-  EXPECT_TRUE(thread_checker->CalledOnValidThread());
-}
+  DISALLOW_COPY_AND_ASSIGN(CallDoStuffOnThread);
+};
 
-void ExpectNotCalledOnValidThread(ThreadCheckerImpl* thread_checker) {
-  ASSERT_TRUE(thread_checker);
-  EXPECT_FALSE(thread_checker->CalledOnValidThread());
-}
+// Deletes ThreadCheckerClass on a different thread.
+class DeleteThreadCheckerClassOnThread : public base::SimpleThread {
+ public:
+  explicit DeleteThreadCheckerClassOnThread(
+      ThreadCheckerClass* thread_checker_class)
+      : SimpleThread("delete_thread_checker_class_on_thread"),
+        thread_checker_class_(thread_checker_class) {
+  }
 
-void ExpectNotCalledOnValidThreadWithSequenceTokenAndThreadTaskRunnerHandle(
-    ThreadCheckerImpl* thread_checker,
-    SequenceToken sequence_token) {
-  ThreadTaskRunnerHandle thread_task_runner_handle(
-      make_scoped_refptr(new TestSimpleTaskRunner));
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(sequence_token);
-  ExpectNotCalledOnValidThread(thread_checker);
-}
+  void Run() override { thread_checker_class_.reset(); }
+
+ private:
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class_;
+
+  DISALLOW_COPY_AND_ASSIGN(DeleteThreadCheckerClassOnThread);
+};
 
 }  // namespace
 
-TEST(ThreadCheckerTest, AllowedSameThreadNoSequenceToken) {
-  ThreadCheckerImpl thread_checker;
-  EXPECT_TRUE(thread_checker.CalledOnValidThread());
+TEST(ThreadCheckerTest, CallsAllowedOnSameThread) {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
+
+  // Verify that DoStuff doesn't assert.
+  thread_checker_class->DoStuff();
+
+  // Verify that the destructor doesn't assert.
+  thread_checker_class.reset();
 }
 
-TEST(ThreadCheckerTest,
-     AllowedSameThreadAndSequenceDifferentTasksWithThreadTaskRunnerHandle) {
-  ThreadTaskRunnerHandle thread_task_runner_handle(
-      make_scoped_refptr(new TestSimpleTaskRunner));
+TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
 
-  std::unique_ptr<ThreadCheckerImpl> thread_checker;
-  const SequenceToken sequence_token = SequenceToken::Create();
+  // Verify that the destructor doesn't assert
+  // when called on a different thread.
+  DeleteThreadCheckerClassOnThread delete_on_thread(
+      thread_checker_class.release());
 
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(sequence_token);
-    thread_checker.reset(new ThreadCheckerImpl);
-  }
-
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(sequence_token);
-    EXPECT_TRUE(thread_checker->CalledOnValidThread());
-  }
-}
-
-TEST(ThreadCheckerTest,
-     AllowedSameThreadSequenceAndTaskNoThreadTaskRunnerHandle) {
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-  ThreadCheckerImpl thread_checker;
-  EXPECT_TRUE(thread_checker.CalledOnValidThread());
-}
-
-TEST(ThreadCheckerTest,
-     DisallowedSameThreadAndSequenceDifferentTasksNoThreadTaskRunnerHandle) {
-  std::unique_ptr<ThreadCheckerImpl> thread_checker;
-
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    thread_checker.reset(new ThreadCheckerImpl);
-  }
-
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    EXPECT_FALSE(thread_checker->CalledOnValidThread());
-  }
-}
-
-TEST(ThreadCheckerTest, DisallowedDifferentThreadsNoSequenceToken) {
-  ThreadCheckerImpl thread_checker;
-  RunCallbackOnNewThreadSynchronously(
-      Bind(&ExpectNotCalledOnValidThread, Unretained(&thread_checker)));
-}
-
-TEST(ThreadCheckerTest, DisallowedDifferentThreadsSameSequence) {
-  ThreadTaskRunnerHandle thread_task_runner_handle(
-      make_scoped_refptr(new TestSimpleTaskRunner));
-  const SequenceToken sequence_token(SequenceToken::Create());
-
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(sequence_token);
-  ThreadCheckerImpl thread_checker;
-  EXPECT_TRUE(thread_checker.CalledOnValidThread());
-
-  RunCallbackOnNewThreadSynchronously(Bind(
-      &ExpectNotCalledOnValidThreadWithSequenceTokenAndThreadTaskRunnerHandle,
-      Unretained(&thread_checker), sequence_token));
-}
-
-TEST(ThreadCheckerTest, DisallowedSameThreadDifferentSequence) {
-  std::unique_ptr<ThreadCheckerImpl> thread_checker;
-
-  ThreadTaskRunnerHandle thread_task_runner_handle(
-      make_scoped_refptr(new TestSimpleTaskRunner));
-
-  {
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    thread_checker.reset(new ThreadCheckerImpl);
-  }
-
-  {
-    // Different SequenceToken.
-    ScopedSetSequenceTokenForCurrentThread
-        scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-    EXPECT_FALSE(thread_checker->CalledOnValidThread());
-  }
-
-  // No SequenceToken.
-  EXPECT_FALSE(thread_checker->CalledOnValidThread());
+  delete_on_thread.Start();
+  delete_on_thread.Join();
 }
 
 TEST(ThreadCheckerTest, DetachFromThread) {
-  ThreadCheckerImpl thread_checker;
-  thread_checker.DetachFromThread();
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
 
-  // Verify that CalledOnValidThread() returns true when called on a different
-  // thread after a call to DetachFromThread().
-  RunCallbackOnNewThreadSynchronously(
-      Bind(&ExpectCalledOnValidThread, Unretained(&thread_checker)));
+  // Verify that DoStuff doesn't assert when called on a different thread after
+  // a call to DetachFromThread.
+  thread_checker_class->DetachFromThread();
+  CallDoStuffOnThread call_on_thread(thread_checker_class.get());
 
-  EXPECT_FALSE(thread_checker.CalledOnValidThread());
+  call_on_thread.Start();
+  call_on_thread.Join();
 }
 
-TEST(ThreadCheckerTest, DetachFromThreadWithSequenceToken) {
-  ThreadTaskRunnerHandle thread_task_runner_handle(
-      make_scoped_refptr(new TestSimpleTaskRunner));
-  ScopedSetSequenceTokenForCurrentThread
-      scoped_set_sequence_token_for_current_thread(SequenceToken::Create());
-  ThreadCheckerImpl thread_checker;
-  thread_checker.DetachFromThread();
+#if GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
 
-  // Verify that CalledOnValidThread() returns true when called on a different
-  // thread after a call to DetachFromThread().
-  RunCallbackOnNewThreadSynchronously(
-      Bind(&ExpectCalledOnValidThread, Unretained(&thread_checker)));
+void ThreadCheckerClass::MethodOnDifferentThreadImpl() {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
 
-  EXPECT_FALSE(thread_checker.CalledOnValidThread());
+  // DoStuff should assert in debug builds only when called on a
+  // different thread.
+  CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+
+  call_on_thread.Start();
+  call_on_thread.Join();
 }
 
+#if ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerDeathTest, MethodNotAllowedOnDifferentThreadInDebug) {
+  ASSERT_DEATH({
+      ThreadCheckerClass::MethodOnDifferentThreadImpl();
+    }, "");
+}
+#else
+TEST(ThreadCheckerTest, MethodAllowedOnDifferentThreadInRelease) {
+  ThreadCheckerClass::MethodOnDifferentThreadImpl();
+}
+#endif  // ENABLE_THREAD_CHECKER
+
+void ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl() {
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
+      new ThreadCheckerClass);
+
+  // DoStuff doesn't assert when called on a different thread
+  // after a call to DetachFromThread.
+  thread_checker_class->DetachFromThread();
+  CallDoStuffOnThread call_on_thread(thread_checker_class.get());
+
+  call_on_thread.Start();
+  call_on_thread.Join();
+
+  // DoStuff should assert in debug builds only after moving to
+  // another thread.
+  thread_checker_class->DoStuff();
+}
+
+#if ENABLE_THREAD_CHECKER
+TEST(ThreadCheckerDeathTest, DetachFromThreadInDebug) {
+  ASSERT_DEATH({
+    ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl();
+    }, "");
+}
+#else
+TEST(ThreadCheckerTest, DetachFromThreadInRelease) {
+  ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl();
+}
+#endif  // ENABLE_THREAD_CHECKER
+
+#endif  // GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
+
+// Just in case we ever get lumped together with other compilation units.
+#undef ENABLE_THREAD_CHECKER
+
 }  // namespace base
diff --git a/base/threading/thread_local.h b/base/threading/thread_local.h
index cad9add..f40420c 100644
--- a/base/threading/thread_local.h
+++ b/base/threading/thread_local.h
@@ -2,34 +2,35 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// WARNING: Thread local storage is a bit tricky to get right. Please make sure
-// that this is really the proper solution for what you're trying to achieve.
-// Don't prematurely optimize, most likely you can just use a Lock.
+// WARNING: Thread local storage is a bit tricky to get right.  Please make
+// sure that this is really the proper solution for what you're trying to
+// achieve.  Don't prematurely optimize, most likely you can just use a Lock.
 //
-// These classes implement a wrapper around ThreadLocalStorage::Slot. On
-// construction, they will allocate a TLS slot, and free the TLS slot on
-// destruction. No memory management (creation or destruction) is handled. This
-// means for uses of ThreadLocalPointer, you must correctly manage the memory
-// yourself, these classes will not destroy the pointer for you. There are no
-// at-thread-exit actions taken by these classes.
+// These classes implement a wrapper around the platform's TLS storage
+// mechanism.  On construction, they will allocate a TLS slot, and free the
+// TLS slot on destruction.  No memory management (creation or destruction) is
+// handled.  This means for uses of ThreadLocalPointer, you must correctly
+// manage the memory yourself, these classes will not destroy the pointer for
+// you.  There are no at-thread-exit actions taken by these classes.
 //
-// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
-// destruction, so memory management must be handled elsewhere. The first call
-// to Get() on a thread will return NULL. You can update the pointer with a call
-// to Set().
+// ThreadLocalPointer<Type> wraps a Type*.  It performs no creation or
+// destruction, so memory management must be handled elsewhere.  The first call
+// to Get() on a thread will return NULL.  You can update the pointer with a
+// call to Set().
 //
-// ThreadLocalBoolean wraps a bool. It will default to false if it has never
+// ThreadLocalBoolean wraps a bool.  It will default to false if it has never
 // been set otherwise with Set().
 //
-// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
-// once it has been created. If you want to dynamically create an instance, you
-// must of course properly deal with safety and race conditions. This means a
-// function-level static initializer is generally inappropiate.
+// Thread Safety:  An instance of ThreadLocalStorage is completely thread safe
+// once it has been created.  If you want to dynamically create an instance,
+// you must of course properly deal with safety and race conditions.  This
+// means a function-level static initializer is generally inappropiate.
 //
-// In Android, the system TLS is limited.
+// In Android, the system TLS is limited, the implementation is backed with
+// ThreadLocalStorage.
 //
 // Example usage:
-//   // My class is logically attached to a single thread. We cache a pointer
+//   // My class is logically attached to a single thread.  We cache a pointer
 //   // on the thread it was created on, so we can implement current().
 //   MyClass::MyClass() {
 //     DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() == NULL);
@@ -50,42 +51,76 @@
 #ifndef BASE_THREADING_THREAD_LOCAL_H_
 #define BASE_THREADING_THREAD_LOCAL_H_
 
+#include "base/base_export.h"
 #include "base/macros.h"
 #include "base/threading/thread_local_storage.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <pthread.h>
+#endif
 
 namespace base {
+namespace internal {
+
+// Helper functions that abstract the cross-platform APIs.  Do not use directly.
+struct BASE_EXPORT ThreadLocalPlatform {
+#if defined(OS_WIN)
+  typedef unsigned long SlotType;
+#elif defined(OS_ANDROID)
+  typedef ThreadLocalStorage::StaticSlot SlotType;
+#elif defined(OS_POSIX)
+  typedef pthread_key_t SlotType;
+#endif
+
+  static void AllocateSlot(SlotType* slot);
+  static void FreeSlot(SlotType slot);
+  static void* GetValueFromSlot(SlotType slot);
+  static void SetValueInSlot(SlotType slot, void* value);
+};
+
+}  // namespace internal
 
 template <typename Type>
 class ThreadLocalPointer {
  public:
-  ThreadLocalPointer() = default;
-  ~ThreadLocalPointer() = default;
+  ThreadLocalPointer() : slot_() {
+    internal::ThreadLocalPlatform::AllocateSlot(&slot_);
+  }
+
+  ~ThreadLocalPointer() {
+    internal::ThreadLocalPlatform::FreeSlot(slot_);
+  }
 
   Type* Get() {
-    return static_cast<Type*>(slot_.Get());
+    return static_cast<Type*>(
+        internal::ThreadLocalPlatform::GetValueFromSlot(slot_));
   }
 
   void Set(Type* ptr) {
-    slot_.Set(const_cast<void*>(static_cast<const void*>(ptr)));
+    internal::ThreadLocalPlatform::SetValueInSlot(
+        slot_, const_cast<void*>(static_cast<const void*>(ptr)));
   }
 
  private:
-  ThreadLocalStorage::Slot slot_;
+  typedef internal::ThreadLocalPlatform::SlotType SlotType;
+
+  SlotType slot_;
 
   DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer<Type>);
 };
 
 class ThreadLocalBoolean {
  public:
-  ThreadLocalBoolean() = default;
-  ~ThreadLocalBoolean() = default;
+  ThreadLocalBoolean() {}
+  ~ThreadLocalBoolean() {}
 
   bool Get() {
-    return tlp_.Get() != nullptr;
+    return tlp_.Get() != NULL;
   }
 
   void Set(bool val) {
-    tlp_.Set(val ? this : nullptr);
+    tlp_.Set(val ? this : NULL);
   }
 
  private:
diff --git a/base/threading/thread_local_posix.cc b/base/threading/thread_local_posix.cc
new file mode 100644
index 0000000..8bc46ad
--- /dev/null
+++ b/base/threading/thread_local_posix.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/thread_local.h"
+
+#include <pthread.h>
+
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if !defined(OS_ANDROID)
+
+namespace base {
+namespace internal {
+
+// static
+void ThreadLocalPlatform::AllocateSlot(SlotType* slot) {
+  int error = pthread_key_create(slot, NULL);
+  CHECK_EQ(error, 0);
+}
+
+// static
+void ThreadLocalPlatform::FreeSlot(SlotType slot) {
+  int error = pthread_key_delete(slot);
+  DCHECK_EQ(0, error);
+}
+
+// static
+void* ThreadLocalPlatform::GetValueFromSlot(SlotType slot) {
+  return pthread_getspecific(slot);
+}
+
+// static
+void ThreadLocalPlatform::SetValueInSlot(SlotType slot, void* value) {
+  int error = pthread_setspecific(slot, value);
+  DCHECK_EQ(error, 0);
+}
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // !defined(OS_ANDROID)
diff --git a/base/threading/thread_local_storage.cc b/base/threading/thread_local_storage.cc
index 48c1dd5..a7eb527 100644
--- a/base/threading/thread_local_storage.cc
+++ b/base/threading/thread_local_storage.cc
@@ -6,57 +6,10 @@
 
 #include "base/atomicops.h"
 #include "base/logging.h"
-#include "base/synchronization/lock.h"
 #include "build/build_config.h"
 
 using base::internal::PlatformThreadLocalStorage;
 
-// Chrome Thread Local Storage (TLS)
-//
-// This TLS system allows Chrome to use a single OS level TLS slot process-wide,
-// and allows us to control the slot limits instead of being at the mercy of the
-// platform. To do this, Chrome TLS replicates an array commonly found in the OS
-// thread metadata.
-//
-// Overview:
-//
-// OS TLS Slots       Per-Thread                 Per-Process Global
-//     ...
-//     []             Chrome TLS Array           Chrome TLS Metadata
-//     [] ----------> [][][][][ ][][][][]        [][][][][ ][][][][]
-//     []                      |                          |
-//     ...                     V                          V
-//                      Metadata Version           Slot Information
-//                         Your Data!
-//
-// Using a single OS TLS slot, Chrome TLS allocates an array on demand for the
-// lifetime of each thread that requests Chrome TLS data. Each per-thread TLS
-// array matches the length of the per-process global metadata array.
-//
-// A per-process global TLS metadata array tracks information about each item in
-// the per-thread array:
-//   * Status: Tracks if the slot is allocated or free to assign.
-//   * Destructor: An optional destructor to call on thread destruction for that
-//                 specific slot.
-//   * Version: Tracks the current version of the TLS slot. Each TLS slot
-//              allocation is associated with a unique version number.
-//
-//              Most OS TLS APIs guarantee that a newly allocated TLS slot is
-//              initialized to 0 for all threads. The Chrome TLS system provides
-//              this guarantee by tracking the version for each TLS slot here
-//              on each per-thread Chrome TLS array entry. Threads that access
-//              a slot with a mismatched version will receive 0 as their value.
-//              The metadata version is incremented when the client frees a
-//              slot. The per-thread metadata version is updated when a client
-//              writes to the slot. This scheme allows for constant time
-//              invalidation and avoids the need to iterate through each Chrome
-//              TLS array to mark the slot as zero.
-//
-// Just like an OS TLS API, clients of the Chrome TLS are responsible for
-// managing any necessary lifetime of the data in their slots. The only
-// convenience provided is automatic destruction when a thread ends. If a client
-// frees a slot, that client is responsible for destroying the data in the slot.
-
 namespace {
 // In order to make TLS destructors work, we need to keep around a function
 // pointer to the destructor for each slot. We keep this array of pointers in a
@@ -65,42 +18,37 @@
 // hold a pointer to a per-thread array (table) of slots that we allocate to
 // Chromium consumers.
 
-// g_native_tls_key is the one native TLS that we use. It stores our table.
+// g_native_tls_key is the one native TLS that we use.  It stores our table.
 base::subtle::Atomic32 g_native_tls_key =
     PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
 
-// The maximum number of slots in our thread local storage stack.
-constexpr int kThreadLocalStorageSize = 256;
-constexpr int kInvalidSlotValue = -1;
+// g_last_used_tls_key is the high-water-mark of allocated thread local storage.
+// Each allocation is an index into our g_tls_destructors[].  Each such index is
+// assigned to the instance variable slot_ in a ThreadLocalStorage::Slot
+// instance.  We reserve the value slot_ == 0 to indicate that the corresponding
+// instance of ThreadLocalStorage::Slot has been freed (i.e., destructor called,
+// etc.).  This reserved use of 0 is then stated as the initial value of
+// g_last_used_tls_key, so that the first issued index will be 1.
+base::subtle::Atomic32 g_last_used_tls_key = 0;
 
-enum TlsStatus {
-  FREE,
-  IN_USE,
-};
-
-struct TlsMetadata {
-  TlsStatus status;
-  base::ThreadLocalStorage::TLSDestructorFunc destructor;
-  uint32_t version;
-};
-
-struct TlsVectorEntry {
-  void* data;
-  uint32_t version;
-};
-
-// This lock isn't needed until after we've constructed the per-thread TLS
-// vector, so it's safe to use.
-base::Lock* GetTLSMetadataLock() {
-  static auto* lock = new base::Lock();
-  return lock;
-}
-TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
-size_t g_last_assigned_slot = 0;
+// The maximum number of 'slots' in our thread local storage stack.
+const int kThreadLocalStorageSize = 256;
 
 // The maximum number of times to try to clear slots by calling destructors.
 // Use pthread naming convention for clarity.
-constexpr int kMaxDestructorIterations = kThreadLocalStorageSize;
+const int kMaxDestructorIterations = kThreadLocalStorageSize;
+
+// An array of destructor function pointers for the slots.  If a slot has a
+// destructor, it will be stored in its corresponding entry in this array.
+// The elements are volatile to ensure that when the compiler reads the value
+// to potentially call the destructor, it does so once, and that value is tested
+// for null-ness and then used. Yes, that would be a weird de-optimization,
+// but I can imagine some register machines where it was just as easy to
+// re-fetch an array element, and I want to be sure a call to free the key
+// (i.e., null out the destructor entry) that happens on a separate thread can't
+// hurt the racy calls to the destructors on another thread.
+volatile base::ThreadLocalStorage::TLSDestructorFunc
+    g_tls_destructors[kThreadLocalStorageSize];
 
 // This function is called to initialize our entire Chromium TLS system.
 // It may be called very early, and we need to complete most all of the setup
@@ -108,7 +56,7 @@
 // recursively depend on this initialization.
 // As a result, we use Atomics, and avoid anything (like a singleton) that might
 // require memory allocations.
-TlsVectorEntry* ConstructTlsVector() {
+void** ConstructTlsVector() {
   PlatformThreadLocalStorage::TLSKey key =
       base::subtle::NoBarrier_Load(&g_native_tls_key);
   if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
@@ -125,8 +73,8 @@
             key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES);
       PlatformThreadLocalStorage::FreeTLS(tmp);
     }
-    // Atomically test-and-set the tls_key. If the key is
-    // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
+    // Atomically test-and-set the tls_key.  If the key is
+    // TLS_KEY_OUT_OF_INDEXES, go ahead and set it.  Otherwise, do nothing, as
     // another thread already did our dirty work.
     if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES !=
         static_cast<PlatformThreadLocalStorage::TLSKey>(
@@ -142,38 +90,39 @@
   }
   CHECK(!PlatformThreadLocalStorage::GetTLSValue(key));
 
-  // Some allocators, such as TCMalloc, make use of thread local storage. As a
-  // result, any attempt to call new (or malloc) will lazily cause such a system
-  // to initialize, which will include registering for a TLS key. If we are not
-  // careful here, then that request to create a key will call new back, and
-  // we'll have an infinite loop. We avoid that as follows: Use a stack
-  // allocated vector, so that we don't have dependence on our allocator until
-  // our service is in place. (i.e., don't even call new until after we're
-  // setup)
-  TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
+  // Some allocators, such as TCMalloc, make use of thread local storage.
+  // As a result, any attempt to call new (or malloc) will lazily cause such a
+  // system to initialize, which will include registering for a TLS key.  If we
+  // are not careful here, then that request to create a key will call new back,
+  // and we'll have an infinite loop.  We avoid that as follows:
+  // Use a stack allocated vector, so that we don't have dependence on our
+  // allocator until our service is in place.  (i.e., don't even call new until
+  // after we're setup)
+  void* stack_allocated_tls_data[kThreadLocalStorageSize];
   memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
   // Ensure that any rentrant calls change the temp version.
   PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
 
   // Allocate an array to store our data.
-  TlsVectorEntry* tls_data = new TlsVectorEntry[kThreadLocalStorageSize];
+  void** tls_data = new void*[kThreadLocalStorageSize];
   memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
   PlatformThreadLocalStorage::SetTLSValue(key, tls_data);
   return tls_data;
 }
 
-void OnThreadExitInternal(TlsVectorEntry* tls_data) {
-  DCHECK(tls_data);
-  // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
+void OnThreadExitInternal(void* value) {
+  DCHECK(value);
+  void** tls_data = static_cast<void**>(value);
+  // Some allocators, such as TCMalloc, use TLS.  As a result, when a thread
   // terminates, one of the destructor calls we make may be to shut down an
-  // allocator. We have to be careful that after we've shutdown all of the known
-  // destructors (perchance including an allocator), that we don't call the
-  // allocator and cause it to resurrect itself (with no possibly destructor
-  // call to follow). We handle this problem as follows: Switch to using a stack
-  // allocated vector, so that we don't have dependence on our allocator after
-  // we have called all g_tls_metadata destructors. (i.e., don't even call
-  // delete[] after we're done with destructors.)
-  TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
+  // allocator.  We have to be careful that after we've shutdown all of the
+  // known destructors (perchance including an allocator), that we don't call
+  // the allocator and cause it to resurrect itself (with no possibly destructor
+  // call to follow).  We handle this problem as follows:
+  // Switch to using a stack allocated vector, so that we don't have dependence
+  // on our allocator after we have called all g_tls_destructors.  (i.e., don't
+  // even call delete[] after we're done with destructors.)
+  void* stack_allocated_tls_data[kThreadLocalStorageSize];
   memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
   // Ensure that any re-entrant calls change the temp version.
   PlatformThreadLocalStorage::TLSKey key =
@@ -181,38 +130,32 @@
   PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
   delete[] tls_data;  // Our last dependence on an allocator.
 
-  // Snapshot the TLS Metadata so we don't have to lock on every access.
-  TlsMetadata tls_metadata[kThreadLocalStorageSize];
-  {
-    base::AutoLock auto_lock(*GetTLSMetadataLock());
-    memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
-  }
-
   int remaining_attempts = kMaxDestructorIterations;
   bool need_to_scan_destructors = true;
   while (need_to_scan_destructors) {
     need_to_scan_destructors = false;
     // Try to destroy the first-created-slot (which is slot 1) in our last
-    // destructor call. That user was able to function, and define a slot with
+    // destructor call.  That user was able to function, and define a slot with
     // no other services running, so perhaps it is a basic service (like an
-    // allocator) and should also be destroyed last. If we get the order wrong,
-    // then we'll iterate several more times, so it is really not that critical
-    // (but it might help).
-    for (int slot = 0; slot < kThreadLocalStorageSize ; ++slot) {
-      void* tls_value = stack_allocated_tls_data[slot].data;
-      if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE ||
-          stack_allocated_tls_data[slot].version != tls_metadata[slot].version)
+    // allocator) and should also be destroyed last.  If we get the order wrong,
+    // then we'll itterate several more times, so it is really not that
+    // critical (but it might help).
+    base::subtle::Atomic32 last_used_tls_key =
+        base::subtle::NoBarrier_Load(&g_last_used_tls_key);
+    for (int slot = last_used_tls_key; slot > 0; --slot) {
+      void* tls_value = stack_allocated_tls_data[slot];
+      if (tls_value == NULL)
         continue;
 
       base::ThreadLocalStorage::TLSDestructorFunc destructor =
-          tls_metadata[slot].destructor;
-      if (!destructor)
+          g_tls_destructors[slot];
+      if (destructor == NULL)
         continue;
-      stack_allocated_tls_data[slot].data = nullptr;  // pre-clear the slot.
+      stack_allocated_tls_data[slot] = NULL;  // pre-clear the slot.
       destructor(tls_value);
-      // Any destructor might have called a different service, which then set a
-      // different slot to a non-null value. Hence we need to check the whole
-      // vector again. This is a pthread standard.
+      // Any destructor might have called a different service, which then set
+      // a different slot to a non-NULL value.  Hence we need to check
+      // the whole vector again.  This is a pthread standard.
       need_to_scan_destructors = true;
     }
     if (--remaining_attempts <= 0) {
@@ -222,7 +165,7 @@
   }
 
   // Remove our stack allocated vector.
-  PlatformThreadLocalStorage::SetTLSValue(key, nullptr);
+  PlatformThreadLocalStorage::SetTLSValue(key, NULL);
 }
 
 }  // namespace
@@ -241,107 +184,69 @@
   // Maybe we have never initialized TLS for this thread.
   if (!tls_data)
     return;
-  OnThreadExitInternal(static_cast<TlsVectorEntry*>(tls_data));
+  OnThreadExitInternal(tls_data);
 }
 #elif defined(OS_POSIX)
 void PlatformThreadLocalStorage::OnThreadExit(void* value) {
-  OnThreadExitInternal(static_cast<TlsVectorEntry*>(value));
+  OnThreadExitInternal(value);
 }
 #endif  // defined(OS_WIN)
 
 }  // namespace internal
 
+ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
+  slot_ = 0;
+  base::subtle::Release_Store(&initialized_, 0);
+  Initialize(destructor);
+}
+
 void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
   PlatformThreadLocalStorage::TLSKey key =
       base::subtle::NoBarrier_Load(&g_native_tls_key);
   if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
-      !PlatformThreadLocalStorage::GetTLSValue(key)) {
+      !PlatformThreadLocalStorage::GetTLSValue(key))
     ConstructTlsVector();
-  }
 
   // Grab a new slot.
-  slot_ = kInvalidSlotValue;
-  version_ = 0;
-  {
-    base::AutoLock auto_lock(*GetTLSMetadataLock());
-    for (int i = 0; i < kThreadLocalStorageSize; ++i) {
-      // Tracking the last assigned slot is an attempt to find the next
-      // available slot within one iteration. Under normal usage, slots remain
-      // in use for the lifetime of the process (otherwise before we reclaimed
-      // slots, we would have run out of slots). This makes it highly likely the
-      // next slot is going to be a free slot.
-      size_t slot_candidate =
-          (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize;
-      if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) {
-        g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
-        g_tls_metadata[slot_candidate].destructor = destructor;
-        g_last_assigned_slot = slot_candidate;
-        slot_ = slot_candidate;
-        version_ = g_tls_metadata[slot_candidate].version;
-        break;
-      }
-    }
-  }
-  CHECK_NE(slot_, kInvalidSlotValue);
+  slot_ = base::subtle::NoBarrier_AtomicIncrement(&g_last_used_tls_key, 1);
+  DCHECK_GT(slot_, 0);
   CHECK_LT(slot_, kThreadLocalStorageSize);
 
   // Setup our destructor.
+  g_tls_destructors[slot_] = destructor;
   base::subtle::Release_Store(&initialized_, 1);
 }
 
 void ThreadLocalStorage::StaticSlot::Free() {
-  DCHECK_NE(slot_, kInvalidSlotValue);
+  // At this time, we don't reclaim old indices for TLS slots.
+  // So all we need to do is wipe the destructor.
+  DCHECK_GT(slot_, 0);
   DCHECK_LT(slot_, kThreadLocalStorageSize);
-  {
-    base::AutoLock auto_lock(*GetTLSMetadataLock());
-    g_tls_metadata[slot_].status = TlsStatus::FREE;
-    g_tls_metadata[slot_].destructor = nullptr;
-    ++(g_tls_metadata[slot_].version);
-  }
-  slot_ = kInvalidSlotValue;
+  g_tls_destructors[slot_] = NULL;
+  slot_ = 0;
   base::subtle::Release_Store(&initialized_, 0);
 }
 
 void* ThreadLocalStorage::StaticSlot::Get() const {
-  TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
+  void** tls_data = static_cast<void**>(
       PlatformThreadLocalStorage::GetTLSValue(
           base::subtle::NoBarrier_Load(&g_native_tls_key)));
   if (!tls_data)
     tls_data = ConstructTlsVector();
-  DCHECK_NE(slot_, kInvalidSlotValue);
+  DCHECK_GT(slot_, 0);
   DCHECK_LT(slot_, kThreadLocalStorageSize);
-  // Version mismatches means this slot was previously freed.
-  if (tls_data[slot_].version != version_)
-    return nullptr;
-  return tls_data[slot_].data;
+  return tls_data[slot_];
 }
 
 void ThreadLocalStorage::StaticSlot::Set(void* value) {
-  TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
+  void** tls_data = static_cast<void**>(
       PlatformThreadLocalStorage::GetTLSValue(
           base::subtle::NoBarrier_Load(&g_native_tls_key)));
   if (!tls_data)
     tls_data = ConstructTlsVector();
-  DCHECK_NE(slot_, kInvalidSlotValue);
+  DCHECK_GT(slot_, 0);
   DCHECK_LT(slot_, kThreadLocalStorageSize);
-  tls_data[slot_].data = value;
-  tls_data[slot_].version = version_;
-}
-
-ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
-  tls_slot_.Initialize(destructor);
-}
-
-ThreadLocalStorage::Slot::~Slot() {
-  tls_slot_.Free();
-}
-
-void* ThreadLocalStorage::Slot::Get() const {
-  return tls_slot_.Get();
-}
-
-void ThreadLocalStorage::Slot::Set(void* value) {
-  tls_slot_.Set(value);
+  tls_data[slot_] = value;
 }
 
 }  // namespace base
diff --git a/base/threading/thread_local_storage.h b/base/threading/thread_local_storage.h
index 5e70410..0c7a692 100644
--- a/base/threading/thread_local_storage.h
+++ b/base/threading/thread_local_storage.h
@@ -5,8 +5,6 @@
 #ifndef BASE_THREADING_THREAD_LOCAL_STORAGE_H_
 #define BASE_THREADING_THREAD_LOCAL_STORAGE_H_
 
-#include <stdint.h>
-
 #include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/macros.h"
@@ -22,12 +20,9 @@
 
 namespace internal {
 
-// WARNING: You should *NOT* use this class directly.
-// PlatformThreadLocalStorage is a low-level abstraction of the OS's TLS
-// interface. Instead, you should use one of the following:
-// * ThreadLocalBoolean (from thread_local.h) for booleans.
-// * ThreadLocalPointer (from thread_local.h) for pointers.
-// * ThreadLocalStorage::StaticSlot/Slot for more direct control of the slot.
+// WARNING: You should *NOT* be using this class directly.
+// PlatformThreadLocalStorage is low-level abstraction to the OS's TLS
+// interface, you should instead be using ThreadLocalStorage::StaticSlot/Slot.
 class BASE_EXPORT PlatformThreadLocalStorage {
  public:
 
@@ -94,7 +89,7 @@
   // initialization, as base's LINKER_INITIALIZED requires a constructor and on
   // some compilers (notably gcc 4.4) this still ends up needing runtime
   // initialization.
-#define TLS_INITIALIZER {false, 0, 0}
+  #define TLS_INITIALIZER {false, 0}
 
   // A key representing one value stored in TLS.
   // Initialize like
@@ -128,25 +123,18 @@
     // The internals of this struct should be considered private.
     base::subtle::Atomic32 initialized_;
     int slot_;
-    uint32_t version_;
   };
 
   // A convenience wrapper around StaticSlot with a constructor. Can be used
   // as a member variable.
-  class BASE_EXPORT Slot {
+  class BASE_EXPORT Slot : public StaticSlot {
    public:
+    // Calls StaticSlot::Initialize().
     explicit Slot(TLSDestructorFunc destructor = NULL);
-    ~Slot();
-
-    // Get the thread-local value stored in this slot.
-    // Values are guaranteed to initially be zero.
-    void* Get() const;
-
-    // Set the slot's thread-local value to |value|.
-    void Set(void* value);
 
    private:
-    StaticSlot tls_slot_;
+    using StaticSlot::initialized_;
+    using StaticSlot::slot_;
 
     DISALLOW_COPY_AND_ASSIGN(Slot);
   };
diff --git a/base/threading/thread_local_storage_unittest.cc b/base/threading/thread_local_storage_unittest.cc
index 335252b..322524b 100644
--- a/base/threading/thread_local_storage_unittest.cc
+++ b/base/threading/thread_local_storage_unittest.cc
@@ -127,14 +127,4 @@
   tls_slot.Free();  // Stop doing callbacks to cleanup threads.
 }
 
-TEST(ThreadLocalStorageTest, TLSReclaim) {
-  // Creates and destroys many TLS slots and ensures they all zero-inited.
-  for (int i = 0; i < 1000; ++i) {
-    ThreadLocalStorage::Slot slot(nullptr);
-    EXPECT_EQ(nullptr, slot.Get());
-    slot.Set(reinterpret_cast<void*>(0xBAADF00D));
-    EXPECT_EQ(reinterpret_cast<void*>(0xBAADF00D), slot.Get());
-  }
-}
-
 }  // namespace base
diff --git a/base/threading/thread_restrictions.cc b/base/threading/thread_restrictions.cc
index 8dd7743..00306c5 100644
--- a/base/threading/thread_restrictions.cc
+++ b/base/threading/thread_restrictions.cc
@@ -4,7 +4,7 @@
 
 #include "base/threading/thread_restrictions.h"
 
-#if DCHECK_IS_ON()
+#if ENABLE_THREAD_RESTRICTIONS
 
 #include "base/lazy_instance.h"
 #include "base/logging.h"
@@ -35,7 +35,7 @@
 // static
 void ThreadRestrictions::AssertIOAllowed() {
   if (g_io_disallowed.Get().Get()) {
-    NOTREACHED() <<
+    LOG(FATAL) <<
         "Function marked as IO-only was called from a thread that "
         "disallows IO!  If this thread really should be allowed to "
         "make IO calls, adjust the call to "
@@ -54,14 +54,10 @@
 // static
 void ThreadRestrictions::AssertSingletonAllowed() {
   if (g_singleton_disallowed.Get().Get()) {
-    NOTREACHED() << "LazyInstance/Singleton is not allowed to be used on this "
-                 << "thread.  Most likely it's because this thread is not "
-                 << "joinable (or the current task is running with "
-                 << "TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN semantics), so "
-                 << "AtExitManager may have deleted the object on shutdown, "
-                 << "leading to a potential shutdown crash. If you need to use "
-                 << "the object from this context, it'll have to be updated to "
-                 << "use Leaky traits.";
+    LOG(FATAL) << "LazyInstance/Singleton is not allowed to be used on this "
+               << "thread.  Most likely it's because this thread is not "
+               << "joinable, so AtExitManager may have deleted the object "
+               << "on shutdown, leading to a potential shutdown crash.";
   }
 }
 
@@ -73,8 +69,8 @@
 // static
 void ThreadRestrictions::AssertWaitAllowed() {
   if (g_wait_disallowed.Get().Get()) {
-    NOTREACHED() << "Waiting is not allowed to be used on this thread to "
-                 << "prevent jank and deadlock.";
+    LOG(FATAL) << "Waiting is not allowed to be used on this thread to prevent "
+               << "jank and deadlock.";
   }
 }
 
@@ -86,4 +82,4 @@
 
 }  // namespace base
 
-#endif  // DCHECK_IS_ON()
+#endif  // ENABLE_THREAD_RESTRICTIONS
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
index a86dd45..4212a4b 100644
--- a/base/threading/thread_restrictions.h
+++ b/base/threading/thread_restrictions.h
@@ -6,9 +6,15 @@
 #define BASE_THREADING_THREAD_RESTRICTIONS_H_
 
 #include "base/base_export.h"
-#include "base/logging.h"
 #include "base/macros.h"
 
+// See comment at top of thread_checker.h
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define ENABLE_THREAD_RESTRICTIONS 1
+#else
+#define ENABLE_THREAD_RESTRICTIONS 0
+#endif
+
 class BrowserProcessImpl;
 class HistogramSynchronizer;
 class NativeBackendKWallet;
@@ -51,10 +57,10 @@
 class GpuChannelHost;
 }
 namespace mojo {
-class SyncCallRestrictions;
-namespace edk {
-class ScopedIPCSupport;
+namespace common {
+class MessagePumpMojo;
 }
+class SyncCallRestrictions;
 }
 namespace ui {
 class CommandBufferClientImpl;
@@ -86,10 +92,6 @@
 class JavaHandlerThread;
 }
 
-namespace internal {
-class TaskTracker;
-}
-
 class SequencedWorkerPool;
 class SimpleThread;
 class Thread;
@@ -135,7 +137,21 @@
     DISALLOW_COPY_AND_ASSIGN(ScopedAllowIO);
   };
 
-#if DCHECK_IS_ON()
+  // Constructing a ScopedAllowSingleton temporarily allows accessing for the
+  // current thread.  Doing this is almost always incorrect.
+  class BASE_EXPORT ScopedAllowSingleton {
+   public:
+    ScopedAllowSingleton() { previous_value_ = SetSingletonAllowed(true); }
+    ~ScopedAllowSingleton() { SetSingletonAllowed(previous_value_); }
+   private:
+    // Whether singleton use is allowed when the ScopedAllowSingleton was
+    // constructed.
+    bool previous_value_;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedAllowSingleton);
+  };
+
+#if ENABLE_THREAD_RESTRICTIONS
   // Set whether the current thread to make IO calls.
   // Threads start out in the *allowed* state.
   // Returns the previous value.
@@ -181,7 +197,6 @@
   friend class content::ScopedAllowWaitForAndroidLayoutTests;
   friend class content::ScopedAllowWaitForDebugURL;
   friend class ::HistogramSynchronizer;
-  friend class internal::TaskTracker;
   friend class ::ScopedAllowWaitForLegacyWebViewApi;
   friend class cc::CompletionEvent;
   friend class cc::SingleThreadTaskGraphRunner;
@@ -195,8 +210,8 @@
   friend class ThreadTestHelper;
   friend class PlatformThread;
   friend class android::JavaHandlerThread;
+  friend class mojo::common::MessagePumpMojo;
   friend class mojo::SyncCallRestrictions;
-  friend class mojo::edk::ScopedIPCSupport;
   friend class ui::CommandBufferClientImpl;
   friend class ui::CommandBufferLocal;
   friend class ui::GpuState;
@@ -225,7 +240,7 @@
   friend class views::ScreenMus;
 // END USAGE THAT NEEDS TO BE FIXED.
 
-#if DCHECK_IS_ON()
+#if ENABLE_THREAD_RESTRICTIONS
   static bool SetWaitAllowed(bool allowed);
 #else
   static bool SetWaitAllowed(bool) { return true; }
diff --git a/base/threading/thread_task_runner_handle.cc b/base/threading/thread_task_runner_handle.cc
index 00deaa4..190e18f 100644
--- a/base/threading/thread_task_runner_handle.cc
+++ b/base/threading/thread_task_runner_handle.cc
@@ -6,10 +6,8 @@
 
 #include <utility>
 
-#include "base/bind.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
 #include "base/threading/sequenced_task_runner_handle.h"
 #include "base/threading/thread_local.h"
 
@@ -34,50 +32,6 @@
   return !!lazy_tls_ptr.Pointer()->Get();
 }
 
-// static
-ScopedClosureRunner ThreadTaskRunnerHandle::OverrideForTesting(
-    scoped_refptr<SingleThreadTaskRunner> overriding_task_runner) {
-  // OverrideForTesting() is not compatible with a SequencedTaskRunnerHandle
-  // being set (but SequencedTaskRunnerHandle::IsSet() includes
-  // ThreadTaskRunnerHandle::IsSet() so that's discounted as the only valid
-  // excuse for it to be true). Sadly this means that tests that merely need a
-  // SequencedTaskRunnerHandle on their main thread can be forced to use a
-  // ThreadTaskRunnerHandle if they're also using test task runners (that
-  // OverrideForTesting() when running their tasks from said main thread). To
-  // solve this: sequence_task_runner_handle.cc and thread_task_runner_handle.cc
-  // would have to be merged into a single impl file and share TLS state. This
-  // was deemed unecessary for now as most tests should use higher level
-  // constructs and not have to instantiate task runner handles on their own.
-  DCHECK(!SequencedTaskRunnerHandle::IsSet() || IsSet());
-
-  if (!IsSet()) {
-    std::unique_ptr<ThreadTaskRunnerHandle> top_level_ttrh =
-        MakeUnique<ThreadTaskRunnerHandle>(std::move(overriding_task_runner));
-    return ScopedClosureRunner(base::Bind(
-        [](std::unique_ptr<ThreadTaskRunnerHandle>) {},
-        base::Passed(&top_level_ttrh)));
-  }
-
-  ThreadTaskRunnerHandle* ttrh = lazy_tls_ptr.Pointer()->Get();
-  // Swap the two (and below bind |overriding_task_runner|, which is now the
-  // previous one, as the |task_runner_to_restore|).
-  ttrh->task_runner_.swap(overriding_task_runner);
-
-  return ScopedClosureRunner(base::Bind(
-      [](scoped_refptr<SingleThreadTaskRunner> task_runner_to_restore,
-         SingleThreadTaskRunner* expected_task_runner_before_restore) {
-        ThreadTaskRunnerHandle* ttrh = lazy_tls_ptr.Pointer()->Get();
-
-        DCHECK_EQ(expected_task_runner_before_restore, ttrh->task_runner_.get())
-            << "Nested overrides must expire their ScopedClosureRunners "
-               "in LIFO order.";
-
-        ttrh->task_runner_.swap(task_runner_to_restore);
-      },
-      base::Passed(&overriding_task_runner),
-      base::Unretained(ttrh->task_runner_.get())));
-}
-
 ThreadTaskRunnerHandle::ThreadTaskRunnerHandle(
     scoped_refptr<SingleThreadTaskRunner> task_runner)
     : task_runner_(std::move(task_runner)) {
diff --git a/base/threading/thread_task_runner_handle.h b/base/threading/thread_task_runner_handle.h
index 7ae85e6..c8e5893 100644
--- a/base/threading/thread_task_runner_handle.h
+++ b/base/threading/thread_task_runner_handle.h
@@ -6,7 +6,6 @@
 #define BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
 
 #include "base/base_export.h"
-#include "base/callback_helpers.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/single_thread_task_runner.h"
@@ -27,17 +26,6 @@
   // the current thread.
   static bool IsSet();
 
-  // Overrides ThreadTaskRunnerHandle::Get()'s |task_runner_| to point at
-  // |overriding_task_runner| until the returned ScopedClosureRunner goes out of
-  // scope (instantiates a ThreadTaskRunnerHandle for that scope if |!IsSet()|).
-  // Nested overrides are allowed but callers must ensure the
-  // ScopedClosureRunners expire in LIFO (stack) order. Note: nesting
-  // ThreadTaskRunnerHandles isn't generally desired but it's useful in unit
-  // tests where multiple task runners can share the main thread for simplicity
-  // and determinism.
-  static ScopedClosureRunner OverrideForTesting(
-      scoped_refptr<SingleThreadTaskRunner> overriding_task_runner);
-
   // Binds |task_runner| to the current thread. |task_runner| must belong
   // to the current thread for this to succeed.
   explicit ThreadTaskRunnerHandle(
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
index af83474..b0fd265 100644
--- a/base/threading/thread_unittest.cc
+++ b/base/threading/thread_unittest.cc
@@ -5,22 +5,13 @@
 #include "base/threading/thread.h"
 
 #include <stddef.h>
-#include <stdint.h>
 
 #include <vector>
 
 #include "base/bind.h"
-#include "base/debug/leak_annotations.h"
-#include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
+#include "base/location.h"
 #include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/test/gtest_util.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
-#include "base/threading/platform_thread.h"
-#include "base/time/time.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
@@ -47,11 +38,8 @@
     init_called_ = true;
   }
   bool InitCalled() { return init_called_; }
-
  private:
   bool init_called_;
-
-  DISALLOW_COPY_AND_ASSIGN(SleepInsideInitThread);
 };
 
 enum ThreadEvent {
@@ -88,8 +76,6 @@
 
  private:
   EventList* event_list_;
-
-  DISALLOW_COPY_AND_ASSIGN(CaptureToEventList);
 };
 
 // Observer that writes a value into |event_list| when a message loop has been
@@ -110,8 +96,6 @@
 
  private:
   EventList* event_list_;
-
-  DISALLOW_COPY_AND_ASSIGN(CapturingDestructionObserver);
 };
 
 // Task that adds a destruction observer to the current message loop.
@@ -131,79 +115,59 @@
 
 }  // namespace
 
+TEST_F(ThreadTest, Restart) {
+  Thread a("Restart");
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  EXPECT_TRUE(a.Start());
+  EXPECT_TRUE(a.message_loop());
+  EXPECT_TRUE(a.IsRunning());
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+  a.Stop();
+  EXPECT_FALSE(a.message_loop());
+  EXPECT_FALSE(a.IsRunning());
+}
+
 TEST_F(ThreadTest, StartWithOptions_StackSize) {
   Thread a("StartWithStackSize");
   // Ensure that the thread can work with only 12 kb and still process a
-  // message. At the same time, we should scale with the bitness of the system
-  // where 12 kb is definitely not enough.
-  // 12 kb = 3072 Slots on a 32-bit system, so we'll scale based off of that.
+  // message.
   Thread::Options options;
-#if defined(ADDRESS_SANITIZER) || !defined(NDEBUG)
-  // ASan bloats the stack variables and overflows the 3072 slot stack. Some
-  // debug builds also grow the stack too much.
-  options.stack_size = 2 * 3072 * sizeof(uintptr_t);
+#if defined(ADDRESS_SANITIZER)
+  // ASan bloats the stack variables and overflows the 12 kb stack.
+  options.stack_size = 24*1024;
 #else
-  options.stack_size = 3072 * sizeof(uintptr_t);
+  options.stack_size = 12*1024;
 #endif
   EXPECT_TRUE(a.StartWithOptions(options));
   EXPECT_TRUE(a.message_loop());
   EXPECT_TRUE(a.IsRunning());
 
-  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
-                            base::WaitableEvent::InitialState::NOT_SIGNALED);
-  a.task_runner()->PostTask(FROM_HERE, base::Bind(&base::WaitableEvent::Signal,
-                                                  base::Unretained(&event)));
-  event.Wait();
+  bool was_invoked = false;
+  a.task_runner()->PostTask(FROM_HERE, base::Bind(&ToggleValue, &was_invoked));
+
+  // wait for the task to run (we could use a kernel event here
+  // instead to avoid busy waiting, but this is sufficient for
+  // testing purposes).
+  for (int i = 100; i >= 0 && !was_invoked; --i) {
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+  }
+  EXPECT_TRUE(was_invoked);
 }
 
-// Intentional test-only race for otherwise untestable code, won't fix.
-// https://crbug.com/634383
-#if !defined(THREAD_SANITIZER)
-TEST_F(ThreadTest, StartWithOptions_NonJoinable) {
-  Thread* a = new Thread("StartNonJoinable");
-  // Non-joinable threads have to be leaked for now (see
-  // Thread::Options::joinable for details).
-  ANNOTATE_LEAKING_OBJECT_PTR(a);
-
-  Thread::Options options;
-  options.joinable = false;
-  EXPECT_TRUE(a->StartWithOptions(options));
-  EXPECT_TRUE(a->message_loop());
-  EXPECT_TRUE(a->IsRunning());
-
-  // Without this call this test is racy. The above IsRunning() succeeds because
-  // of an early-return condition while between Start() and StopSoon(), after
-  // invoking StopSoon() below this early-return condition is no longer
-  // satisfied and the real |is_running_| bit has to be checked. It could still
-  // be false if the message loop hasn't started for real in practice. This is
-  // only a requirement for this test because the non-joinable property forces
-  // it to use StopSoon() and not wait for a complete Stop().
-  EXPECT_TRUE(a->WaitUntilThreadStarted());
-
-  // Make the thread block until |block_event| is signaled.
-  base::WaitableEvent block_event(
-      base::WaitableEvent::ResetPolicy::AUTOMATIC,
-      base::WaitableEvent::InitialState::NOT_SIGNALED);
-  a->task_runner()->PostTask(
-      FROM_HERE,
-      base::Bind(&base::WaitableEvent::Wait, base::Unretained(&block_event)));
-
-  a->StopSoon();
-  EXPECT_TRUE(a->IsRunning());
-
-  // Unblock the task and give a bit of extra time to unwind QuitWhenIdle().
-  block_event.Signal();
-  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
-
-  // The thread should now have stopped on its own.
-  EXPECT_FALSE(a->IsRunning());
-}
-#endif
-
-TEST_F(ThreadTest, TwoTasksOnJoinableThread) {
+TEST_F(ThreadTest, TwoTasks) {
   bool was_invoked = false;
   {
-    Thread a("TwoTasksOnJoinableThread");
+    Thread a("TwoTasks");
     EXPECT_TRUE(a.Start());
     EXPECT_TRUE(a.message_loop());
 
@@ -220,164 +184,18 @@
   EXPECT_TRUE(was_invoked);
 }
 
-TEST_F(ThreadTest, DestroyWhileRunningIsSafe) {
-  Thread a("DestroyWhileRunningIsSafe");
-  EXPECT_TRUE(a.Start());
-  EXPECT_TRUE(a.WaitUntilThreadStarted());
-}
-
-// TODO(gab): Enable this test when destroying a non-joinable Thread instance
-// is supported (proposal @ https://crbug.com/629139#c14).
-TEST_F(ThreadTest, DISABLED_DestroyWhileRunningNonJoinableIsSafe) {
-  {
-    Thread a("DestroyWhileRunningNonJoinableIsSafe");
-    Thread::Options options;
-    options.joinable = false;
-    EXPECT_TRUE(a.StartWithOptions(options));
-    EXPECT_TRUE(a.WaitUntilThreadStarted());
-  }
-
-  // Attempt to catch use-after-frees from the non-joinable thread in the
-  // scope of this test if any.
-  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
-}
-
 TEST_F(ThreadTest, StopSoon) {
   Thread a("StopSoon");
   EXPECT_TRUE(a.Start());
   EXPECT_TRUE(a.message_loop());
   EXPECT_TRUE(a.IsRunning());
   a.StopSoon();
-  a.Stop();
-  EXPECT_FALSE(a.message_loop());
-  EXPECT_FALSE(a.IsRunning());
-}
-
-TEST_F(ThreadTest, StopTwiceNop) {
-  Thread a("StopTwiceNop");
-  EXPECT_TRUE(a.Start());
-  EXPECT_TRUE(a.message_loop());
-  EXPECT_TRUE(a.IsRunning());
-  a.StopSoon();
-  // Calling StopSoon() a second time should be a nop.
   a.StopSoon();
   a.Stop();
-  // Same with Stop().
-  a.Stop();
-  EXPECT_FALSE(a.message_loop());
-  EXPECT_FALSE(a.IsRunning());
-  // Calling them when not running should also nop.
-  a.StopSoon();
-  a.Stop();
-}
-
-// TODO(gab): Enable this test in conjunction with re-enabling the sequence
-// check in Thread::Stop() as part of http://crbug.com/629139.
-TEST_F(ThreadTest, DISABLED_StopOnNonOwningThreadIsDeath) {
-  Thread a("StopOnNonOwningThreadDeath");
-  EXPECT_TRUE(a.StartAndWaitForTesting());
-
-  Thread b("NonOwningThread");
-  b.Start();
-  EXPECT_DCHECK_DEATH({
-    // Stopping |a| on |b| isn't allowed.
-    b.task_runner()->PostTask(FROM_HERE,
-                              base::Bind(&Thread::Stop, base::Unretained(&a)));
-    // Block here so the DCHECK on |b| always happens in this scope.
-    base::PlatformThread::Sleep(base::TimeDelta::Max());
-  });
-}
-
-TEST_F(ThreadTest, TransferOwnershipAndStop) {
-  std::unique_ptr<Thread> a =
-      base::MakeUnique<Thread>("TransferOwnershipAndStop");
-  EXPECT_TRUE(a->StartAndWaitForTesting());
-  EXPECT_TRUE(a->IsRunning());
-
-  Thread b("TakingOwnershipThread");
-  b.Start();
-
-  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
-                            base::WaitableEvent::InitialState::NOT_SIGNALED);
-
-  // a->DetachFromSequence() should allow |b| to use |a|'s Thread API.
-  a->DetachFromSequence();
-  b.task_runner()->PostTask(
-      FROM_HERE, base::Bind(
-                     [](std::unique_ptr<Thread> thread_to_stop,
-                        base::WaitableEvent* event_to_signal) -> void {
-                       thread_to_stop->Stop();
-                       event_to_signal->Signal();
-                     },
-                     base::Passed(&a), base::Unretained(&event)));
-
-  event.Wait();
-}
-
-TEST_F(ThreadTest, StartTwice) {
-  Thread a("StartTwice");
-
-  EXPECT_FALSE(a.message_loop());
-  EXPECT_FALSE(a.IsRunning());
-
-  EXPECT_TRUE(a.Start());
-  EXPECT_TRUE(a.message_loop());
-  EXPECT_TRUE(a.IsRunning());
-
-  a.Stop();
-  EXPECT_FALSE(a.message_loop());
-  EXPECT_FALSE(a.IsRunning());
-
-  EXPECT_TRUE(a.Start());
-  EXPECT_TRUE(a.message_loop());
-  EXPECT_TRUE(a.IsRunning());
-
-  a.Stop();
   EXPECT_FALSE(a.message_loop());
   EXPECT_FALSE(a.IsRunning());
 }
 
-// Intentional test-only race for otherwise untestable code, won't fix.
-// https://crbug.com/634383
-#if !defined(THREAD_SANITIZER)
-TEST_F(ThreadTest, StartTwiceNonJoinableNotAllowed) {
-  LOG(ERROR) << __FUNCTION__;
-  Thread* a = new Thread("StartTwiceNonJoinable");
-  // Non-joinable threads have to be leaked for now (see
-  // Thread::Options::joinable for details).
-  ANNOTATE_LEAKING_OBJECT_PTR(a);
-
-  Thread::Options options;
-  options.joinable = false;
-  EXPECT_TRUE(a->StartWithOptions(options));
-  EXPECT_TRUE(a->message_loop());
-  EXPECT_TRUE(a->IsRunning());
-
-  // Signaled when last task on |a| is processed.
-  base::WaitableEvent last_task_event(
-      base::WaitableEvent::ResetPolicy::AUTOMATIC,
-      base::WaitableEvent::InitialState::NOT_SIGNALED);
-  a->task_runner()->PostTask(FROM_HERE,
-                            base::Bind(&base::WaitableEvent::Signal,
-                                       base::Unretained(&last_task_event)));
-
-  // StopSoon() is non-blocking, Yield() to |a|, wait for last task to be
-  // processed and a little more for QuitWhenIdle() to unwind before considering
-  // the thread "stopped".
-  a->StopSoon();
-  base::PlatformThread::YieldCurrentThread();
-  last_task_event.Wait();
-  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(20));
-
-  // This test assumes that the above was sufficient to let the thread fully
-  // stop.
-  ASSERT_FALSE(a->IsRunning());
-
-  // Restarting it should not be allowed.
-  EXPECT_DCHECK_DEATH(a->Start());
-}
-#endif
-
 TEST_F(ThreadTest, ThreadName) {
   Thread a("ThreadName");
   EXPECT_TRUE(a.Start());
@@ -479,91 +297,3 @@
   EXPECT_TRUE(a.WaitUntilThreadStarted());
   EXPECT_TRUE(a.WaitUntilThreadStarted());
 }
-
-TEST_F(ThreadTest, FlushForTesting) {
-  Thread a("FlushForTesting");
-
-  // Flushing a non-running thread should be a no-op.
-  a.FlushForTesting();
-
-  ASSERT_TRUE(a.Start());
-
-  // Flushing a thread with no tasks shouldn't block.
-  a.FlushForTesting();
-
-  constexpr base::TimeDelta kSleepPerTestTask =
-      base::TimeDelta::FromMilliseconds(50);
-  constexpr size_t kNumSleepTasks = 5;
-
-  const base::TimeTicks ticks_before_post = base::TimeTicks::Now();
-
-  for (size_t i = 0; i < kNumSleepTasks; ++i) {
-    a.task_runner()->PostTask(
-        FROM_HERE, base::Bind(&base::PlatformThread::Sleep, kSleepPerTestTask));
-  }
-
-  // All tasks should have executed, as reflected by the elapsed time.
-  a.FlushForTesting();
-  EXPECT_GE(base::TimeTicks::Now() - ticks_before_post,
-            kNumSleepTasks * kSleepPerTestTask);
-
-  a.Stop();
-
-  // Flushing a stopped thread should be a no-op.
-  a.FlushForTesting();
-}
-
-namespace {
-
-// A Thread which uses a MessageLoop on the stack. It won't start a real
-// underlying thread (instead its messages can be processed by a RunLoop on the
-// stack).
-class ExternalMessageLoopThread : public Thread {
- public:
-  ExternalMessageLoopThread() : Thread("ExternalMessageLoopThread") {}
-
-  ~ExternalMessageLoopThread() override { Stop(); }
-
-  void InstallMessageLoop() { SetMessageLoop(&external_message_loop_); }
-
-  void VerifyUsingExternalMessageLoop(
-      bool expected_using_external_message_loop) {
-    EXPECT_EQ(expected_using_external_message_loop,
-              using_external_message_loop());
-  }
-
- private:
-  base::MessageLoop external_message_loop_;
-
-  DISALLOW_COPY_AND_ASSIGN(ExternalMessageLoopThread);
-};
-
-}  // namespace
-
-TEST_F(ThreadTest, ExternalMessageLoop) {
-  ExternalMessageLoopThread a;
-  EXPECT_FALSE(a.message_loop());
-  EXPECT_FALSE(a.IsRunning());
-  a.VerifyUsingExternalMessageLoop(false);
-
-  a.InstallMessageLoop();
-  EXPECT_TRUE(a.message_loop());
-  EXPECT_TRUE(a.IsRunning());
-  a.VerifyUsingExternalMessageLoop(true);
-
-  bool ran = false;
-  a.task_runner()->PostTask(
-      FROM_HERE, base::Bind([](bool* toggled) { *toggled = true; }, &ran));
-  base::RunLoop().RunUntilIdle();
-  EXPECT_TRUE(ran);
-
-  a.Stop();
-  EXPECT_FALSE(a.message_loop());
-  EXPECT_FALSE(a.IsRunning());
-  a.VerifyUsingExternalMessageLoop(true);
-
-  // Confirm that running any remaining tasks posted from Stop() goes smoothly
-  // (e.g. https://codereview.chromium.org/2135413003/#ps300001 crashed if
-  // StopSoon() posted Thread::ThreadQuitHelper() while |run_loop_| was null).
-  base::RunLoop().RunUntilIdle();
-}
diff --git a/base/threading/worker_pool.cc b/base/threading/worker_pool.cc
index d47037d..0b7bf8e 100644
--- a/base/threading/worker_pool.cc
+++ b/base/threading/worker_pool.cc
@@ -4,11 +4,9 @@
 
 #include "base/threading/worker_pool.h"
 
-#include <utility>
-
 #include "base/bind.h"
 #include "base/compiler_specific.h"
-#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance.h"
 #include "base/macros.h"
 #include "base/task_runner.h"
 #include "base/threading/post_task_and_reply_impl.h"
@@ -99,27 +97,27 @@
   scoped_refptr<TaskRunner> taskrunners_[2];
 };
 
+base::LazyInstance<TaskRunnerHolder>::Leaky
+    g_taskrunners = LAZY_INSTANCE_INITIALIZER;
+
 }  // namespace
 
 bool WorkerPool::PostTaskAndReply(const tracked_objects::Location& from_here,
-                                  Closure task,
-                                  Closure reply,
+                                  const Closure& task,
+                                  const Closure& reply,
                                   bool task_is_slow) {
   // Do not report PostTaskAndReplyRelay leaks in tests. There's nothing we can
   // do about them because WorkerPool doesn't have a flushing API.
   // http://crbug.com/248513
   // http://crbug.com/290897
-  // Note: this annotation does not cover tasks posted through a TaskRunner.
-  ANNOTATE_SCOPED_MEMORY_LEAK;
-  return PostTaskAndReplyWorkerPool(task_is_slow)
-      .PostTaskAndReply(from_here, std::move(task), std::move(reply));
+  return PostTaskAndReplyWorkerPool(task_is_slow).PostTaskAndReply(
+      from_here, task, reply);
 }
 
 // static
 const scoped_refptr<TaskRunner>&
 WorkerPool::GetTaskRunner(bool tasks_are_slow) {
-  static auto* task_runner_holder = new TaskRunnerHolder();
-  return task_runner_holder->taskrunners_[tasks_are_slow];
+  return g_taskrunners.Get().taskrunners_[tasks_are_slow];
 }
 
 }  // namespace base
diff --git a/base/threading/worker_pool.h b/base/threading/worker_pool.h
index 865948e..a52a414 100644
--- a/base/threading/worker_pool.h
+++ b/base/threading/worker_pool.h
@@ -6,9 +6,11 @@
 #define BASE_THREADING_WORKER_POOL_H_
 
 #include "base/base_export.h"
-#include "base/callback.h"
+#include "base/callback_forward.h"
 #include "base/memory/ref_counted.h"
 
+class Task;
+
 namespace tracked_objects {
 class Location;
 }  // namespace tracked_objects
@@ -38,8 +40,8 @@
   // for |task| is a worker thread and you can specify |task_is_slow| just
   // like you can for PostTask above.
   static bool PostTaskAndReply(const tracked_objects::Location& from_here,
-                               Closure task,
-                               Closure reply,
+                               const Closure& task,
+                               const Closure& reply,
                                bool task_is_slow);
 
   // Return true if the current thread is one that this WorkerPool runs tasks
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index 0e19a1a..6b4c42f 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -30,21 +30,10 @@
 
 const int kIdleSecondsBeforeExit = 10 * 60;
 
-#if defined(OS_MACOSX)
-// On Mac OS X a background thread's default stack size is 512Kb. We need at
-// least 1MB for compilation tasks in V8, so increase this default.
-const int kStackSize = 1 * 1024 * 1024;
-#else
-const int kStackSize = 0;
-#endif
-
 class WorkerPoolImpl {
  public:
   WorkerPoolImpl();
-
-  // WorkerPoolImpl is only instantiated as a leaky LazyInstance, so the
-  // destructor is never called.
-  ~WorkerPoolImpl() = delete;
+  ~WorkerPoolImpl();
 
   void PostTask(const tracked_objects::Location& from_here,
                 const base::Closure& task,
@@ -58,13 +47,17 @@
     : pool_(new base::PosixDynamicThreadPool("WorkerPool",
                                              kIdleSecondsBeforeExit)) {}
 
+WorkerPoolImpl::~WorkerPoolImpl() {
+  pool_->Terminate();
+}
+
 void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
                               const base::Closure& task,
                               bool /*task_is_slow*/) {
   pool_->PostTask(from_here, task);
 }
 
-base::LazyInstance<WorkerPoolImpl>::Leaky g_lazy_worker_pool =
+base::LazyInstance<WorkerPoolImpl> g_lazy_worker_pool =
     LAZY_INSTANCE_INITIALIZER;
 
 class WorkerThread : public PlatformThread::Delegate {
@@ -97,7 +90,7 @@
 
     tracked_objects::TaskStopwatch stopwatch;
     stopwatch.Start();
-    std::move(pending_task.task).Run();
+    pending_task.task.Run();
     stopwatch.Stop();
 
     tracked_objects::ThreadData::TallyRunOnWorkerThreadIfTracking(
@@ -128,13 +121,23 @@
     : name_prefix_(name_prefix),
       idle_seconds_before_exit_(idle_seconds_before_exit),
       pending_tasks_available_cv_(&lock_),
-      num_idle_threads_(0) {}
+      num_idle_threads_(0),
+      terminated_(false) {}
 
 PosixDynamicThreadPool::~PosixDynamicThreadPool() {
   while (!pending_tasks_.empty())
     pending_tasks_.pop();
 }
 
+void PosixDynamicThreadPool::Terminate() {
+  {
+    AutoLock locked(lock_);
+    DCHECK(!terminated_) << "Thread pool is already terminated.";
+    terminated_ = true;
+  }
+  pending_tasks_available_cv_.Broadcast();
+}
+
 void PosixDynamicThreadPool::PostTask(
     const tracked_objects::Location& from_here,
     const base::Closure& task) {
@@ -144,6 +147,8 @@
 
 void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
   AutoLock locked(lock_);
+  DCHECK(!terminated_)
+      << "This thread pool is already terminated.  Do not post new tasks.";
 
   pending_tasks_.push(std::move(*pending_task));
 
@@ -154,13 +159,16 @@
     // The new PlatformThread will take ownership of the WorkerThread object,
     // which will delete itself on exit.
     WorkerThread* worker = new WorkerThread(name_prefix_, this);
-    PlatformThread::CreateNonJoinable(kStackSize, worker);
+    PlatformThread::CreateNonJoinable(0, worker);
   }
 }
 
 PendingTask PosixDynamicThreadPool::WaitForTask() {
   AutoLock locked(lock_);
 
+  if (terminated_)
+    return PendingTask(FROM_HERE, base::Closure());
+
   if (pending_tasks_.empty()) {  // No work available, wait for work.
     num_idle_threads_++;
     if (num_idle_threads_cv_.get())
diff --git a/base/threading/worker_pool_posix.h b/base/threading/worker_pool_posix.h
index d65ae8f..628e2b6 100644
--- a/base/threading/worker_pool_posix.h
+++ b/base/threading/worker_pool_posix.h
@@ -38,6 +38,8 @@
 #include "base/threading/platform_thread.h"
 #include "base/tracked_objects.h"
 
+class Task;
+
 namespace base {
 
 class BASE_EXPORT PosixDynamicThreadPool
@@ -50,6 +52,10 @@
   PosixDynamicThreadPool(const std::string& name_prefix,
                          int idle_seconds_before_exit);
 
+  // Indicates that the thread pool is going away.  Stops handing out tasks to
+  // worker threads.  Wakes up all the idle threads to let them exit.
+  void Terminate();
+
   // Adds |task| to the thread pool.
   void PostTask(const tracked_objects::Location& from_here,
                 const Closure& task);
@@ -79,6 +85,7 @@
   ConditionVariable pending_tasks_available_cv_;
   int num_idle_threads_;
   TaskQueue pending_tasks_;
+  bool terminated_;
   // Only used for tests to ensure correct thread ordering.  It will always be
   // NULL in non-test code.
   std::unique_ptr<ConditionVariable> num_idle_threads_cv_;
diff --git a/base/threading/worker_pool_posix_unittest.cc b/base/threading/worker_pool_posix_unittest.cc
index b4e8b58..6cefeed 100644
--- a/base/threading/worker_pool_posix_unittest.cc
+++ b/base/threading/worker_pool_posix_unittest.cc
@@ -103,6 +103,12 @@
     peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
   }
 
+  void TearDown() override {
+    // Wake up the idle threads so they can terminate.
+    if (pool_.get())
+      pool_->Terminate();
+  }
+
   void WaitForTasksToStart(int num_tasks) {
     base::AutoLock num_waiting_to_start_locked(num_waiting_to_start_lock_);
     while (num_waiting_to_start_ < num_tasks) {
diff --git a/base/time/time.cc b/base/time/time.cc
index d1c6a47..3670f55 100644
--- a/base/time/time.cc
+++ b/base/time/time.cc
@@ -21,6 +21,11 @@
 
 // TimeDelta ------------------------------------------------------------------
 
+// static
+TimeDelta TimeDelta::Max() {
+  return TimeDelta(std::numeric_limits<int64_t>::max());
+}
+
 int TimeDelta::InDays() const {
   if (is_max()) {
     // Preserve max to prevent overflow.
@@ -99,29 +104,33 @@
 int64_t SaturatedAdd(TimeDelta delta, int64_t value) {
   CheckedNumeric<int64_t> rv(delta.delta_);
   rv += value;
-  if (rv.IsValid())
-    return rv.ValueOrDie();
-  // Positive RHS overflows. Negative RHS underflows.
-  if (value < 0)
-    return -std::numeric_limits<int64_t>::max();
-  return std::numeric_limits<int64_t>::max();
+  return FromCheckedNumeric(rv);
 }
 
 int64_t SaturatedSub(TimeDelta delta, int64_t value) {
   CheckedNumeric<int64_t> rv(delta.delta_);
   rv -= value;
-  if (rv.IsValid())
-    return rv.ValueOrDie();
-  // Negative RHS overflows. Positive RHS underflows.
-  if (value < 0)
-    return std::numeric_limits<int64_t>::max();
-  return -std::numeric_limits<int64_t>::max();
+  return FromCheckedNumeric(rv);
+}
+
+int64_t FromCheckedNumeric(const CheckedNumeric<int64_t> value) {
+  if (value.IsValid())
+    return value.ValueUnsafe();
+
+  // We could return max/min but we don't really expose what the maximum delta
+  // is. Instead, return max/(-max), which is something that clients can reason
+  // about.
+  // TODO(rvargas) crbug.com/332611: don't use internal values.
+  int64_t limit = std::numeric_limits<int64_t>::max();
+  if (value.validity() == internal::RANGE_UNDERFLOW)
+    limit = -limit;
+  return value.ValueOrDefault(limit);
 }
 
 }  // namespace time_internal
 
 std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
-  return os << time_delta.InSecondsF() << " s";
+  return os << time_delta.InSecondsF() << "s";
 }
 
 // Time -----------------------------------------------------------------------
@@ -198,11 +207,6 @@
           kMicrosecondsPerMillisecond);
 }
 
-Time Time::FromJavaTime(int64_t ms_since_epoch) {
-  return base::Time::UnixEpoch() +
-         base::TimeDelta::FromMilliseconds(ms_since_epoch);
-}
-
 int64_t Time::ToJavaTime() const {
   if (is_null()) {
     // Preserve 0 so the invalid result doesn't depend on the platform.
@@ -230,12 +234,7 @@
   exploded.minute = 0;
   exploded.second = 0;
   exploded.millisecond = 0;
-  Time out_time;
-  if (FromLocalExploded(exploded, &out_time))
-    return out_time;
-  // This function must not fail.
-  NOTREACHED();
-  return Time();
+  return FromLocalExploded(exploded);
 }
 
 // static
diff --git a/base/time/time.h b/base/time/time.h
index ff8bdde..efece96 100644
--- a/base/time/time.h
+++ b/base/time/time.h
@@ -21,11 +21,9 @@
 // ThreadTicks will "stand still" whenever the thread has been de-scheduled by
 // the operating system.
 //
-// All time classes are copyable, assignable, and occupy 64-bits per instance.
-// As a result, prefer passing them by value:
-//   void MyFunction(TimeDelta arg);
-// If circumstances require, you may also pass by const reference:
-//   void MyFunction(const TimeDelta& arg);  // Not preferred.
+// All time classes are copyable, assignable, and occupy 64-bits per
+// instance. Thus, they can be efficiently passed by-value (as opposed to
+// by-reference).
 //
 // Definitions of operator<< are provided to make these types work with
 // DCHECK_EQ() and other log macros. For human-readable formatting, see
@@ -59,7 +57,6 @@
 
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
-#include "base/logging.h"
 #include "base/numerics/safe_math.h"
 #include "build/build_config.h"
 
@@ -96,6 +93,10 @@
 BASE_EXPORT int64_t SaturatedAdd(TimeDelta delta, int64_t value);
 BASE_EXPORT int64_t SaturatedSub(TimeDelta delta, int64_t value);
 
+// Clamp |value| on overflow and underflow conditions. The int64_t argument and
+// return value are in terms of a microsecond timebase.
+BASE_EXPORT int64_t FromCheckedNumeric(const CheckedNumeric<int64_t> value);
+
 }  // namespace time_internal
 
 // TimeDelta ------------------------------------------------------------------
@@ -114,9 +115,6 @@
   static constexpr TimeDelta FromSecondsD(double secs);
   static constexpr TimeDelta FromMillisecondsD(double ms);
   static constexpr TimeDelta FromMicroseconds(int64_t us);
-#if defined(OS_POSIX)
-  static TimeDelta FromTimeSpec(const timespec& ts);
-#endif
 #if defined(OS_WIN)
   static TimeDelta FromQPCValue(LONGLONG qpc_value);
 #endif
@@ -130,7 +128,7 @@
   // Returns the maximum time delta, which should be greater than any reasonable
   // time delta we might compare it to. Adding or subtracting the maximum time
   // delta to a time or another time delta has an undefined result.
-  static constexpr TimeDelta Max();
+  static TimeDelta Max();
 
   // Returns the internal numeric value of the TimeDelta object. Please don't
   // use this and do arithmetic on it, as it is more error prone than using the
@@ -202,24 +200,13 @@
   TimeDelta operator*(T a) const {
     CheckedNumeric<int64_t> rv(delta_);
     rv *= a;
-    if (rv.IsValid())
-      return TimeDelta(rv.ValueOrDie());
-    // Matched sign overflows. Mismatched sign underflows.
-    if ((delta_ < 0) ^ (a < 0))
-      return TimeDelta(-std::numeric_limits<int64_t>::max());
-    return TimeDelta(std::numeric_limits<int64_t>::max());
+    return TimeDelta(time_internal::FromCheckedNumeric(rv));
   }
   template<typename T>
   TimeDelta operator/(T a) const {
     CheckedNumeric<int64_t> rv(delta_);
     rv /= a;
-    if (rv.IsValid())
-      return TimeDelta(rv.ValueOrDie());
-    // Matched sign overflows. Mismatched sign underflows.
-    // Special case to catch divide by zero.
-    if ((delta_ < 0) ^ (a <= 0))
-      return TimeDelta(-std::numeric_limits<int64_t>::max());
-    return TimeDelta(std::numeric_limits<int64_t>::max());
+    return TimeDelta(time_internal::FromCheckedNumeric(rv));
   }
   template<typename T>
   TimeDelta& operator*=(T a) {
@@ -255,11 +242,6 @@
     return delta_ >= other.delta_;
   }
 
-#if defined(OS_WIN)
-  // This works around crbug.com/635974
-  constexpr TimeDelta(const TimeDelta& other) : delta_(other.delta_) {}
-#endif
-
  private:
   friend int64_t time_internal::SaturatedAdd(TimeDelta delta, int64_t value);
   friend int64_t time_internal::SaturatedSub(TimeDelta delta, int64_t value);
@@ -470,6 +452,8 @@
   static Time NowFromSystemTime();
 
   // Converts to/from time_t in UTC and a Time class.
+  // TODO(brettw) this should be removed once everybody starts using the |Time|
+  // class.
   static Time FromTimeT(time_t tt);
   time_t ToTimeT() const;
 
@@ -495,9 +479,8 @@
   static Time FromJsTime(double ms_since_epoch);
   double ToJsTime() const;
 
-  // Converts to/from Java convention for times, a number of
+  // Converts to Java convention for times, a number of
   // milliseconds since the epoch.
-  static Time FromJavaTime(int64_t ms_since_epoch);
   int64_t ToJavaTime() const;
 
 #if defined(OS_POSIX)
@@ -538,8 +521,23 @@
 #endif
 
   // Converts an exploded structure representing either the local time or UTC
+  // into a Time class.
+  // TODO(maksims): Get rid of these in favor of the methods below when
+  // all the callers stop using these ones.
+  static Time FromUTCExploded(const Exploded& exploded) {
+    base::Time time;
+    ignore_result(FromUTCExploded(exploded, &time));
+    return time;
+  }
+  static Time FromLocalExploded(const Exploded& exploded) {
+    base::Time time;
+    ignore_result(FromLocalExploded(exploded, &time));
+    return time;
+  }
+
+  // Converts an exploded structure representing either the local time or UTC
   // into a Time class. Returns false on a failure when, for example, a day of
-  // month is set to 31 on a 28-30 day month. Returns Time(0) on overflow.
+  // month is set to 31 on a 28-30 day month.
   static bool FromUTCExploded(const Exploded& exploded,
                               Time* time) WARN_UNUSED_RESULT {
     return FromExploded(false, exploded, time);
@@ -557,12 +555,10 @@
   // specified in RFC822) is treated as if the timezone is not specified.
   // TODO(iyengar) Move the FromString/FromTimeT/ToTimeT/FromFileTime to
   // a new time converter class.
-  static bool FromString(const char* time_string,
-                         Time* parsed_time) WARN_UNUSED_RESULT {
+  static bool FromString(const char* time_string, Time* parsed_time) {
     return FromStringInternal(time_string, true, parsed_time);
   }
-  static bool FromUTCString(const char* time_string,
-                            Time* parsed_time) WARN_UNUSED_RESULT {
+  static bool FromUTCString(const char* time_string, Time* parsed_time) {
     return FromStringInternal(time_string, false, parsed_time);
   }
 
@@ -605,11 +601,10 @@
   // timezone is not specified.
   static bool FromStringInternal(const char* time_string,
                                  bool is_local,
-                                 Time* parsed_time) WARN_UNUSED_RESULT;
+                                 Time* parsed_time);
 
   // Comparison does not consider |day_of_week| when doing the operation.
-  static bool ExplodedMostlyEquals(const Exploded& lhs,
-                                   const Exploded& rhs) WARN_UNUSED_RESULT;
+  static bool ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs);
 };
 
 // static
@@ -659,11 +654,6 @@
 }
 
 // static
-constexpr TimeDelta TimeDelta::Max() {
-  return TimeDelta(std::numeric_limits<int64_t>::max());
-}
-
-// static
 constexpr TimeDelta TimeDelta::FromDouble(double value) {
   // TODO(crbug.com/612601): Use saturated_cast<int64_t>(value) once we sort out
   // the Min() behavior.
@@ -721,14 +711,7 @@
   // Now() will return high resolution values. Note that, on systems where the
   // high resolution clock works but is deemed inefficient, the low resolution
   // clock will be used instead.
-  static bool IsHighResolution() WARN_UNUSED_RESULT;
-
-  // Returns true if TimeTicks is consistent across processes, meaning that
-  // timestamps taken on different processes can be safely compared with one
-  // another. (Note that, even on platforms where this returns true, time values
-  // from different threads that are within one tick of each other must be
-  // considered to have an ambiguous ordering.)
-  static bool IsConsistentAcrossProcesses() WARN_UNUSED_RESULT;
+  static bool IsHighResolution();
 
 #if defined(OS_WIN)
   // Translates an absolute QPC timestamp into a TimeTicks value. The returned
@@ -737,10 +720,6 @@
   static TimeTicks FromQPCValue(LONGLONG qpc_value);
 #endif
 
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
-
   // Get an estimate of the TimeTick value at the time of the UnixEpoch. Because
   // Time and TimeTicks respond differently to user-set time and NTP
   // adjustments, this number is only an estimate. Nevertheless, this can be
@@ -789,7 +768,7 @@
   }
 
   // Returns true if ThreadTicks::Now() is supported on this system.
-  static bool IsSupported() WARN_UNUSED_RESULT {
+  static bool IsSupported() {
 #if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
     (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_ANDROID)
     return true;
@@ -840,7 +819,7 @@
   // allow testing.
   static double TSCTicksPerSecond();
 
-  static bool IsSupportedWin() WARN_UNUSED_RESULT;
+  static bool IsSupportedWin();
   static void WaitUntilInitializedWin();
 #endif
 };
diff --git a/base/time/time_mac.cc b/base/time/time_mac.cc
index c75423d..373ec3a 100644
--- a/base/time/time_mac.cc
+++ b/base/time/time_mac.cc
@@ -25,33 +25,6 @@
 
 namespace {
 
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-int64_t MachAbsoluteTimeToTicks(uint64_t mach_absolute_time) {
-  static mach_timebase_info_data_t timebase_info;
-  if (timebase_info.denom == 0) {
-    // Zero-initialization of statics guarantees that denom will be 0 before
-    // calling mach_timebase_info.  mach_timebase_info will never set denom to
-    // 0 as that would be invalid, so the zero-check can be used to determine
-    // whether mach_timebase_info has already been called.  This is
-    // recommended by Apple's QA1398.
-    kern_return_t kr = mach_timebase_info(&timebase_info);
-    MACH_DCHECK(kr == KERN_SUCCESS, kr) << "mach_timebase_info";
-  }
-
-  // timebase_info converts absolute time tick units into nanoseconds.  Convert
-  // to microseconds up front to stave off overflows.
-  base::CheckedNumeric<uint64_t> result(mach_absolute_time /
-                                        base::Time::kNanosecondsPerMicrosecond);
-  result *= timebase_info.numer;
-  result /= timebase_info.denom;
-
-  // Don't bother with the rollover handling that the Windows version does.
-  // With numer and denom = 1 (the expected case), the 64-bit absolute time
-  // reported in nanoseconds is enough to last nearly 585 years.
-  return base::checked_cast<int64_t>(result.ValueOrDie());
-}
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
-
 int64_t ComputeCurrentTicks() {
 #if defined(OS_IOS)
   // On iOS mach_absolute_time stops while the device is sleeping. Instead use
@@ -63,15 +36,37 @@
   size_t size = sizeof(boottime);
   int kr = sysctl(mib, arraysize(mib), &boottime, &size, nullptr, 0);
   DCHECK_EQ(KERN_SUCCESS, kr);
-  base::TimeDelta time_difference =
-      base::Time::Now() - (base::Time::FromTimeT(boottime.tv_sec) +
-                           base::TimeDelta::FromMicroseconds(boottime.tv_usec));
+  base::TimeDelta time_difference = base::Time::Now() -
+      (base::Time::FromTimeT(boottime.tv_sec) +
+       base::TimeDelta::FromMicroseconds(boottime.tv_usec));
   return time_difference.InMicroseconds();
 #else
+  static mach_timebase_info_data_t timebase_info;
+  if (timebase_info.denom == 0) {
+    // Zero-initialization of statics guarantees that denom will be 0 before
+    // calling mach_timebase_info.  mach_timebase_info will never set denom to
+    // 0 as that would be invalid, so the zero-check can be used to determine
+    // whether mach_timebase_info has already been called.  This is
+    // recommended by Apple's QA1398.
+    kern_return_t kr = mach_timebase_info(&timebase_info);
+    MACH_DCHECK(kr == KERN_SUCCESS, kr) << "mach_timebase_info";
+  }
+
   // mach_absolute_time is it when it comes to ticks on the Mac.  Other calls
   // with less precision (such as TickCount) just call through to
   // mach_absolute_time.
-  return MachAbsoluteTimeToTicks(mach_absolute_time());
+
+  // timebase_info converts absolute time tick units into nanoseconds.  Convert
+  // to microseconds up front to stave off overflows.
+  base::CheckedNumeric<uint64_t> result(
+      mach_absolute_time() / base::Time::kNanosecondsPerMicrosecond);
+  result *= timebase_info.numer;
+  result /= timebase_info.denom;
+
+  // Don't bother with the rollover handling that the Windows version does.
+  // With numer and denom = 1 (the expected case), the 64-bit absolute time
+  // reported in nanoseconds is enough to last nearly 585 years.
+  return base::checked_cast<int64_t>(result.ValueOrDie());
 #endif  // defined(OS_IOS)
 }
 
@@ -190,18 +185,9 @@
       exploded.millisecond);
   CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
 
-  // CFAbsolutTime is typedef of double. Convert seconds to
-  // microseconds and then cast to int64. If
-  // it cannot be suited to int64, then fail to avoid overflows.
-  double microseconds =
-      (seconds * kMicrosecondsPerSecond) + kWindowsEpochDeltaMicroseconds;
-  if (microseconds > std::numeric_limits<int64_t>::max() ||
-      microseconds < std::numeric_limits<int64_t>::min()) {
-    *time = Time(0);
-    return false;
-  }
-
-  base::Time converted_time = Time(static_cast<int64_t>(microseconds));
+  base::Time converted_time =
+      Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
+           kWindowsEpochDeltaMicroseconds);
 
   // If |exploded.day_of_month| is set to 31
   // on a 28-30 day month, it will return the first day of the next month.
@@ -273,18 +259,6 @@
 }
 
 // static
-bool TimeTicks::IsConsistentAcrossProcesses() {
-  return true;
-}
-
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-// static
-TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
-  return TimeTicks(MachAbsoluteTimeToTicks(mach_absolute_time));
-}
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
-
-// static
 TimeTicks::Clock TimeTicks::GetClock() {
 #if defined(OS_IOS)
   return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
diff --git a/base/time/time_posix.cc b/base/time/time_posix.cc
index 2cceb0c..495e249 100644
--- a/base/time/time_posix.cc
+++ b/base/time/time_posix.cc
@@ -16,7 +16,6 @@
 #include <ostream>
 
 #include "base/logging.h"
-#include "base/numerics/safe_math.h"
 #include "build/build_config.h"
 
 #if defined(OS_ANDROID)
@@ -26,6 +25,7 @@
 #endif
 
 #if !defined(OS_MACOSX)
+#include "base/lazy_instance.h"
 #include "base/synchronization/lock.h"
 #endif
 
@@ -34,10 +34,8 @@
 #if !defined(OS_MACOSX)
 // This prevents a crash on traversing the environment global and looking up
 // the 'TZ' variable in libc. See: crbug.com/390567.
-base::Lock* GetSysTimeToTimeStructLock() {
-  static auto* lock = new base::Lock();
-  return lock;
-}
+base::LazyInstance<base::Lock>::Leaky
+    g_sys_time_to_time_struct_lock = LAZY_INSTANCE_INITIALIZER;
 
 // Define a system-specific SysTime that wraps either to a time_t or
 // a time64_t depending on the host system, and associated convertion.
@@ -46,7 +44,7 @@
 typedef time64_t SysTime;
 
 SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
-  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
   if (is_local)
     return mktime64(timestruct);
   else
@@ -54,7 +52,7 @@
 }
 
 void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
-  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
   if (is_local)
     localtime64_r(&t, timestruct);
   else
@@ -65,7 +63,7 @@
 typedef time_t SysTime;
 
 SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
-  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
   if (is_local)
     return mktime(timestruct);
   else
@@ -73,7 +71,7 @@
 }
 
 void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
-  base::AutoLock locked(*GetSysTimeToTimeStructLock());
+  base::AutoLock locked(g_sys_time_to_time_struct_lock.Get());
   if (is_local)
     localtime_r(&t, timestruct);
   else
@@ -82,19 +80,10 @@
 #endif  // OS_ANDROID
 
 int64_t ConvertTimespecToMicros(const struct timespec& ts) {
-  // On 32-bit systems, the calculation cannot overflow int64_t.
-  // 2**32 * 1000000 + 2**64 / 1000 < 2**63
-  if (sizeof(ts.tv_sec) <= 4 && sizeof(ts.tv_nsec) <= 8) {
-    int64_t result = ts.tv_sec;
-    result *= base::Time::kMicrosecondsPerSecond;
-    result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
-    return result;
-  } else {
-    base::CheckedNumeric<int64_t> result(ts.tv_sec);
-    result *= base::Time::kMicrosecondsPerSecond;
-    result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
-    return result.ValueOrDie();
-  }
+  base::CheckedNumeric<int64_t> result(ts.tv_sec);
+  result *= base::Time::kMicrosecondsPerSecond;
+  result += (ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond);
+  return result.ValueOrDie();
 }
 
 // Helper function to get results from clock_gettime() and convert to a
@@ -121,12 +110,6 @@
 
 namespace base {
 
-// static
-TimeDelta TimeDelta::FromTimeSpec(const timespec& ts) {
-  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
-                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-}
-
 struct timespec TimeDelta::ToTimeSpec() const {
   int64_t microseconds = InMicroseconds();
   time_t seconds = 0;
@@ -229,30 +212,22 @@
 
 // static
 bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
-  CheckedNumeric<int> month = exploded.month;
-  month--;
-  CheckedNumeric<int> year = exploded.year;
-  year -= 1900;
-  if (!month.IsValid() || !year.IsValid()) {
-    *time = Time(0);
-    return false;
-  }
-
   struct tm timestruct;
-  timestruct.tm_sec = exploded.second;
-  timestruct.tm_min = exploded.minute;
-  timestruct.tm_hour = exploded.hour;
-  timestruct.tm_mday = exploded.day_of_month;
-  timestruct.tm_mon = month.ValueOrDie();
-  timestruct.tm_year = year.ValueOrDie();
-  timestruct.tm_wday = exploded.day_of_week;  // mktime/timegm ignore this
-  timestruct.tm_yday = 0;                     // mktime/timegm ignore this
-  timestruct.tm_isdst = -1;                   // attempt to figure it out
+  timestruct.tm_sec    = exploded.second;
+  timestruct.tm_min    = exploded.minute;
+  timestruct.tm_hour   = exploded.hour;
+  timestruct.tm_mday   = exploded.day_of_month;
+  timestruct.tm_mon    = exploded.month - 1;
+  timestruct.tm_year   = exploded.year - 1900;
+  timestruct.tm_wday   = exploded.day_of_week;  // mktime/timegm ignore this
+  timestruct.tm_yday   = 0;     // mktime/timegm ignore this
+  timestruct.tm_isdst  = -1;    // attempt to figure it out
 #if !defined(OS_NACL) && !defined(OS_SOLARIS)
-  timestruct.tm_gmtoff = 0;   // not a POSIX field, so mktime/timegm ignore
-  timestruct.tm_zone = NULL;  // not a POSIX field, so mktime/timegm ignore
+  timestruct.tm_gmtoff = 0;     // not a POSIX field, so mktime/timegm ignore
+  timestruct.tm_zone   = NULL;  // not a POSIX field, so mktime/timegm ignore
 #endif
 
+  int64_t milliseconds;
   SysTime seconds;
 
   // Certain exploded dates do not really exist due to daylight saving times,
@@ -290,7 +265,6 @@
   // return is the best that can be done here.  It's not ideal, but it's better
   // than failing here or ignoring the overflow case and treating each time
   // overflow as one second prior to the epoch.
-  int64_t milliseconds = 0;
   if (seconds == -1 &&
       (exploded.year < 1969 || exploded.year > 1970)) {
     // If exploded.year is 1969 or 1970, take -1 as correct, with the
@@ -323,25 +297,13 @@
       milliseconds += (kMillisecondsPerSecond - 1);
     }
   } else {
-    base::CheckedNumeric<int64_t> checked_millis = seconds;
-    checked_millis *= kMillisecondsPerSecond;
-    checked_millis += exploded.millisecond;
-    if (!checked_millis.IsValid()) {
-      *time = base::Time(0);
-      return false;
-    }
-    milliseconds = checked_millis.ValueOrDie();
+    milliseconds = seconds * kMillisecondsPerSecond + exploded.millisecond;
   }
 
-  // Adjust from Unix (1970) to Windows (1601) epoch avoiding overflows.
-  base::CheckedNumeric<int64_t> checked_microseconds_win_epoch = milliseconds;
-  checked_microseconds_win_epoch *= kMicrosecondsPerMillisecond;
-  checked_microseconds_win_epoch += kWindowsEpochDeltaMicroseconds;
-  if (!checked_microseconds_win_epoch.IsValid()) {
-    *time = base::Time(0);
-    return false;
-  }
-  base::Time converted_time(checked_microseconds_win_epoch.ValueOrDie());
+  // Adjust from Unix (1970) to Windows (1601) epoch.
+  base::Time converted_time =
+      Time((milliseconds * kMicrosecondsPerMillisecond) +
+           kWindowsEpochDeltaMicroseconds);
 
   // If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
   // return the first day of the next month. Thus round-trip the time and
@@ -378,11 +340,6 @@
 }
 
 // static
-bool TimeTicks::IsConsistentAcrossProcesses() {
-  return true;
-}
-
-// static
 ThreadTicks ThreadTicks::Now() {
 #if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
     defined(OS_ANDROID)
diff --git a/base/time/time_unittest.cc b/base/time/time_unittest.cc
index 8906c3b..4f47d56 100644
--- a/base/time/time_unittest.cc
+++ b/base/time/time_unittest.cc
@@ -54,16 +54,6 @@
       {{2016, 10, 0, 25, 7, 47, 234, 0}, false},
       // Milliseconds are too large
       {{2016, 10, 0, 25, 6, 31, 23, 1643}, false},
-      // Test overflow. Time is valid, but overflow case
-      // results in Time(0).
-      {{9840633, 1, 0, 1, 1, 1, 0, 0}, true},
-      // Underflow will fail as well.
-      {{-9840633, 1, 0, 1, 1, 1, 0, 0}, true},
-      // Test integer overflow and underflow cases for the values themselves.
-      {{std::numeric_limits<int>::min(), 1, 0, 1, 1, 1, 0, 0}, true},
-      {{std::numeric_limits<int>::max(), 1, 0, 1, 1, 1, 0, 0}, true},
-      {{2016, std::numeric_limits<int>::min(), 0, 1, 1, 1, 0, 0}, false},
-      {{2016, std::numeric_limits<int>::max(), 0, 1, 1, 1, 0, 0}, false},
   };
 
   for (const auto& test : kDateTestData) {
@@ -816,29 +806,22 @@
 
 #if defined(OS_POSIX)
 TEST(TimeDelta, TimeSpecConversion) {
-  TimeDelta delta = TimeDelta::FromSeconds(0);
-  struct timespec result = delta.ToTimeSpec();
+  struct timespec result = TimeDelta::FromSeconds(0).ToTimeSpec();
   EXPECT_EQ(result.tv_sec, 0);
   EXPECT_EQ(result.tv_nsec, 0);
-  EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
 
-  delta = TimeDelta::FromSeconds(1);
-  result = delta.ToTimeSpec();
+  result = TimeDelta::FromSeconds(1).ToTimeSpec();
   EXPECT_EQ(result.tv_sec, 1);
   EXPECT_EQ(result.tv_nsec, 0);
-  EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
 
-  delta = TimeDelta::FromMicroseconds(1);
-  result = delta.ToTimeSpec();
+  result = TimeDelta::FromMicroseconds(1).ToTimeSpec();
   EXPECT_EQ(result.tv_sec, 0);
   EXPECT_EQ(result.tv_nsec, 1000);
-  EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
 
-  delta = TimeDelta::FromMicroseconds(Time::kMicrosecondsPerSecond + 1);
-  result = delta.ToTimeSpec();
+  result = TimeDelta::FromMicroseconds(
+      Time::kMicrosecondsPerSecond + 1).ToTimeSpec();
   EXPECT_EQ(result.tv_sec, 1);
   EXPECT_EQ(result.tv_nsec, 1000);
-  EXPECT_EQ(delta, TimeDelta::FromTimeSpec(result));
 }
 #endif  // OS_POSIX
 
@@ -1116,17 +1099,17 @@
 
 TEST(TimeDeltaLogging, EmptyIsZero) {
   TimeDelta zero;
-  EXPECT_EQ("0 s", AnyToString(zero));
+  EXPECT_EQ("0s", AnyToString(zero));
 }
 
 TEST(TimeDeltaLogging, FiveHundredMs) {
   TimeDelta five_hundred_ms = TimeDelta::FromMilliseconds(500);
-  EXPECT_EQ("0.5 s", AnyToString(five_hundred_ms));
+  EXPECT_EQ("0.5s", AnyToString(five_hundred_ms));
 }
 
 TEST(TimeDeltaLogging, MinusTenSeconds) {
   TimeDelta minus_ten_seconds = TimeDelta::FromSeconds(-10);
-  EXPECT_EQ("-10 s", AnyToString(minus_ten_seconds));
+  EXPECT_EQ("-10s", AnyToString(minus_ten_seconds));
 }
 
 TEST(TimeDeltaLogging, DoesNotMessUpFormattingFlags) {
diff --git a/base/timer/timer.cc b/base/timer/timer.cc
index 6ec18f1..e554905 100644
--- a/base/timer/timer.cc
+++ b/base/timer/timer.cc
@@ -6,15 +6,11 @@
 
 #include <stddef.h>
 
-#include <utility>
-
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted.h"
 #include "base/single_thread_task_runner.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_task_runner_handle.h"
-#include "base/time/tick_clock.h"
 
 namespace base {
 
@@ -64,36 +60,26 @@
 };
 
 Timer::Timer(bool retain_user_task, bool is_repeating)
-    : Timer(retain_user_task, is_repeating, nullptr) {}
-
-Timer::Timer(bool retain_user_task, bool is_repeating, TickClock* tick_clock)
-    : scheduled_task_(nullptr),
+    : scheduled_task_(NULL),
       thread_id_(0),
       is_repeating_(is_repeating),
       retain_user_task_(retain_user_task),
-      tick_clock_(tick_clock),
-      is_running_(false) {}
+      is_running_(false) {
+}
 
 Timer::Timer(const tracked_objects::Location& posted_from,
              TimeDelta delay,
              const base::Closure& user_task,
              bool is_repeating)
-    : Timer(posted_from, delay, user_task, is_repeating, nullptr) {}
-
-Timer::Timer(const tracked_objects::Location& posted_from,
-             TimeDelta delay,
-             const base::Closure& user_task,
-             bool is_repeating,
-             TickClock* tick_clock)
-    : scheduled_task_(nullptr),
+    : scheduled_task_(NULL),
       posted_from_(posted_from),
       delay_(delay),
       user_task_(user_task),
       thread_id_(0),
       is_repeating_(is_repeating),
       retain_user_task_(true),
-      tick_clock_(tick_clock),
-      is_running_(false) {}
+      is_running_(false) {
+}
 
 Timer::~Timer() {
   StopAndAbandon();
@@ -137,7 +123,7 @@
 
   // Set the new desired_run_time_.
   if (delay_ > TimeDelta::FromMicroseconds(0))
-    desired_run_time_ = Now() + delay_;
+    desired_run_time_ = TimeTicks::Now() + delay_;
   else
     desired_run_time_ = TimeTicks();
 
@@ -153,10 +139,6 @@
   PostNewScheduledTask(delay_);
 }
 
-TimeTicks Timer::Now() const {
-  return tick_clock_ ? tick_clock_->NowTicks() : TimeTicks::Now();
-}
-
 void Timer::SetTaskInfo(const tracked_objects::Location& posted_from,
                         TimeDelta delay,
                         const base::Closure& user_task) {
@@ -173,7 +155,7 @@
     GetTaskRunner()->PostDelayedTask(posted_from_,
         base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)),
         delay);
-    scheduled_run_time_ = desired_run_time_ = Now() + delay;
+    scheduled_run_time_ = desired_run_time_ = TimeTicks::Now() + delay;
   } else {
     GetTaskRunner()->PostTask(posted_from_,
         base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)));
@@ -181,8 +163,10 @@
   }
   // Remember the thread ID that posts the first task -- this will be verified
   // later when the task is abandoned to detect misuse from multiple threads.
-  if (!thread_id_)
+  if (!thread_id_) {
+    DCHECK(GetTaskRunner()->BelongsToCurrentThread());
     thread_id_ = static_cast<int>(PlatformThread::CurrentId());
+  }
 }
 
 scoped_refptr<SingleThreadTaskRunner> Timer::GetTaskRunner() {
@@ -205,9 +189,9 @@
 
   // First check if we need to delay the task because of a new target time.
   if (desired_run_time_ > scheduled_run_time_) {
-    // Now() can be expensive, so only call it if we know the user has changed
-    // the desired_run_time_.
-    TimeTicks now = Now();
+    // TimeTicks::Now() can be expensive, so only call it if we know the user
+    // has changed the desired_run_time_.
+    TimeTicks now = TimeTicks::Now();
     // Task runner may have called us late anyway, so only post a continuation
     // task if the desired_run_time_ is in the future.
     if (desired_run_time_ > now) {
diff --git a/base/timer/timer.h b/base/timer/timer.h
index 8aac279..661829b 100644
--- a/base/timer/timer.h
+++ b/base/timer/timer.h
@@ -49,8 +49,6 @@
 // because they're flaky on the buildbot, but when you run them locally you
 // should be able to tell the difference.
 
-#include <memory>
-
 #include "base/base_export.h"
 #include "base/bind.h"
 #include "base/bind_helpers.h"
@@ -63,7 +61,6 @@
 
 class BaseTimerTaskInternal;
 class SingleThreadTaskRunner;
-class TickClock;
 
 //-----------------------------------------------------------------------------
 // This class wraps MessageLoop::PostDelayedTask to manage delayed and repeating
@@ -74,23 +71,14 @@
  public:
   // Construct a timer in repeating or one-shot mode. Start or SetTaskInfo must
   // be called later to set task info. |retain_user_task| determines whether the
-  // user_task is retained or reset when it runs or stops. If |tick_clock| is
-  // provided, it is used instead of TimeTicks::Now() to get TimeTicks when
-  // scheduling tasks.
+  // user_task is retained or reset when it runs or stops.
   Timer(bool retain_user_task, bool is_repeating);
-  Timer(bool retain_user_task, bool is_repeating, TickClock* tick_clock);
 
-  // Construct a timer with retained task info. If |tick_clock| is provided, it
-  // is used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
+  // Construct a timer with retained task info.
   Timer(const tracked_objects::Location& posted_from,
         TimeDelta delay,
         const base::Closure& user_task,
         bool is_repeating);
-  Timer(const tracked_objects::Location& posted_from,
-        TimeDelta delay,
-        const base::Closure& user_task,
-        bool is_repeating,
-        TickClock* tick_clock);
 
   virtual ~Timer();
 
@@ -123,9 +111,6 @@
   const TimeTicks& desired_run_time() const { return desired_run_time_; }
 
  protected:
-  // Returns the current tick count.
-  TimeTicks Now() const;
-
   // Used to initiate a new delayed task.  This has the side-effect of disabling
   // scheduled_task_ if it is non-null.
   void SetTaskInfo(const tracked_objects::Location& posted_from,
@@ -163,10 +148,8 @@
 
   // Stop running task (if any) and abandon scheduled task (if any).
   void StopAndAbandon() {
-    AbandonScheduledTask();
-
     Stop();
-    // No more member accesses here: |this| could be deleted at this point.
+    AbandonScheduledTask();
   }
 
   // When non-NULL, the scheduled_task_ is waiting in the MessageLoop to call
@@ -208,9 +191,6 @@
   // If true, hold on to the user_task_ closure object for reuse.
   const bool retain_user_task_;
 
-  // The tick clock used to calculate the run time for scheduled tasks.
-  TickClock* const tick_clock_;
-
   // If true, user_task_ is scheduled to run sometime in the future.
   bool is_running_;
 
@@ -230,8 +210,8 @@
   using Timer::Start;
 
   enum RepeatMode { ONE_SHOT, REPEATING };
-  BaseTimerMethodPointer(RepeatMode mode, TickClock* tick_clock)
-      : Timer(mode == REPEATING, mode == REPEATING, tick_clock) {}
+  BaseTimerMethodPointer(RepeatMode mode)
+      : Timer(mode == REPEATING, mode == REPEATING) {}
 
   // Start the timer to run at the given |delay| from now. If the timer is
   // already running, it will be replaced to call a task formed from
@@ -250,18 +230,14 @@
 // A simple, one-shot timer.  See usage notes at the top of the file.
 class OneShotTimer : public BaseTimerMethodPointer {
  public:
-  OneShotTimer() : OneShotTimer(nullptr) {}
-  explicit OneShotTimer(TickClock* tick_clock)
-      : BaseTimerMethodPointer(ONE_SHOT, tick_clock) {}
+  OneShotTimer() : BaseTimerMethodPointer(ONE_SHOT) {}
 };
 
 //-----------------------------------------------------------------------------
 // A simple, repeating timer.  See usage notes at the top of the file.
 class RepeatingTimer : public BaseTimerMethodPointer {
  public:
-  RepeatingTimer() : RepeatingTimer(nullptr) {}
-  explicit RepeatingTimer(TickClock* tick_clock)
-      : BaseTimerMethodPointer(REPEATING, tick_clock) {}
+  RepeatingTimer() : BaseTimerMethodPointer(REPEATING) {}
 };
 
 //-----------------------------------------------------------------------------
@@ -282,19 +258,10 @@
              TimeDelta delay,
              Receiver* receiver,
              void (Receiver::*method)())
-      : DelayTimer(posted_from, delay, receiver, method, nullptr) {}
-
-  template <class Receiver>
-  DelayTimer(const tracked_objects::Location& posted_from,
-             TimeDelta delay,
-             Receiver* receiver,
-             void (Receiver::*method)(),
-             TickClock* tick_clock)
       : Timer(posted_from,
               delay,
               base::Bind(method, base::Unretained(receiver)),
-              false,
-              tick_clock) {}
+              false) {}
 
   void Reset() override;
 };
diff --git a/base/timer/timer_unittest.cc b/base/timer/timer_unittest.cc
index 69338eb..6fcd25b 100644
--- a/base/timer/timer_unittest.cc
+++ b/base/timer/timer_unittest.cc
@@ -8,274 +8,189 @@
 
 #include <memory>
 
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/callback.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/memory/ref_counted.h"
 #include "base/message_loop/message_loop.h"
 #include "base/run_loop.h"
-#include "base/sequenced_task_runner.h"
-#include "base/single_thread_task_runner.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/test/sequenced_worker_pool_owner.h"
-#include "base/test/test_mock_time_task_runner.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/sequenced_task_runner_handle.h"
-#include "base/threading/thread.h"
+#include "base/test/test_simple_task_runner.h"
 #include "base/threading/thread_task_runner_handle.h"
-#include "base/time/tick_clock.h"
-#include "base/time/time.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-namespace base {
+using base::TimeDelta;
+using base::SingleThreadTaskRunner;
 
 namespace {
 
 // The message loops on which each timer should be tested.
-const MessageLoop::Type testing_message_loops[] = {
-    MessageLoop::TYPE_DEFAULT, MessageLoop::TYPE_IO,
+const base::MessageLoop::Type testing_message_loops[] = {
+  base::MessageLoop::TYPE_DEFAULT,
+  base::MessageLoop::TYPE_IO,
 #if !defined(OS_IOS)  // iOS does not allow direct running of the UI loop.
-    MessageLoop::TYPE_UI,
+  base::MessageLoop::TYPE_UI,
 #endif
 };
 
 const int kNumTestingMessageLoops = arraysize(testing_message_loops);
 
-class Receiver {
+class OneShotTimerTester {
  public:
-  Receiver() : count_(0) {}
-  void OnCalled() { count_++; }
-  bool WasCalled() { return count_ > 0; }
-  int TimesCalled() { return count_; }
-
- private:
-  int count_;
-};
-
-// A basic helper class that can start a one-shot timer and signal a
-// WaitableEvent when this timer fires.
-class OneShotTimerTesterBase {
- public:
-  // |did_run|, if provided, will be signaled when Run() fires.
-  explicit OneShotTimerTesterBase(
-      WaitableEvent* did_run = nullptr,
-      const TimeDelta& delay = TimeDelta::FromMilliseconds(10))
-      : did_run_(did_run), delay_(delay) {}
-
-  virtual ~OneShotTimerTesterBase() = default;
-
-  void Start() {
-    started_time_ = TimeTicks::Now();
-    timer_->Start(FROM_HERE, delay_, this, &OneShotTimerTesterBase::Run);
+  explicit OneShotTimerTester(bool* did_run, unsigned milliseconds = 10)
+      : did_run_(did_run),
+        delay_ms_(milliseconds),
+        quit_message_loop_(true) {
   }
 
-  bool IsRunning() { return timer_->IsRunning(); }
+  void Start() {
+    timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(delay_ms_), this,
+                 &OneShotTimerTester::Run);
+  }
 
-  TimeTicks started_time() const { return started_time_; }
-  TimeDelta delay() const { return delay_; }
+  void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner) {
+    quit_message_loop_ = false;
+    timer_.SetTaskRunner(task_runner);
+  }
 
- protected:
-  virtual void Run() {
-    if (did_run_) {
-      EXPECT_FALSE(did_run_->IsSignaled());
-      did_run_->Signal();
+ private:
+  void Run() {
+    *did_run_ = true;
+    if (quit_message_loop_) {
+      base::MessageLoop::current()->QuitWhenIdle();
     }
   }
 
-  std::unique_ptr<OneShotTimer> timer_ = MakeUnique<OneShotTimer>();
-
- private:
-  WaitableEvent* const did_run_;
-  const TimeDelta delay_;
-  TimeTicks started_time_;
-
-  DISALLOW_COPY_AND_ASSIGN(OneShotTimerTesterBase);
+  bool* did_run_;
+  base::OneShotTimer timer_;
+  const unsigned delay_ms_;
+  bool quit_message_loop_;
 };
 
-// Extends functionality of OneShotTimerTesterBase with the abilities to wait
-// until the timer fires and to change task runner for the timer.
-class OneShotTimerTester : public OneShotTimerTesterBase {
+class OneShotSelfDeletingTimerTester {
  public:
-  // |did_run|, if provided, will be signaled when Run() fires.
-  explicit OneShotTimerTester(
-      WaitableEvent* did_run = nullptr,
-      const TimeDelta& delay = TimeDelta::FromMilliseconds(10))
-      : OneShotTimerTesterBase(did_run, delay),
-        quit_closure_(run_loop_.QuitClosure()) {}
+  explicit OneShotSelfDeletingTimerTester(bool* did_run)
+      : did_run_(did_run), timer_(new base::OneShotTimer()) {}
 
-  ~OneShotTimerTester() override = default;
-
-  void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner) {
-    timer_->SetTaskRunner(std::move(task_runner));
-
-    // Run() will be invoked on |task_runner| but |run_loop_|'s QuitClosure
-    // needs to run on this thread (where the MessageLoop lives).
-    quit_closure_ =
-        Bind(IgnoreResult(&SingleThreadTaskRunner::PostTask),
-             ThreadTaskRunnerHandle::Get(), FROM_HERE, run_loop_.QuitClosure());
+  void Start() {
+    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(10), this,
+                  &OneShotSelfDeletingTimerTester::Run);
   }
 
-  // Blocks until Run() executes and confirms that Run() didn't fire before
-  // |delay_| expired.
-  void WaitAndConfirmTimerFiredAfterDelay() {
-    run_loop_.Run();
-
-    EXPECT_NE(TimeTicks(), started_time());
-    EXPECT_GE(TimeTicks::Now() - started_time(), delay());
-  }
-
- protected:
-  // Overridable method to do things on Run() before signaling events/closures
-  // managed by this helper.
-  virtual void OnRun() {}
-
  private:
-  void Run() override {
-    OnRun();
-    OneShotTimerTesterBase::Run();
-    quit_closure_.Run();
+  void Run() {
+    *did_run_ = true;
+    timer_.reset();
+    base::MessageLoop::current()->QuitWhenIdle();
   }
 
-  RunLoop run_loop_;
-  Closure quit_closure_;
-
-  DISALLOW_COPY_AND_ASSIGN(OneShotTimerTester);
+  bool* did_run_;
+  std::unique_ptr<base::OneShotTimer> timer_;
 };
 
-class OneShotSelfDeletingTimerTester : public OneShotTimerTester {
- protected:
-  void OnRun() override { timer_.reset(); }
-};
-
-constexpr int kNumRepeats = 10;
-
 class RepeatingTimerTester {
  public:
-  explicit RepeatingTimerTester(WaitableEvent* did_run, const TimeDelta& delay)
-      : counter_(kNumRepeats),
-        quit_closure_(run_loop_.QuitClosure()),
-        did_run_(did_run),
-        delay_(delay) {}
-
-  void Start() {
-    started_time_ = TimeTicks::Now();
-    timer_.Start(FROM_HERE, delay_, this, &RepeatingTimerTester::Run);
+  explicit RepeatingTimerTester(bool* did_run, const TimeDelta& delay)
+      : did_run_(did_run), counter_(10), delay_(delay) {
   }
 
-  void WaitAndConfirmTimerFiredRepeatedlyAfterDelay() {
-    run_loop_.Run();
-
-    EXPECT_NE(TimeTicks(), started_time_);
-    EXPECT_GE(TimeTicks::Now() - started_time_, kNumRepeats * delay_);
+  void Start() {
+    timer_.Start(FROM_HERE, delay_, this, &RepeatingTimerTester::Run);
   }
 
  private:
   void Run() {
     if (--counter_ == 0) {
-      if (did_run_) {
-        EXPECT_FALSE(did_run_->IsSignaled());
-        did_run_->Signal();
-      }
+      *did_run_ = true;
       timer_.Stop();
-      quit_closure_.Run();
+      base::MessageLoop::current()->QuitWhenIdle();
     }
   }
 
-  RepeatingTimer timer_;
+  bool* did_run_;
   int counter_;
-
-  RunLoop run_loop_;
-  Closure quit_closure_;
-  WaitableEvent* const did_run_;
-
-  const TimeDelta delay_;
-  TimeTicks started_time_;
-
-  DISALLOW_COPY_AND_ASSIGN(RepeatingTimerTester);
+  TimeDelta delay_;
+  base::RepeatingTimer timer_;
 };
 
-// Basic test with same setup as RunTest_OneShotTimers_Cancel below to confirm
-// that |did_run_a| would be signaled in that test if it wasn't for the
-// deletion.
-void RunTest_OneShotTimers(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
+void RunTest_OneShotTimer(base::MessageLoop::Type message_loop_type) {
+  base::MessageLoop loop(message_loop_type);
 
-  WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
-                          WaitableEvent::InitialState::NOT_SIGNALED);
-  OneShotTimerTester a(&did_run_a);
-  a.Start();
+  bool did_run = false;
+  OneShotTimerTester f(&did_run);
+  f.Start();
 
-  OneShotTimerTester b;
-  b.Start();
+  base::RunLoop().Run();
 
-  b.WaitAndConfirmTimerFiredAfterDelay();
-
-  EXPECT_TRUE(did_run_a.IsSignaled());
+  EXPECT_TRUE(did_run);
 }
 
-void RunTest_OneShotTimers_Cancel(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
+void RunTest_OneShotTimer_Cancel(base::MessageLoop::Type message_loop_type) {
+  base::MessageLoop loop(message_loop_type);
 
-  WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
-                          WaitableEvent::InitialState::NOT_SIGNALED);
+  bool did_run_a = false;
   OneShotTimerTester* a = new OneShotTimerTester(&did_run_a);
 
   // This should run before the timer expires.
-  SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+  base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
 
   // Now start the timer.
   a->Start();
 
-  OneShotTimerTester b;
+  bool did_run_b = false;
+  OneShotTimerTester b(&did_run_b);
   b.Start();
 
-  b.WaitAndConfirmTimerFiredAfterDelay();
+  base::RunLoop().Run();
 
-  EXPECT_FALSE(did_run_a.IsSignaled());
+  EXPECT_FALSE(did_run_a);
+  EXPECT_TRUE(did_run_b);
 }
 
-void RunTest_OneShotSelfDeletingTimer(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
+void RunTest_OneShotSelfDeletingTimer(
+    base::MessageLoop::Type message_loop_type) {
+  base::MessageLoop loop(message_loop_type);
 
-  OneShotSelfDeletingTimerTester f;
+  bool did_run = false;
+  OneShotSelfDeletingTimerTester f(&did_run);
   f.Start();
-  f.WaitAndConfirmTimerFiredAfterDelay();
+
+  base::RunLoop().Run();
+
+  EXPECT_TRUE(did_run);
 }
 
-void RunTest_RepeatingTimer(MessageLoop::Type message_loop_type,
+void RunTest_RepeatingTimer(base::MessageLoop::Type message_loop_type,
                             const TimeDelta& delay) {
-  MessageLoop loop(message_loop_type);
+  base::MessageLoop loop(message_loop_type);
 
-  RepeatingTimerTester f(nullptr, delay);
+  bool did_run = false;
+  RepeatingTimerTester f(&did_run, delay);
   f.Start();
-  f.WaitAndConfirmTimerFiredRepeatedlyAfterDelay();
+
+  base::RunLoop().Run();
+
+  EXPECT_TRUE(did_run);
 }
 
-void RunTest_RepeatingTimer_Cancel(MessageLoop::Type message_loop_type,
+void RunTest_RepeatingTimer_Cancel(base::MessageLoop::Type message_loop_type,
                                    const TimeDelta& delay) {
-  MessageLoop loop(message_loop_type);
+  base::MessageLoop loop(message_loop_type);
 
-  WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
-                          WaitableEvent::InitialState::NOT_SIGNALED);
+  bool did_run_a = false;
   RepeatingTimerTester* a = new RepeatingTimerTester(&did_run_a, delay);
 
   // This should run before the timer expires.
-  SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+  base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
 
   // Now start the timer.
   a->Start();
 
-  RepeatingTimerTester b(nullptr, delay);
+  bool did_run_b = false;
+  RepeatingTimerTester b(&did_run_b, delay);
   b.Start();
 
-  b.WaitAndConfirmTimerFiredRepeatedlyAfterDelay();
+  base::RunLoop().Run();
 
-  // |a| should not have fired despite |b| starting after it on the same
-  // sequence and being complete by now.
-  EXPECT_FALSE(did_run_a.IsSignaled());
+  EXPECT_FALSE(did_run_a);
+  EXPECT_TRUE(did_run_b);
 }
 
 class DelayTimerTarget {
@@ -291,38 +206,40 @@
   bool signaled_ = false;
 };
 
-void RunTest_DelayTimer_NoCall(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_NoCall(base::MessageLoop::Type message_loop_type) {
+  base::MessageLoop loop(message_loop_type);
 
   // If Delay is never called, the timer shouldn't go off.
   DelayTimerTarget target;
-  DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
-                   &DelayTimerTarget::Signal);
+  base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+                         &DelayTimerTarget::Signal);
 
-  OneShotTimerTester tester;
+  bool did_run = false;
+  OneShotTimerTester tester(&did_run);
   tester.Start();
-  tester.WaitAndConfirmTimerFiredAfterDelay();
+  base::RunLoop().Run();
 
   ASSERT_FALSE(target.signaled());
 }
 
-void RunTest_DelayTimer_OneCall(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_OneCall(base::MessageLoop::Type message_loop_type) {
+  base::MessageLoop loop(message_loop_type);
 
   DelayTimerTarget target;
-  DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
-                   &DelayTimerTarget::Signal);
+  base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+                         &DelayTimerTarget::Signal);
   timer.Reset();
 
-  OneShotTimerTester tester(nullptr, TimeDelta::FromMilliseconds(100));
+  bool did_run = false;
+  OneShotTimerTester tester(&did_run, 100 /* milliseconds */);
   tester.Start();
-  tester.WaitAndConfirmTimerFiredAfterDelay();
+  base::RunLoop().Run();
 
   ASSERT_TRUE(target.signaled());
 }
 
 struct ResetHelper {
-  ResetHelper(DelayTimer* timer, DelayTimerTarget* target)
+  ResetHelper(base::DelayTimer* timer, DelayTimerTarget* target)
       : timer_(timer), target_(target) {}
 
   void Reset() {
@@ -331,30 +248,31 @@
   }
 
  private:
-  DelayTimer* const timer_;
+  base::DelayTimer* const timer_;
   DelayTimerTarget* const target_;
 };
 
-void RunTest_DelayTimer_Reset(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_Reset(base::MessageLoop::Type message_loop_type) {
+  base::MessageLoop loop(message_loop_type);
 
   // If Delay is never called, the timer shouldn't go off.
   DelayTimerTarget target;
-  DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
-                   &DelayTimerTarget::Signal);
+  base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+                         &DelayTimerTarget::Signal);
   timer.Reset();
 
   ResetHelper reset_helper(&timer, &target);
 
-  OneShotTimer timers[20];
+  base::OneShotTimer timers[20];
   for (size_t i = 0; i < arraysize(timers); ++i) {
     timers[i].Start(FROM_HERE, TimeDelta::FromMilliseconds(i * 10),
                     &reset_helper, &ResetHelper::Reset);
   }
 
-  OneShotTimerTester tester(nullptr, TimeDelta::FromMilliseconds(300));
+  bool did_run = false;
+  OneShotTimerTester tester(&did_run, 300);
   tester.Start();
-  tester.WaitAndConfirmTimerFiredAfterDelay();
+  base::RunLoop().Run();
 
   ASSERT_TRUE(target.signaled());
 }
@@ -366,20 +284,21 @@
   }
 };
 
-void RunTest_DelayTimer_Deleted(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
+
+void RunTest_DelayTimer_Deleted(base::MessageLoop::Type message_loop_type) {
+  base::MessageLoop loop(message_loop_type);
 
   DelayTimerFatalTarget target;
 
   {
-    DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
-                     &DelayTimerFatalTarget::Signal);
+    base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+                           &DelayTimerFatalTarget::Signal);
     timer.Reset();
   }
 
   // When the timer is deleted, the DelayTimerFatalTarget should never be
   // called.
-  PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+  base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
 }
 
 }  // namespace
@@ -388,15 +307,15 @@
 // Each test is run against each type of MessageLoop.  That way we are sure
 // that timers work properly in all configurations.
 
-TEST(TimerTest, OneShotTimers) {
+TEST(TimerTest, OneShotTimer) {
   for (int i = 0; i < kNumTestingMessageLoops; i++) {
-    RunTest_OneShotTimers(testing_message_loops[i]);
+    RunTest_OneShotTimer(testing_message_loops[i]);
   }
 }
 
-TEST(TimerTest, OneShotTimers_Cancel) {
+TEST(TimerTest, OneShotTimer_Cancel) {
   for (int i = 0; i < kNumTestingMessageLoops; i++) {
-    RunTest_OneShotTimers_Cancel(testing_message_loops[i]);
+    RunTest_OneShotTimer_Cancel(testing_message_loops[i]);
   }
 }
 
@@ -409,42 +328,17 @@
 }
 
 TEST(TimerTest, OneShotTimer_CustomTaskRunner) {
-  // A MessageLoop is required for the timer events on the other thread to
-  // communicate back to the Timer under test.
-  MessageLoop loop;
+  scoped_refptr<base::TestSimpleTaskRunner> task_runner =
+      new base::TestSimpleTaskRunner();
 
-  Thread other_thread("OneShotTimer_CustomTaskRunner");
-  other_thread.Start();
-
-  WaitableEvent did_run(WaitableEvent::ResetPolicy::MANUAL,
-                        WaitableEvent::InitialState::NOT_SIGNALED);
+  bool did_run = false;
   OneShotTimerTester f(&did_run);
-  f.SetTaskRunner(other_thread.task_runner());
+  f.SetTaskRunner(task_runner);
   f.Start();
-  EXPECT_TRUE(f.IsRunning());
 
-  f.WaitAndConfirmTimerFiredAfterDelay();
-  EXPECT_TRUE(did_run.IsSignaled());
-
-  // |f| should already have communicated back to this |loop| before invoking
-  // Run() and as such this thread should already be aware that |f| is no longer
-  // running.
-  EXPECT_TRUE(loop.IsIdleForTesting());
-  EXPECT_FALSE(f.IsRunning());
-}
-
-TEST(TimerTest, OneShotTimerWithTickClock) {
-  scoped_refptr<TestMockTimeTaskRunner> task_runner(
-      new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
-  std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
-  MessageLoop message_loop;
-  message_loop.SetTaskRunner(task_runner);
-  Receiver receiver;
-  OneShotTimer timer(tick_clock.get());
-  timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
-              Bind(&Receiver::OnCalled, Unretained(&receiver)));
-  task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
-  EXPECT_TRUE(receiver.WasCalled());
+  EXPECT_FALSE(did_run);
+  task_runner->RunUntilIdle();
+  EXPECT_TRUE(did_run);
 }
 
 TEST(TimerTest, RepeatingTimer) {
@@ -475,22 +369,6 @@
   }
 }
 
-TEST(TimerTest, RepeatingTimerWithTickClock) {
-  scoped_refptr<TestMockTimeTaskRunner> task_runner(
-      new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
-  std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
-  MessageLoop message_loop;
-  message_loop.SetTaskRunner(task_runner);
-  Receiver receiver;
-  const int expected_times_called = 10;
-  RepeatingTimer timer(tick_clock.get());
-  timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
-              Bind(&Receiver::OnCalled, Unretained(&receiver)));
-  task_runner->FastForwardBy(TimeDelta::FromSeconds(expected_times_called));
-  timer.Stop();
-  EXPECT_EQ(expected_times_called, receiver.TimesCalled());
-}
-
 TEST(TimerTest, DelayTimer_NoCall) {
   for (int i = 0; i < kNumTestingMessageLoops; i++) {
     RunTest_DelayTimer_NoCall(testing_message_loops[i]);
@@ -516,89 +394,25 @@
   }
 }
 
-TEST(TimerTest, DelayTimerWithTickClock) {
-  scoped_refptr<TestMockTimeTaskRunner> task_runner(
-      new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
-  std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
-  MessageLoop message_loop;
-  message_loop.SetTaskRunner(task_runner);
-  Receiver receiver;
-  DelayTimer timer(FROM_HERE, TimeDelta::FromSeconds(1), &receiver,
-                   &Receiver::OnCalled, tick_clock.get());
-  task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
-  EXPECT_FALSE(receiver.WasCalled());
-  timer.Reset();
-  task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
-  EXPECT_FALSE(receiver.WasCalled());
-  timer.Reset();
-  task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
-  EXPECT_TRUE(receiver.WasCalled());
-}
-
 TEST(TimerTest, MessageLoopShutdown) {
   // This test is designed to verify that shutdown of the
   // message loop does not cause crashes if there were pending
   // timers not yet fired.  It may only trigger exceptions
   // if debug heap checking is enabled.
-  WaitableEvent did_run(WaitableEvent::ResetPolicy::MANUAL,
-                        WaitableEvent::InitialState::NOT_SIGNALED);
+  bool did_run = false;
   {
-    OneShotTimerTesterBase a(&did_run);
-    OneShotTimerTesterBase b(&did_run);
-    OneShotTimerTesterBase c(&did_run);
-    OneShotTimerTesterBase d(&did_run);
+    OneShotTimerTester a(&did_run);
+    OneShotTimerTester b(&did_run);
+    OneShotTimerTester c(&did_run);
+    OneShotTimerTester d(&did_run);
     {
-      MessageLoop loop;
+      base::MessageLoop loop;
       a.Start();
       b.Start();
     }  // MessageLoop destructs by falling out of scope.
   }  // OneShotTimers destruct.  SHOULD NOT CRASH, of course.
 
-  EXPECT_FALSE(did_run.IsSignaled());
-}
-
-// Ref counted class which owns a Timer. The class passes a reference to itself
-// via the |user_task| parameter in Timer::Start(). |Timer::user_task_| might
-// end up holding the last reference to the class.
-class OneShotSelfOwningTimerTester
-    : public RefCounted<OneShotSelfOwningTimerTester> {
- public:
-  OneShotSelfOwningTimerTester() = default;
-
-  void StartTimer() {
-    // Start timer with long delay in order to test the timer getting destroyed
-    // while a timer task is still pending.
-    timer_.Start(FROM_HERE, TimeDelta::FromDays(1),
-                 base::Bind(&OneShotSelfOwningTimerTester::Run, this));
-  }
-
- private:
-  friend class RefCounted<OneShotSelfOwningTimerTester>;
-  ~OneShotSelfOwningTimerTester() = default;
-
-  void Run() {
-    ADD_FAILURE() << "Timer unexpectedly fired.";
-  }
-
-  OneShotTimer timer_;
-
-  DISALLOW_COPY_AND_ASSIGN(OneShotSelfOwningTimerTester);
-};
-
-TEST(TimerTest, MessageLoopShutdownSelfOwningTimer) {
-  // This test verifies that shutdown of the message loop does not cause crashes
-  // if there is a pending timer not yet fired and |Timer::user_task_| owns the
-  // timer. The test may only trigger exceptions if debug heap checking is
-  // enabled.
-
-  MessageLoop loop;
-  scoped_refptr<OneShotSelfOwningTimerTester> tester =
-      new OneShotSelfOwningTimerTester();
-
-  std::move(tester)->StartTimer();
-  // |Timer::user_task_| owns sole reference to |tester|.
-
-  // MessageLoop destructs by falling out of scope. SHOULD NOT CRASH.
+  EXPECT_FALSE(did_run);
 }
 
 void TimerTestCallback() {
@@ -606,10 +420,11 @@
 
 TEST(TimerTest, NonRepeatIsRunning) {
   {
-    MessageLoop loop;
-    Timer timer(false, false);
+    base::MessageLoop loop;
+    base::Timer timer(false, false);
     EXPECT_FALSE(timer.IsRunning());
-    timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
+    timer.Start(FROM_HERE, TimeDelta::FromDays(1),
+                base::Bind(&TimerTestCallback));
     EXPECT_TRUE(timer.IsRunning());
     timer.Stop();
     EXPECT_FALSE(timer.IsRunning());
@@ -617,10 +432,11 @@
   }
 
   {
-    Timer timer(true, false);
-    MessageLoop loop;
+    base::Timer timer(true, false);
+    base::MessageLoop loop;
     EXPECT_FALSE(timer.IsRunning());
-    timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
+    timer.Start(FROM_HERE, TimeDelta::FromDays(1),
+                base::Bind(&TimerTestCallback));
     EXPECT_TRUE(timer.IsRunning());
     timer.Stop();
     EXPECT_FALSE(timer.IsRunning());
@@ -631,11 +447,12 @@
 }
 
 TEST(TimerTest, NonRepeatMessageLoopDeath) {
-  Timer timer(false, false);
+  base::Timer timer(false, false);
   {
-    MessageLoop loop;
+    base::MessageLoop loop;
     EXPECT_FALSE(timer.IsRunning());
-    timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
+    timer.Start(FROM_HERE, TimeDelta::FromDays(1),
+                base::Bind(&TimerTestCallback));
     EXPECT_TRUE(timer.IsRunning());
   }
   EXPECT_FALSE(timer.IsRunning());
@@ -643,9 +460,9 @@
 }
 
 TEST(TimerTest, RetainRepeatIsRunning) {
-  MessageLoop loop;
-  Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
-              true);
+  base::MessageLoop loop;
+  base::Timer timer(FROM_HERE, TimeDelta::FromDays(1),
+                    base::Bind(&TimerTestCallback), true);
   EXPECT_FALSE(timer.IsRunning());
   timer.Reset();
   EXPECT_TRUE(timer.IsRunning());
@@ -656,9 +473,9 @@
 }
 
 TEST(TimerTest, RetainNonRepeatIsRunning) {
-  MessageLoop loop;
-  Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
-              false);
+  base::MessageLoop loop;
+  base::Timer timer(FROM_HERE, TimeDelta::FromDays(1),
+                    base::Bind(&TimerTestCallback), false);
   EXPECT_FALSE(timer.IsRunning());
   timer.Reset();
   EXPECT_TRUE(timer.IsRunning());
@@ -680,27 +497,25 @@
 
 void SetCallbackHappened1() {
   g_callback_happened1 = true;
-  MessageLoop::current()->QuitWhenIdle();
+  base::MessageLoop::current()->QuitWhenIdle();
 }
 
 void SetCallbackHappened2() {
   g_callback_happened2 = true;
-  MessageLoop::current()->QuitWhenIdle();
+  base::MessageLoop::current()->QuitWhenIdle();
 }
 
-}  // namespace
-
 TEST(TimerTest, ContinuationStopStart) {
   {
     ClearAllCallbackHappened();
-    MessageLoop loop;
-    Timer timer(false, false);
+    base::MessageLoop loop;
+    base::Timer timer(false, false);
     timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
-                Bind(&SetCallbackHappened1));
+                base::Bind(&SetCallbackHappened1));
     timer.Stop();
     timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(40),
-                Bind(&SetCallbackHappened2));
-    RunLoop().Run();
+                base::Bind(&SetCallbackHappened2));
+    base::RunLoop().Run();
     EXPECT_FALSE(g_callback_happened1);
     EXPECT_TRUE(g_callback_happened2);
   }
@@ -709,16 +524,16 @@
 TEST(TimerTest, ContinuationReset) {
   {
     ClearAllCallbackHappened();
-    MessageLoop loop;
-    Timer timer(false, false);
+    base::MessageLoop loop;
+    base::Timer timer(false, false);
     timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
-                Bind(&SetCallbackHappened1));
+                base::Bind(&SetCallbackHappened1));
     timer.Reset();
     // Since Reset happened before task ran, the user_task must not be cleared:
     ASSERT_FALSE(timer.user_task().is_null());
-    RunLoop().Run();
+    base::RunLoop().Run();
     EXPECT_TRUE(g_callback_happened1);
   }
 }
 
-}  // namespace base
+}  // namespace
diff --git a/base/trace_event/category_registry.cc b/base/trace_event/category_registry.cc
deleted file mode 100644
index e7c1460..0000000
--- a/base/trace_event/category_registry.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/category_registry.h"
-
-#include <string.h>
-
-#include <type_traits>
-
-#include "base/atomicops.h"
-#include "base/debug/leak_annotations.h"
-#include "base/logging.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
-#include "base/trace_event/trace_category.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-
-constexpr size_t kMaxCategories = 200;
-const int kNumBuiltinCategories = 4;
-
-// |g_categories| might end up causing creating dynamic initializers if not POD.
-static_assert(std::is_pod<TraceCategory>::value, "TraceCategory must be POD");
-
-// These entries must be kept consistent with the kCategory* consts below.
-TraceCategory g_categories[kMaxCategories] = {
-    {0, 0, "tracing categories exhausted; must increase kMaxCategories"},
-    {0, 0, "tracing already shutdown"},  // See kCategoryAlreadyShutdown below.
-    {0, 0, "__metadata"},                // See kCategoryMetadata below.
-    {0, 0, "toplevel"},                  // Warmup the toplevel category.
-};
-
-base::subtle::AtomicWord g_category_index = kNumBuiltinCategories;
-
-bool IsValidCategoryPtr(const TraceCategory* category) {
-  // If any of these are hit, something has cached a corrupt category pointer.
-  uintptr_t ptr = reinterpret_cast<uintptr_t>(category);
-  return ptr % sizeof(void*) == 0 &&
-         ptr >= reinterpret_cast<uintptr_t>(&g_categories[0]) &&
-         ptr <= reinterpret_cast<uintptr_t>(&g_categories[kMaxCategories - 1]);
-}
-
-}  // namespace
-
-// static
-TraceCategory* const CategoryRegistry::kCategoryExhausted = &g_categories[0];
-TraceCategory* const CategoryRegistry::kCategoryAlreadyShutdown =
-    &g_categories[1];
-TraceCategory* const CategoryRegistry::kCategoryMetadata = &g_categories[2];
-
-// static
-void CategoryRegistry::Initialize() {
-  // Trace is enabled or disabled on one thread while other threads are
-  // accessing the enabled flag. We don't care whether edge-case events are
-  // traced or not, so we allow races on the enabled flag to keep the trace
-  // macros fast.
-  for (size_t i = 0; i < kMaxCategories; ++i) {
-    ANNOTATE_BENIGN_RACE(g_categories[i].state_ptr(),
-                         "trace_event category enabled");
-    // If this DCHECK is hit in a test it means that ResetForTesting() is not
-    // called and the categories state leaks between test fixtures.
-    DCHECK(!g_categories[i].is_enabled());
-  }
-}
-
-// static
-void CategoryRegistry::ResetForTesting() {
-  // reset_for_testing clears up only the enabled state and filters. The
-  // categories themselves cannot be cleared up because the static pointers
-  // injected by the macros still point to them and cannot be reset.
-  for (size_t i = 0; i < kMaxCategories; ++i)
-    g_categories[i].reset_for_testing();
-}
-
-// static
-TraceCategory* CategoryRegistry::GetCategoryByName(const char* category_name) {
-  DCHECK(!strchr(category_name, '"'))
-      << "Category names may not contain double quote";
-
-  // The g_categories is append only, avoid using a lock for the fast path.
-  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
-
-  // Search for pre-existing category group.
-  for (size_t i = 0; i < category_index; ++i) {
-    if (strcmp(g_categories[i].name(), category_name) == 0) {
-      return &g_categories[i];
-    }
-  }
-  return nullptr;
-}
-
-bool CategoryRegistry::GetOrCreateCategoryLocked(
-    const char* category_name,
-    CategoryInitializerFn category_initializer_fn,
-    TraceCategory** category) {
-  // This is the slow path: the lock is not held in the fastpath
-  // (GetCategoryByName), so more than one thread could have reached here trying
-  // to add the same category.
-  *category = GetCategoryByName(category_name);
-  if (*category)
-    return false;
-
-  // Create a new category.
-  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
-  if (category_index >= kMaxCategories) {
-    NOTREACHED() << "must increase kMaxCategories";
-    *category = kCategoryExhausted;
-    return false;
-  }
-
-  // TODO(primiano): this strdup should be removed. The only documented reason
-  // for it was TraceWatchEvent, which is gone. However, something might have
-  // ended up relying on this. Needs some auditing before removal.
-  const char* category_name_copy = strdup(category_name);
-  ANNOTATE_LEAKING_OBJECT_PTR(category_name_copy);
-
-  *category = &g_categories[category_index];
-  DCHECK(!(*category)->is_valid());
-  DCHECK(!(*category)->is_enabled());
-  (*category)->set_name(category_name_copy);
-  category_initializer_fn(*category);
-
-  // Update the max index now.
-  base::subtle::Release_Store(&g_category_index, category_index + 1);
-  return true;
-}
-
-// static
-const TraceCategory* CategoryRegistry::GetCategoryByStatePtr(
-    const uint8_t* category_state) {
-  const TraceCategory* category = TraceCategory::FromStatePtr(category_state);
-  DCHECK(IsValidCategoryPtr(category));
-  return category;
-}
-
-// static
-bool CategoryRegistry::IsBuiltinCategory(const TraceCategory* category) {
-  DCHECK(IsValidCategoryPtr(category));
-  return category < &g_categories[kNumBuiltinCategories];
-}
-
-// static
-CategoryRegistry::Range CategoryRegistry::GetAllCategories() {
-  // The |g_categories| array is append only. We have to only guarantee to
-  // not return an index to a category which is being initialized by
-  // GetOrCreateCategoryByName().
-  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
-  return CategoryRegistry::Range(&g_categories[0],
-                                 &g_categories[category_index]);
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/category_registry.h b/base/trace_event/category_registry.h
deleted file mode 100644
index 9c08efa..0000000
--- a/base/trace_event/category_registry.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
-#define BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "base/base_export.h"
-#include "base/logging.h"
-
-namespace base {
-namespace trace_event {
-
-struct TraceCategory;
-class TraceCategoryTest;
-class TraceLog;
-
-// Allows fast and thread-safe acces to the state of all tracing categories.
-// All the methods in this class can be concurrently called on multiple threads,
-// unless otherwise noted (e.g., GetOrCreateCategoryLocked).
-// The reason why this is a fully static class with global state is to allow to
-// statically define known categories as global linker-initialized structs,
-// without requiring static initializers.
-class BASE_EXPORT CategoryRegistry {
- public:
-  // Allows for-each iterations over a slice of the categories array.
-  class Range {
-   public:
-    Range(TraceCategory* begin, TraceCategory* end) : begin_(begin), end_(end) {
-      DCHECK_LE(begin, end);
-    }
-    TraceCategory* begin() const { return begin_; }
-    TraceCategory* end() const { return end_; }
-
-   private:
-    TraceCategory* const begin_;
-    TraceCategory* const end_;
-  };
-
-  // Known categories.
-  static TraceCategory* const kCategoryExhausted;
-  static TraceCategory* const kCategoryMetadata;
-  static TraceCategory* const kCategoryAlreadyShutdown;
-
-  // Returns a category entry from the Category.state_ptr() pointer.
-  // TODO(primiano): trace macros should just keep a pointer to the entire
-  // TraceCategory, not just the enabled state pointer. That would remove the
-  // need for this function and make everything cleaner at no extra cost (as
-  // long as the |state_| is the first field of the struct, which can be
-  // guaranteed via static_assert, see TraceCategory ctor).
-  static const TraceCategory* GetCategoryByStatePtr(
-      const uint8_t* category_state);
-
-  // Returns a category from its name or nullptr if not found.
-  // The output |category| argument is an undefinitely lived pointer to the
-  // TraceCategory owned by the registry. TRACE_EVENTx macros will cache this
-  // pointer and use it for checks in their fast-paths.
-  static TraceCategory* GetCategoryByName(const char* category_name);
-
-  static bool IsBuiltinCategory(const TraceCategory*);
-
- private:
-  friend class TraceCategoryTest;
-  friend class TraceLog;
-  using CategoryInitializerFn = void (*)(TraceCategory*);
-
-  // Only for debugging/testing purposes, is a no-op on release builds.
-  static void Initialize();
-
-  // Resets the state of all categories, to clear up the state between tests.
-  static void ResetForTesting();
-
-  // Used to get/create a category in the slow-path. If the category exists
-  // already, this has the same effect of GetCategoryByName and returns false.
-  // If not, a new category is created and the CategoryInitializerFn is invoked
-  // before retuning true. The caller must guarantee serialization: either call
-  // this method from a single thread or hold a lock when calling this.
-  static bool GetOrCreateCategoryLocked(const char* category_name,
-                                        CategoryInitializerFn,
-                                        TraceCategory**);
-
-  // Allows to iterate over the valid categories in a for-each loop.
-  // This includes builtin categories such as __metadata.
-  static Range GetAllCategories();
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
index bb6fa1b..0a04d62 100644
--- a/base/trace_event/common/trace_event_common.h
+++ b/base/trace_event/common/trace_event_common.h
@@ -223,6 +223,49 @@
                                             flow_flags, arg1_name, arg1_val, \
                                             arg2_name, arg2_val)
 
+// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
+// included in official builds.
+
+#if OFFICIAL_BUILD
+#undef TRACING_IS_OFFICIAL_BUILD
+#define TRACING_IS_OFFICIAL_BUILD 1
+#elif !defined(TRACING_IS_OFFICIAL_BUILD)
+#define TRACING_IS_OFFICIAL_BUILD 0
+#endif
+
+#if TRACING_IS_OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+  (void)0
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+                               arg2_name, arg2_val)                       \
+  (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+                                       arg1_val)                               \
+  (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+                                       arg1_val, arg2_name, arg2_val)          \
+  (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
+  TRACE_EVENT0(category_group, name)
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+  TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+                               arg2_name, arg2_val)                       \
+  TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
+  TRACE_EVENT_INSTANT0(category_group, name, scope)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+                                       arg1_val)                               \
+  TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+                                       arg1_val, arg2_name, arg2_val)          \
+  TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val,       \
+                       arg2_name, arg2_val)
+#endif
+
 // Records a single event called "name" immediately, with 0, 1 or 2
 // associated arguments. If the category is not enabled, then this
 // does nothing.
@@ -254,10 +297,20 @@
 
 #define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
                                             timestamp)                   \
-  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                               \
-      TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp,        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                    \
+      TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp,  \
       TRACE_EVENT_FLAG_NONE | scope)
 
+// Syntactic sugars for the sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+  TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() \
+  TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
+  TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
+
 // Records a single BEGIN event called "name" immediately, with 0, 1 or 2
 // associated arguments. If the category is not enabled, then this
 // does nothing.
@@ -342,15 +395,10 @@
                            TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val,   \
                            arg2_name, arg2_val)
 
-#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp) \
-  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
-      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,            \
-      TRACE_EVENT_FLAG_NONE)
-
 #define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
                                          arg1_name, arg1_val)             \
-  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
-      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp,      \
       TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
 
 #define TRACE_EVENT_COPY_MARK(category_group, name)                      \
@@ -358,8 +406,8 @@
                            TRACE_EVENT_FLAG_COPY)
 
 #define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
-  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                    \
-      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_MARK, category_group, name, 0, 0, timestamp,          \
       TRACE_EVENT_FLAG_COPY)
 
 // Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
@@ -496,12 +544,6 @@
       TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
       TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
 
-#define TRACE_EVENT_SAMPLE_WITH_ID1(category_group, name, id, arg1_name,       \
-                                    arg1_val)                                  \
-  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SAMPLE, category_group,   \
-                                   name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
-                                   arg1_val)
-
 // ASYNC_STEP_* APIs should be only used by legacy code. New code should
 // consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
 // event.
@@ -570,13 +612,6 @@
       TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,             \
       TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
       arg1_name, arg1_val)
-#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id,      \
-                                                timestamp, arg1_name,          \
-                                                arg1_val, arg2_name, arg2_val) \
-  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
-      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,                 \
-      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,     \
-      arg1_name, arg1_val, arg2_name, arg2_val)
 #define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
                                                      timestamp)                \
   INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
@@ -666,13 +701,6 @@
       TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
       TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
       arg1_name, arg1_val)
-#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id,       \
-                                              timestamp, arg1_name, arg1_val, \
-                                              arg2_name, arg2_val)            \
-  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
-      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
-      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
-      arg1_name, arg1_val, arg2_name, arg2_val)
 
 // NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
 // be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@@ -732,19 +760,16 @@
       TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
 
 // Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with none, one or two associated argument. If the category is not enabled,
-// then this does nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id)        \
-  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
-                                   category_group, name, id,                 \
-                                   TRACE_EVENT_FLAG_NONE)
-
+// with one associated argument. If the category is not enabled, then this
+// does nothing.
 #define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id,        \
                                             arg1_name, arg1_val)             \
   INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
                                    category_group, name, id,                 \
                                    TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
 #define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(                              \
     category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)   \
   INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                       \
@@ -919,58 +944,48 @@
 #define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts)         \
   INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                     \
       TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync",                \
-      issue_end_ts, TRACE_EVENT_FLAG_NONE,                                     \
-      "sync_id", sync_id, "issue_ts", issue_ts)
+      issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE,                   \
+      "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue())
 
 // Macros to track the life time and value of arbitrary client objects.
 // See also TraceTrackableObject.
 #define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
   INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
-      TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, id,     \
-      TRACE_EVENT_FLAG_NONE)
+      TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name,         \
+      TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
 
 #define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
                                             snapshot)                 \
   INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
       TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name,        \
-      id, TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+      TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
 
-#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP(                     \
-    category_group, name, id, timestamp, snapshot)                             \
-  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
-      TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name,                 \
-      id, TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
-      "snapshot", snapshot)
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP(                    \
+    category_group, name, id, timestamp, snapshot)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name,                \
+      TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
+      TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
 
 #define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
   INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
-      TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, id,     \
-      TRACE_EVENT_FLAG_NONE)
+      TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name,         \
+      TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
 
 // Records entering and leaving trace event contexts. |category_group| and
 // |name| specify the context category and type. |context| is a
 // snapshotted context object id.
-#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context)      \
-  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
-      TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, context, \
-      TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context)      \
-  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
-      TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, context, \
-      TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                              \
+      TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name,     \
+      TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                              \
+      TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name,     \
+      TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
 #define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
-  INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
-
-// Macro to specify that two trace IDs are identical. For example,
-// TRACE_LINK_IDS(
-//     "category", "name",
-//     TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
-//     TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
-// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
-// the current process have the same ID as events with ID
-// ("blink::ResourceFetcher::FetchRequest", 0x2000).
-#define TRACE_LINK_IDS(category_group, name, id, linked_id) \
-  INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id, linked_id);
+  INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name,       \
+                                      TRACE_ID_DONT_MANGLE(context))
 
 // Macro to efficiently determine if a given category group is enabled.
 #define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret)             \
@@ -1037,13 +1052,11 @@
 #define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
 #define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
 #define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
-#define TRACE_EVENT_PHASE_LINK_IDS ('=')
 
 // Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
 #define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
 #define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
 #define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
-// TODO(crbug.com/639003): Free this bit after ID mangling is deprecated.
 #define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
 #define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
 #define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
@@ -1054,8 +1067,6 @@
 #define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
 #define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
 #define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
-#define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast<unsigned int>(1 << 12))
-#define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast<unsigned int>(1 << 13))
 
 #define TRACE_EVENT_FLAG_SCOPE_MASK                          \
   (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
diff --git a/base/trace_event/etw_manifest/etw_manifest.gyp b/base/trace_event/etw_manifest/etw_manifest.gyp
new file mode 100644
index 0000000..b2f0eb8
--- /dev/null
+++ b/base/trace_event/etw_manifest/etw_manifest.gyp
@@ -0,0 +1,41 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'targets': [
+    {
+      # GN version: //base/trace_event/etw_manifest/BUILD.gn
+      'target_name': 'etw_manifest',
+      'type': 'none',
+      'toolsets': ['host', 'target'],
+      'hard_dependency': 1,
+      'conditions': [
+        ['OS=="win"', {
+          'sources': [
+            'chrome_events_win.man',
+          ],
+          'variables': {
+            'man_output_dir': '<(SHARED_INTERMEDIATE_DIR)/base/trace_event/etw_manifest',
+          },
+          'rules': [{
+            # Rule to run the message compiler.
+            'rule_name': 'message_compiler',
+            'extension': 'man',
+            'outputs': [
+              '<(man_output_dir)/chrome_events_win.h',
+              '<(man_output_dir)/chrome_events_win.rc',
+            ],
+            'action': [
+              'mc.exe',
+              '-h', '<(man_output_dir)',
+              '-r', '<(man_output_dir)/.',
+              '-um',
+              '<(RULE_INPUT_PATH)',
+            ],
+            'message': 'Running message compiler on <(RULE_INPUT_PATH)',
+          }],
+        }],
+      ],
+    }
+  ]
+}
diff --git a/base/trace_event/event_name_filter.cc b/base/trace_event/event_name_filter.cc
deleted file mode 100644
index 8d0058c..0000000
--- a/base/trace_event/event_name_filter.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/event_name_filter.h"
-
-#include "base/trace_event/trace_event_impl.h"
-
-namespace base {
-namespace trace_event {
-
-// static
-const char EventNameFilter::kName[] = "event_whitelist_predicate";
-
-EventNameFilter::EventNameFilter(
-    std::unique_ptr<EventNamesWhitelist> event_names_whitelist)
-    : event_names_whitelist_(std::move(event_names_whitelist)) {}
-
-EventNameFilter::~EventNameFilter() {}
-
-bool EventNameFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
-  return event_names_whitelist_->count(trace_event.name()) != 0;
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/event_name_filter.h b/base/trace_event/event_name_filter.h
deleted file mode 100644
index 19333b3..0000000
--- a/base/trace_event/event_name_filter.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
-#define BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
-
-#include <memory>
-#include <string>
-#include <unordered_set>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/trace_event/trace_event_filter.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceEvent;
-
-// Filters trace events by checking the full name against a whitelist.
-// The current implementation is quite simple and dumb and just uses a
-// hashtable which requires char* to std::string conversion. It could be smarter
-// and use a bloom filter trie. However, today this is used too rarely to
-// justify that cost.
-class BASE_EXPORT EventNameFilter : public TraceEventFilter {
- public:
-  using EventNamesWhitelist = std::unordered_set<std::string>;
-  static const char kName[];
-
-  EventNameFilter(std::unique_ptr<EventNamesWhitelist>);
-  ~EventNameFilter() override;
-
-  // TraceEventFilter implementation.
-  bool FilterTraceEvent(const TraceEvent&) const override;
-
- private:
-  std::unique_ptr<const EventNamesWhitelist> event_names_whitelist_;
-
-  DISALLOW_COPY_AND_ASSIGN(EventNameFilter);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
diff --git a/base/trace_event/event_name_filter_unittest.cc b/base/trace_event/event_name_filter_unittest.cc
deleted file mode 100644
index 0bc2a4d..0000000
--- a/base/trace_event/event_name_filter_unittest.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/event_name_filter.h"
-
-#include "base/memory/ptr_util.h"
-#include "base/trace_event/trace_event_impl.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-const TraceEvent& MakeTraceEvent(const char* name) {
-  static TraceEvent event;
-  event.Reset();
-  event.Initialize(0, TimeTicks(), ThreadTicks(), 'b', nullptr, name, "", 0, 0,
-                   0, nullptr, nullptr, nullptr, nullptr, 0);
-  return event;
-}
-
-TEST(TraceEventNameFilterTest, Whitelist) {
-  auto empty_whitelist = MakeUnique<EventNameFilter::EventNamesWhitelist>();
-  auto filter = MakeUnique<EventNameFilter>(std::move(empty_whitelist));
-
-  // No events should be filtered if the whitelist is empty.
-  EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
-
-  auto whitelist = MakeUnique<EventNameFilter::EventNamesWhitelist>();
-  whitelist->insert("foo");
-  whitelist->insert("bar");
-  filter = MakeUnique<EventNameFilter>(std::move(whitelist));
-  EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
-  EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("fooz")));
-  EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("afoo")));
-  EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("bar")));
-  EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foobar")));
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.cc b/base/trace_event/heap_profiler_allocation_context_tracker.cc
index b47dc16..31f311a 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -29,6 +29,7 @@
 const size_t kMaxTaskDepth = 16u;
 AllocationContextTracker* const kInitializingSentinel =
     reinterpret_cast<AllocationContextTracker*>(-1);
+const char kTracingOverhead[] = "tracing_overhead";
 
 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
 
@@ -107,17 +108,17 @@
 }
 
 void AllocationContextTracker::PushPseudoStackFrame(
-    AllocationContextTracker::PseudoStackFrame stack_frame) {
+    const char* trace_event_name) {
   // Impose a limit on the height to verify that every push is popped, because
   // in practice the pseudo stack never grows higher than ~20 frames.
   if (pseudo_stack_.size() < kMaxStackDepth)
-    pseudo_stack_.push_back(stack_frame);
+    pseudo_stack_.push_back(trace_event_name);
   else
     NOTREACHED();
 }
 
 void AllocationContextTracker::PopPseudoStackFrame(
-    AllocationContextTracker::PseudoStackFrame stack_frame) {
+    const char* trace_event_name) {
   // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
   // scope, the frame was never pushed, so it is possible that pop is called
   // on an empty stack.
@@ -127,10 +128,8 @@
   // Assert that pushes and pops are nested correctly. This DCHECK can be
   // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
   // without a corresponding TRACE_EVENT_BEGIN).
-  DCHECK(stack_frame == pseudo_stack_.back())
-      << "Encountered an unmatched TRACE_EVENT_END: "
-      << stack_frame.trace_event_name
-      << " vs event in stack: " << pseudo_stack_.back().trace_event_name;
+  DCHECK_EQ(trace_event_name, pseudo_stack_.back())
+      << "Encountered an unmatched TRACE_EVENT_END";
 
   pseudo_stack_.pop_back();
 }
@@ -156,15 +155,21 @@
 }
 
 // static
-bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
-  if (ignore_scope_depth_)
-    return false;
+AllocationContext AllocationContextTracker::GetContextSnapshot() {
+  AllocationContext ctx;
+
+  if (ignore_scope_depth_) {
+    ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
+    ctx.type_name = kTracingOverhead;
+    ctx.backtrace.frame_count = 1;
+    return ctx;
+  }
 
   CaptureMode mode = static_cast<CaptureMode>(
       subtle::NoBarrier_Load(&capture_mode_));
 
-  auto* backtrace = std::begin(ctx->backtrace.frames);
-  auto* backtrace_end = std::end(ctx->backtrace.frames);
+  auto* backtrace = std::begin(ctx.backtrace.frames);
+  auto* backtrace_end = std::end(ctx.backtrace.frames);
 
   if (!thread_name_) {
     // Ignore the string allocation made by GetAndLeakThreadName to avoid
@@ -188,12 +193,11 @@
       }
     case CaptureMode::PSEUDO_STACK:
       {
-        for (const PseudoStackFrame& stack_frame : pseudo_stack_) {
+        for (const char* event_name: pseudo_stack_) {
           if (backtrace == backtrace_end) {
             break;
           }
-          *backtrace++ =
-              StackFrame::FromTraceEventName(stack_frame.trace_event_name);
+          *backtrace++ = StackFrame::FromTraceEventName(event_name);
         }
         break;
       }
@@ -218,32 +222,24 @@
 
         // Copy frames backwards
         size_t backtrace_capacity = backtrace_end - backtrace;
-        int32_t top_frame_index = (backtrace_capacity >= frame_count)
-                                      ? 0
-                                      : frame_count - backtrace_capacity;
-        for (int32_t i = frame_count - 1; i >= top_frame_index; --i) {
-          const void* frame = frames[i];
+        size_t top_frame_index = (backtrace_capacity >= frame_count) ?
+            0 :
+            frame_count - backtrace_capacity;
+        for (size_t i = frame_count; i > top_frame_index;) {
+          const void* frame = frames[--i];
           *backtrace++ = StackFrame::FromProgramCounter(frame);
         }
         break;
       }
   }
 
-  ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);
+  ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
 
   // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
   // (component name) in the heap profiler and not piggy back on the type name.
-  if (!task_contexts_.empty()) {
-    ctx->type_name = task_contexts_.back();
-  } else if (!pseudo_stack_.empty()) {
-    // If task context was unavailable, then the category names are taken from
-    // trace events.
-    ctx->type_name = pseudo_stack_.back().trace_event_category;
-  } else {
-    ctx->type_name = nullptr;
-  }
+  ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();
 
-  return true;
+  return ctx;
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.h b/base/trace_event/heap_profiler_allocation_context_tracker.h
index 4f2a8c9..454200c 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -10,6 +10,7 @@
 #include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/debug/stack_trace.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 
@@ -29,17 +30,6 @@
     NATIVE_STACK    // GetContextSnapshot() returns native (real) stack trace
   };
 
-  // Stack frame constructed from trace events in codebase.
-  struct BASE_EXPORT PseudoStackFrame {
-    const char* trace_event_category;
-    const char* trace_event_name;
-
-    bool operator==(const PseudoStackFrame& other) const {
-      return trace_event_category == other.trace_event_category &&
-             trace_event_name == other.trace_event_name;
-    }
-  };
-
   // Globally sets capturing mode.
   // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
   static void SetCaptureMode(CaptureMode mode);
@@ -70,8 +60,8 @@
   static void SetCurrentThreadName(const char* name);
 
   // Starts and ends a new ignore scope between which the allocations are
-  // ignored by the heap profiler. GetContextSnapshot() returns false when
-  // allocations are ignored.
+  // ignored in the heap profiler. A dummy context that short circuits to
+  // "tracing_overhead" is returned for these allocations.
   void begin_ignore_scope() { ignore_scope_depth_++; }
   void end_ignore_scope() {
     if (ignore_scope_depth_)
@@ -79,19 +69,18 @@
   }
 
   // Pushes a frame onto the thread-local pseudo stack.
-  void PushPseudoStackFrame(PseudoStackFrame stack_frame);
+  void PushPseudoStackFrame(const char* trace_event_name);
 
   // Pops a frame from the thread-local pseudo stack.
-  void PopPseudoStackFrame(PseudoStackFrame stack_frame);
+  void PopPseudoStackFrame(const char* trace_event_name);
 
   // Push and pop current task's context. A stack is used to support nested
   // tasks and the top of the stack will be used in allocation context.
   void PushCurrentTaskContext(const char* context);
   void PopCurrentTaskContext(const char* context);
 
-  // Fills a snapshot of the current thread-local context. Doesn't fill and
-  // returns false if allocations are being ignored.
-  bool GetContextSnapshot(AllocationContext* snapshot);
+  // Returns a snapshot of the current thread-local context.
+  AllocationContext GetContextSnapshot();
 
   ~AllocationContextTracker();
 
@@ -101,7 +90,7 @@
   static subtle::Atomic32 capture_mode_;
 
   // The pseudo stack where frames are |TRACE_EVENT| names.
-  std::vector<PseudoStackFrame> pseudo_stack_;
+  std::vector<const char*> pseudo_stack_;
 
   // The thread name is used as the first entry in the pseudo stack.
   const char* thread_name_;
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 577f500..3064a6a 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -11,7 +11,6 @@
 #include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
-#include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/trace_event.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -27,25 +26,13 @@
 const char kFroyo[] = "Froyo";
 const char kGingerbread[] = "Gingerbread";
 
-const char kFilteringTraceConfig[] =
-    "{"
-    "  \"event_filters\": ["
-    "    {"
-    "      \"excluded_categories\": [],"
-    "      \"filter_args\": {},"
-    "      \"filter_predicate\": \"heap_profiler_predicate\","
-    "      \"included_categories\": [\"*\"]"
-    "    }"
-    "  ]"
-    "}";
-
 // Asserts that the fixed-size array |expected_backtrace| matches the backtrace
 // in |AllocationContextTracker::GetContextSnapshot|.
 template <size_t N>
 void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
-  AllocationContext ctx;
-  ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                  ->GetContextSnapshot(&ctx));
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
 
   auto* actual = std::begin(ctx.backtrace.frames);
   auto* actual_bottom = actual + ctx.backtrace.frame_count;
@@ -65,9 +52,9 @@
 
 void AssertBacktraceContainsOnlyThreadName() {
   StackFrame t = StackFrame::FromThreadName(kThreadName);
-  AllocationContext ctx;
-  ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                  ->GetContextSnapshot(&ctx));
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
 
   ASSERT_EQ(1u, ctx.backtrace.frame_count);
   ASSERT_EQ(t, ctx.backtrace.frames[0]);
@@ -76,19 +63,17 @@
 class AllocationContextTrackerTest : public testing::Test {
  public:
   void SetUp() override {
+    TraceConfig config("");
+    TraceLog::GetInstance()->SetEnabled(config, TraceLog::RECORDING_MODE);
     AllocationContextTracker::SetCaptureMode(
         AllocationContextTracker::CaptureMode::PSEUDO_STACK);
-    // Enabling memory-infra category sets default memory dump config which
-    // includes filters for capturing pseudo stack.
-    TraceConfig config(kFilteringTraceConfig);
-    TraceLog::GetInstance()->SetEnabled(config, TraceLog::FILTERING_MODE);
     AllocationContextTracker::SetCurrentThreadName(kThreadName);
   }
 
   void TearDown() override {
     AllocationContextTracker::SetCaptureMode(
         AllocationContextTracker::CaptureMode::DISABLED);
-    TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+    TraceLog::GetInstance()->SetDisabled();
   }
 };
 
@@ -121,12 +106,6 @@
       AssertBacktraceEquals(frame_ce);
     }
 
-    {
-      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
-      StackFrame frame_cc[] = {t, c, c};
-      AssertBacktraceEquals(frame_cc);
-    }
-
     AssertBacktraceEquals(frame_c);
   }
 
@@ -243,9 +222,9 @@
 
   {
     TRACE_EVENT0("Testing", kGingerbread);
-    AllocationContext ctx;
-    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                    ->GetContextSnapshot(&ctx));
+    AllocationContext ctx =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
 
     // The pseudo stack relies on pointer equality, not deep string comparisons.
     ASSERT_EQ(t, ctx.backtrace.frames[0]);
@@ -254,54 +233,38 @@
   }
 
   {
-    AllocationContext ctx;
-    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                    ->GetContextSnapshot(&ctx));
+    AllocationContext ctx =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
     ASSERT_EQ(t, ctx.backtrace.frames[0]);
     ASSERT_EQ(c, ctx.backtrace.frames[1]);
     ASSERT_EQ(f, ctx.backtrace.frames[11]);
   }
 }
 
-TEST_F(AllocationContextTrackerTest, TrackCategoryName) {
+TEST_F(AllocationContextTrackerTest, TrackTaskContext) {
   const char kContext1[] = "context1";
   const char kContext2[] = "context2";
   {
     // The context from the scoped task event should be used as type name.
     TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext1);
-    AllocationContext ctx1;
-    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                    ->GetContextSnapshot(&ctx1));
+    AllocationContext ctx1 =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
     ASSERT_EQ(kContext1, ctx1.type_name);
 
     // In case of nested events, the last event's context should be used.
     TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event2(kContext2);
-    AllocationContext ctx2;
-    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                    ->GetContextSnapshot(&ctx2));
+    AllocationContext ctx2 =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
     ASSERT_EQ(kContext2, ctx2.type_name);
   }
 
-  {
-    // Type should be category name of the last seen trace event.
-    TRACE_EVENT0("Testing", kCupcake);
-    AllocationContext ctx1;
-    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                    ->GetContextSnapshot(&ctx1));
-    ASSERT_EQ("Testing", std::string(ctx1.type_name));
-
-    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
-    AllocationContext ctx2;
-    ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                    ->GetContextSnapshot(&ctx2));
-    ASSERT_EQ(TRACE_DISABLED_BY_DEFAULT("Testing"),
-              std::string(ctx2.type_name));
-  }
-
   // Type should be nullptr without task event.
-  AllocationContext ctx;
-  ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
-                  ->GetContextSnapshot(&ctx));
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
   ASSERT_FALSE(ctx.type_name);
 }
 
@@ -309,9 +272,13 @@
   TRACE_EVENT0("Testing", kCupcake);
   TRACE_EVENT0("Testing", kDonut);
   HEAP_PROFILER_SCOPED_IGNORE;
-  AllocationContext ctx;
-  ASSERT_FALSE(AllocationContextTracker::GetInstanceForCurrentThread()
-                   ->GetContextSnapshot(&ctx));
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
+  const StringPiece kTracingOverhead("tracing_overhead");
+  ASSERT_EQ(kTracingOverhead,
+            static_cast<const char*>(ctx.backtrace.frames[0].value));
+  ASSERT_EQ(1u, ctx.backtrace.frame_count);
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index 63d4061..2c2cd37 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -60,12 +60,12 @@
   // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
   // been chosen carefully based on measurements with real-word data (addresses
   // recorded from a Chrome trace run). It is the first prime after 2^17. For
-  // |shift|, 15 yield good results for both 2^18 and 2^19 bucket sizes.
-  // Microbenchmarks show that this simple scheme outperforms fancy hashes like
-  // Murmur3 by 20 to 40 percent.
+  // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
+  // buckets. Microbenchmarks show that this simple scheme outperforms fancy
+  // hashes like Murmur3 by 20 to 40 percent.
   const uintptr_t key = reinterpret_cast<uintptr_t>(address);
   const uintptr_t a = 131101;
-  const uintptr_t shift = 15;
+  const uintptr_t shift = 14;
   const uintptr_t h = (key * a) >> shift;
   return h;
 }
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index d6a02fa..86e2721 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -16,7 +16,6 @@
 #include "base/process/process_metrics.h"
 #include "base/template_util.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
-#include "build/build_config.h"
 
 namespace base {
 namespace trace_event {
@@ -46,7 +45,8 @@
   using KVPair = std::pair<const Key, Value>;
 
   // For implementation simplicity API uses integer index instead
-  // of iterators. Most operations (except Find) on KVIndex are O(1).
+  // of iterators. Most operations (except FindValidIndex) on KVIndex
+  // are O(1).
   using KVIndex = size_t;
   static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
 
@@ -199,9 +199,7 @@
     // the simplest solution is to just allocate a humongous chunk of address
     // space.
 
-    CHECK_LT(next_unused_cell_, num_cells_ + 1)
-        << "Allocation Register hash table has too little capacity. Increase "
-           "the capacity to run heap profiler in large sessions.";
+    DCHECK_LT(next_unused_cell_, num_cells_ + 1);
 
     return &cells_[idx];
   }
@@ -302,25 +300,15 @@
  private:
   friend AllocationRegisterTest;
 
-// Expect lower number of allocations from mobile platforms. Load factor
-// (capacity / bucket count) is kept less than 10 for optimal hashing. The
-// number of buckets should be changed together with AddressHasher.
-#if defined(OS_ANDROID) || defined(OS_IOS)
+  // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
+  // hashing and should be changed together with AddressHasher.
   static const size_t kAllocationBuckets = 1 << 18;
   static const size_t kAllocationCapacity = 1500000;
-#else
-  static const size_t kAllocationBuckets = 1 << 19;
-  static const size_t kAllocationCapacity = 5000000;
-#endif
 
-  // 2^16 works well with BacktraceHasher. When increasing this number make
-  // sure BacktraceHasher still produces low number of collisions.
-  static const size_t kBacktraceBuckets = 1 << 16;
-#if defined(OS_ANDROID)
-  static const size_t kBacktraceCapacity = 32000;  // 22K was observed
-#else
-  static const size_t kBacktraceCapacity = 55000;  // 45K was observed on Linux
-#endif
+  // Expect max 2^15 unique backtraces. Can be changed to 2^16 without
+  // needing to tweak BacktraceHasher implementation.
+  static const size_t kBacktraceBuckets = 1 << 15;
+  static const size_t kBacktraceCapacity = kBacktraceBuckets;
 
   struct BacktraceHasher {
     size_t operator () (const Backtrace& backtrace) const;
diff --git a/base/trace_event/heap_profiler_event_filter.cc b/base/trace_event/heap_profiler_event_filter.cc
deleted file mode 100644
index 6c91c91..0000000
--- a/base/trace_event/heap_profiler_event_filter.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/heap_profiler_event_filter.h"
-
-#include "base/trace_event/category_registry.h"
-#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
-#include "base/trace_event/trace_category.h"
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_impl.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-
-inline bool IsPseudoStackEnabled() {
-  return AllocationContextTracker::capture_mode() ==
-         AllocationContextTracker::CaptureMode::PSEUDO_STACK;
-}
-
-inline AllocationContextTracker* GetThreadLocalTracker() {
-  return AllocationContextTracker::GetInstanceForCurrentThread();
-}
-
-}  // namespace
-
-// static
-const char HeapProfilerEventFilter::kName[] = "heap_profiler_predicate";
-
-HeapProfilerEventFilter::HeapProfilerEventFilter() {}
-HeapProfilerEventFilter::~HeapProfilerEventFilter() {}
-
-bool HeapProfilerEventFilter::FilterTraceEvent(
-    const TraceEvent& trace_event) const {
-  if (!IsPseudoStackEnabled())
-    return true;
-
-  // TODO(primiano): Add support for events with copied name crbug.com/581079.
-  if (trace_event.flags() & TRACE_EVENT_FLAG_COPY)
-    return true;
-
-  const auto* category = CategoryRegistry::GetCategoryByStatePtr(
-      trace_event.category_group_enabled());
-  AllocationContextTracker::PseudoStackFrame frame = {category->name(),
-                                                      trace_event.name()};
-  if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN ||
-      trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) {
-    GetThreadLocalTracker()->PushPseudoStackFrame(frame);
-  } else if (trace_event.phase() == TRACE_EVENT_PHASE_END) {
-    // The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|.
-    GetThreadLocalTracker()->PopPseudoStackFrame(frame);
-  }
-  // Do not filter-out any events and always return true. TraceLog adds the
-  // event only if it is enabled for recording.
-  return true;
-}
-
-void HeapProfilerEventFilter::EndEvent(const char* category_name,
-                                       const char* event_name) const {
-  if (IsPseudoStackEnabled())
-    GetThreadLocalTracker()->PopPseudoStackFrame({category_name, event_name});
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/heap_profiler_event_filter.h b/base/trace_event/heap_profiler_event_filter.h
deleted file mode 100644
index 47368a1..0000000
--- a/base/trace_event/heap_profiler_event_filter.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
-#define BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/trace_event/trace_event_filter.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceEvent;
-
-// This filter unconditionally accepts all events and pushes/pops them from the
-// thread-local AllocationContextTracker instance as they are seen.
-// This is used to cheaply construct the heap profiler pseudo stack without
-// having to actually record all events.
-class BASE_EXPORT HeapProfilerEventFilter : public TraceEventFilter {
- public:
-  static const char kName[];
-
-  HeapProfilerEventFilter();
-  ~HeapProfilerEventFilter() override;
-
-  // TraceEventFilter implementation.
-  bool FilterTraceEvent(const TraceEvent& trace_event) const override;
-  void EndEvent(const char* category_name,
-                const char* event_name) const override;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(HeapProfilerEventFilter);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
index 8043fff..1bf06db 100644
--- a/base/trace_event/heap_profiler_heap_dump_writer.cc
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -314,7 +314,8 @@
   internal::HeapDumpWriter writer(
       session_state.stack_frame_deduplicator(),
       session_state.type_name_deduplicator(),
-      session_state.heap_profiler_breakdown_threshold_bytes());
+      session_state.memory_dump_config().heap_profiler_options
+          .breakdown_threshold_bytes);
   return Serialize(writer.Summarize(metrics_by_context));
 }
 
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
index fc5da0d..49a2350 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -11,7 +11,6 @@
 #include <utility>
 
 #include "base/strings/stringprintf.h"
-#include "base/trace_event/memory_usage_estimator.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "base/trace_event/trace_event_memory_overhead.h"
 
@@ -24,10 +23,6 @@
 StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
 StackFrameDeduplicator::FrameNode::~FrameNode() {}
 
-size_t StackFrameDeduplicator::FrameNode::EstimateMemoryUsage() const {
-  return base::trace_event::EstimateMemoryUsage(children);
-}
-
 StackFrameDeduplicator::StackFrameDeduplicator() {}
 StackFrameDeduplicator::~StackFrameDeduplicator() {}
 
@@ -121,10 +116,19 @@
 
 void StackFrameDeduplicator::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
-  size_t memory_usage =
-      EstimateMemoryUsage(frames_) + EstimateMemoryUsage(roots_);
+  // The sizes here are only estimates; they fail to take into account the
+  // overhead of the tree nodes for the map, but as an estimate this should be
+  // fine.
+  size_t maps_size = roots_.size() * sizeof(std::pair<StackFrame, int>);
+  size_t frames_allocated = frames_.capacity() * sizeof(FrameNode);
+  size_t frames_resident = frames_.size() * sizeof(FrameNode);
+
+  for (const FrameNode& node : frames_)
+    maps_size += node.children.size() * sizeof(std::pair<StackFrame, int>);
+
   overhead->Add("StackFrameDeduplicator",
-                sizeof(StackFrameDeduplicator) + memory_usage);
+                sizeof(StackFrameDeduplicator) + maps_size + frames_allocated,
+                sizeof(StackFrameDeduplicator) + maps_size + frames_resident);
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
index 66d430f..4932534 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.h
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
@@ -34,8 +34,6 @@
     FrameNode(const FrameNode& other);
     ~FrameNode();
 
-    size_t EstimateMemoryUsage() const;
-
     StackFrame frame;
 
     // The index of the parent stack frame in |frames_|, or -1 if there is no
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.cc b/base/trace_event/heap_profiler_type_name_deduplicator.cc
index a6dab51..055f86a 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator.cc
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -10,10 +10,7 @@
 #include <utility>
 
 #include "base/json/string_escape.h"
-#include "base/strings/string_split.h"
 #include "base/strings/stringprintf.h"
-#include "base/trace_event/memory_usage_estimator.h"
-#include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_memory_overhead.h"
 
 namespace base {
@@ -21,24 +18,16 @@
 
 namespace {
 
-// If |type_name| is file name then extract directory name. Or if |type_name| is
-// category name, then disambiguate multple categories and remove
-// "disabled-by-default" prefix if present.
-StringPiece ExtractCategoryFromTypeName(const char* type_name) {
+// Extract directory name if |type_name| was file name. Otherwise, return
+// |type_name|.
+StringPiece ExtractDirNameFromFileName(const char* type_name) {
   StringPiece result(type_name);
   size_t last_seperator = result.find_last_of("\\/");
 
   // If |type_name| was a not a file path, the seperator will not be found, so
   // the whole type name is returned.
-  if (last_seperator == StringPiece::npos) {
-    // Use the first the category name if it has ",".
-    size_t first_comma_position = result.find(',');
-    if (first_comma_position != StringPiece::npos)
-      result = result.substr(0, first_comma_position);
-    if (result.starts_with(TRACE_DISABLED_BY_DEFAULT("")))
-      result.remove_prefix(sizeof(TRACE_DISABLED_BY_DEFAULT("")) - 1);
+  if (last_seperator == StringPiece::npos)
     return result;
-  }
 
   // Remove the file name from the path.
   result.remove_suffix(result.length() - last_seperator);
@@ -93,7 +82,7 @@
 
     // TODO(ssid): crbug.com/594803 the type name is misused for file name in
     // some cases.
-    StringPiece type_info = ExtractCategoryFromTypeName(it->first);
+    StringPiece type_info = ExtractDirNameFromFileName(it->first);
 
     // |EscapeJSONString| appends, it does not overwrite |buffer|.
     bool put_in_quotes = true;
@@ -106,9 +95,12 @@
 
 void TypeNameDeduplicator::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
-  size_t memory_usage = EstimateMemoryUsage(type_ids_);
+  // The size here is only an estimate; it fails to take into account the size
+  // of the tree nodes for the map, but as an estimate this should be fine.
+  size_t map_size = type_ids_.size() * sizeof(std::pair<const char*, int>);
+
   overhead->Add("TypeNameDeduplicator",
-                sizeof(TypeNameDeduplicator) + memory_usage);
+                sizeof(TypeNameDeduplicator) + map_size);
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 3565b8b..c3d3258 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -9,7 +9,6 @@
 #include "base/allocator/allocator_extension.h"
 #include "base/allocator/allocator_shim.h"
 #include "base/allocator/features.h"
-#include "base/debug/profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/heap_profiler_allocation_register.h"
@@ -23,32 +22,26 @@
 #else
 #include <malloc.h>
 #endif
-#if defined(OS_WIN)
-#include <windows.h>
-#endif
 
 namespace base {
 namespace trace_event {
 
-namespace {
 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+namespace {
 
 using allocator::AllocatorDispatch;
 
-void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
+void* HookAlloc(const AllocatorDispatch* self, size_t size) {
   const AllocatorDispatch* const next = self->next;
-  void* ptr = next->alloc_function(next, size, context);
+  void* ptr = next->alloc_function(next, size);
   if (ptr)
     MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
   return ptr;
 }
 
-void* HookZeroInitAlloc(const AllocatorDispatch* self,
-                        size_t n,
-                        size_t size,
-                        void* context) {
+void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
   const AllocatorDispatch* const next = self->next;
-  void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
+  void* ptr = next->alloc_zero_initialized_function(next, n, size);
   if (ptr)
     MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
   return ptr;
@@ -56,127 +49,41 @@
 
 void* HookllocAligned(const AllocatorDispatch* self,
                       size_t alignment,
-                      size_t size,
-                      void* context) {
+                      size_t size) {
   const AllocatorDispatch* const next = self->next;
-  void* ptr = next->alloc_aligned_function(next, alignment, size, context);
+  void* ptr = next->alloc_aligned_function(next, alignment, size);
   if (ptr)
     MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
   return ptr;
 }
 
-void* HookRealloc(const AllocatorDispatch* self,
-                  void* address,
-                  size_t size,
-                  void* context) {
+void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
   const AllocatorDispatch* const next = self->next;
-  void* ptr = next->realloc_function(next, address, size, context);
+  void* ptr = next->realloc_function(next, address, size);
   MallocDumpProvider::GetInstance()->RemoveAllocation(address);
   if (size > 0)  // realloc(size == 0) means free().
     MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
   return ptr;
 }
 
-void HookFree(const AllocatorDispatch* self, void* address, void* context) {
+void HookFree(const AllocatorDispatch* self, void* address) {
   if (address)
     MallocDumpProvider::GetInstance()->RemoveAllocation(address);
   const AllocatorDispatch* const next = self->next;
-  next->free_function(next, address, context);
-}
-
-size_t HookGetSizeEstimate(const AllocatorDispatch* self,
-                           void* address,
-                           void* context) {
-  const AllocatorDispatch* const next = self->next;
-  return next->get_size_estimate_function(next, address, context);
-}
-
-unsigned HookBatchMalloc(const AllocatorDispatch* self,
-                         size_t size,
-                         void** results,
-                         unsigned num_requested,
-                         void* context) {
-  const AllocatorDispatch* const next = self->next;
-  unsigned count =
-      next->batch_malloc_function(next, size, results, num_requested, context);
-  for (unsigned i = 0; i < count; ++i) {
-    MallocDumpProvider::GetInstance()->InsertAllocation(results[i], size);
-  }
-  return count;
-}
-
-void HookBatchFree(const AllocatorDispatch* self,
-                   void** to_be_freed,
-                   unsigned num_to_be_freed,
-                   void* context) {
-  const AllocatorDispatch* const next = self->next;
-  for (unsigned i = 0; i < num_to_be_freed; ++i) {
-    MallocDumpProvider::GetInstance()->RemoveAllocation(to_be_freed[i]);
-  }
-  next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
-}
-
-void HookFreeDefiniteSize(const AllocatorDispatch* self,
-                          void* ptr,
-                          size_t size,
-                          void* context) {
-  if (ptr)
-    MallocDumpProvider::GetInstance()->RemoveAllocation(ptr);
-  const AllocatorDispatch* const next = self->next;
-  next->free_definite_size_function(next, ptr, size, context);
+  next->free_function(next, address);
 }
 
 AllocatorDispatch g_allocator_hooks = {
-    &HookAlloc,            /* alloc_function */
-    &HookZeroInitAlloc,    /* alloc_zero_initialized_function */
-    &HookllocAligned,      /* alloc_aligned_function */
-    &HookRealloc,          /* realloc_function */
-    &HookFree,             /* free_function */
-    &HookGetSizeEstimate,  /* get_size_estimate_function */
-    &HookBatchMalloc,      /* batch_malloc_function */
-    &HookBatchFree,        /* batch_free_function */
-    &HookFreeDefiniteSize, /* free_definite_size_function */
-    nullptr,               /* next */
-};
-#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
-
-#if defined(OS_WIN)
-// A structure containing some information about a given heap.
-struct WinHeapInfo {
-  size_t committed_size;
-  size_t uncommitted_size;
-  size_t allocated_size;
-  size_t block_count;
+    &HookAlloc,         /* alloc_function */
+    &HookZeroInitAlloc, /* alloc_zero_initialized_function */
+    &HookllocAligned,   /* alloc_aligned_function */
+    &HookRealloc,       /* realloc_function */
+    &HookFree,          /* free_function */
+    nullptr,            /* next */
 };
 
-// NOTE: crbug.com/665516
-// Unfortunately, there is no safe way to collect information from secondary
-// heaps due to limitations and racy nature of this piece of WinAPI.
-void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
-#if defined(SYZYASAN)
-  if (base::debug::IsBinaryInstrumented())
-    return;
-#endif
-
-  // Iterate through whichever heap our CRT is using.
-  HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
-  ::HeapLock(crt_heap);
-  PROCESS_HEAP_ENTRY heap_entry;
-  heap_entry.lpData = nullptr;
-  // Walk over all the entries in the main heap.
-  while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
-    if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
-      crt_heap_info->allocated_size += heap_entry.cbData;
-      crt_heap_info->block_count++;
-    } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
-      crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
-      crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
-    }
-  }
-  CHECK(::HeapUnlock(crt_heap) == TRUE);
-}
-#endif  // defined(OS_WIN)
 }  // namespace
+#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
 
 // static
 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
@@ -199,7 +106,6 @@
   size_t total_virtual_size = 0;
   size_t resident_size = 0;
   size_t allocated_objects_size = 0;
-  size_t allocated_objects_count = 0;
 #if defined(USE_TCMALLOC)
   bool res =
       allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
@@ -211,35 +117,18 @@
                                       &allocated_objects_size);
   DCHECK(res);
 #elif defined(OS_MACOSX) || defined(OS_IOS)
-  malloc_statistics_t stats = {0};
+  malloc_statistics_t stats;
+  memset(&stats, 0, sizeof(stats));
   malloc_zone_statistics(nullptr, &stats);
   total_virtual_size = stats.size_allocated;
   allocated_objects_size = stats.size_in_use;
 
-  // Resident size is approximated pretty well by stats.max_size_in_use.
-  // However, on macOS, freed blocks are both resident and reusable, which is
-  // semantically equivalent to deallocated. The implementation of libmalloc
-  // will also only hold a fixed number of freed regions before actually
-  // starting to deallocate them, so stats.max_size_in_use is also not
-  // representative of the peak size. As a result, stats.max_size_in_use is
-  // typically somewhere between actually resident [non-reusable] pages, and
-  // peak size. This is not very useful, so we just use stats.size_in_use for
-  // resident_size, even though it's an underestimate and fails to account for
-  // fragmentation. See
-  // https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
-  resident_size = stats.size_in_use;
-#elif defined(OS_WIN)
-  WinHeapInfo main_heap_info = {};
-  WinHeapMemoryDumpImpl(&main_heap_info);
-  total_virtual_size =
-      main_heap_info.committed_size + main_heap_info.uncommitted_size;
-  // Resident size is approximated with committed heap size. Note that it is
-  // possible to do this with better accuracy on windows by intersecting the
-  // working set with the virtual memory ranges occuipied by the heap. It's not
-  // clear that this is worth it, as it's fairly expensive to do.
-  resident_size = main_heap_info.committed_size;
-  allocated_objects_size = main_heap_info.allocated_size;
-  allocated_objects_count = main_heap_info.block_count;
+  // The resident size is approximated to the max size in use, which would count
+  // the total size of all regions other than the free bytes at the end of each
+  // region. In each allocation region the allocations are rounded off to a
+  // fixed quantum, so the excess region will not be resident.
+  // See crrev.com/1531463004 for detailed explanation.
+  resident_size = stats.max_size_in_use;
 #else
   struct mallinfo info = mallinfo();
   DCHECK_GE(info.arena + info.hblkhd, info.uordblks);
@@ -249,8 +138,6 @@
   // |arena| + |hblkhd|. For more details see link: http://goo.gl/fMR8lF.
   total_virtual_size = info.arena + info.hblkhd;
   resident_size = info.uordblks;
-
-  // Total allocated space is given by |uordblks|.
   allocated_objects_size = info.uordblks;
 #endif
 
@@ -260,17 +147,13 @@
   outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                         MemoryAllocatorDump::kUnitsBytes, resident_size);
 
+  // Total allocated space is given by |uordblks|.
   MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
   inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                         MemoryAllocatorDump::kUnitsBytes,
                         allocated_objects_size);
-  if (allocated_objects_count != 0) {
-    inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
-                          MemoryAllocatorDump::kUnitsObjects,
-                          allocated_objects_count);
-  }
 
-  if (resident_size > allocated_objects_size) {
+  if (resident_size - allocated_objects_size > 0) {
     // Explicitly specify why is extra memory resident. In tcmalloc it accounts
     // for free lists and caches. In mac and ios it accounts for the
     // fragmentation and metadata.
@@ -350,10 +233,7 @@
   auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
   if (!tracker)
     return;
-
-  AllocationContext context;
-  if (!tracker->GetContextSnapshot(&context))
-    return;
+  AllocationContext context = tracker->GetContextSnapshot();
 
   AutoLock lock(allocation_register_lock_);
   if (!allocation_register_)
diff --git a/base/trace_event/malloc_dump_provider.h b/base/trace_event/malloc_dump_provider.h
index 384033c..4746cf5 100644
--- a/base/trace_event/malloc_dump_provider.h
+++ b/base/trace_event/malloc_dump_provider.h
@@ -15,7 +15,7 @@
 #include "base/trace_event/memory_dump_provider.h"
 #include "build/build_config.h"
 
-#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_WIN) || \
+#if defined(OS_LINUX) || defined(OS_ANDROID) || \
     (defined(OS_MACOSX) && !defined(OS_IOS))
 #define MALLOC_MEMORY_TRACING_SUPPORTED
 #endif
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index c781f07..7d10236 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -19,6 +19,7 @@
 namespace base {
 namespace trace_event {
 
+class MemoryDumpManager;
 class ProcessMemoryDump;
 class TracedValue;
 
@@ -69,6 +70,11 @@
   // Called at trace generation time to populate the TracedValue.
   void AsValueInto(TracedValue* value) const;
 
+  // Get the ProcessMemoryDump instance that owns this.
+  ProcessMemoryDump* process_memory_dump() const {
+    return process_memory_dump_;
+  }
+
   // Use enum Flags to set values.
   void set_flags(int flags) { flags_ |= flags; }
   void clear_flags(int flags) { flags_ &= ~flags; }
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index 5a54a77..eed070a 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -7,26 +7,21 @@
 #include <algorithm>
 #include <utility>
 
-#include "base/allocator/features.h"
 #include "base/atomic_sequence_num.h"
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/compiler_specific.h"
-#include "base/debug/alias.h"
 #include "base/debug/debugging_flags.h"
 #include "base/debug/stack_trace.h"
-#include "base/debug/thread_heap_usage_tracker.h"
 #include "base/memory/ptr_util.h"
 #include "base/threading/thread.h"
 #include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
-#include "base/trace_event/heap_profiler_event_filter.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
 #include "base/trace_event/malloc_dump_provider.h"
 #include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/memory_dump_scheduler.h"
 #include "base/trace_event/memory_dump_session_state.h"
 #include "base/trace_event/memory_infra_background_whitelist.h"
 #include "base/trace_event/process_memory_dump.h"
@@ -38,6 +33,10 @@
 #include "base/trace_event/java_heap_dump_provider_android.h"
 #endif
 
+#if defined(OS_WIN)
+#include "base/trace_event/winheap_dump_provider_win.h"
+#endif
+
 namespace base {
 namespace trace_event {
 
@@ -50,31 +49,6 @@
 StaticAtomicSequenceNumber g_next_guid;
 MemoryDumpManager* g_instance_for_testing = nullptr;
 
-// The list of names of dump providers that are blacklisted from strict thread
-// affinity check on unregistration. These providers could potentially cause
-// crashes on build bots if they do not unregister on right thread.
-// TODO(ssid): Fix all the dump providers to unregister if needed and clear the
-// blacklist, crbug.com/643438.
-const char* const kStrictThreadCheckBlacklist[] = {
-    "ClientDiscardableSharedMemoryManager",
-    "ContextProviderCommandBuffer",
-    "DiscardableSharedMemoryManager",
-    "FontCaches",
-    "GpuMemoryBufferVideoFramePool",
-    "IndexedDBBackingStore",
-    "Sql",
-    "ThreadLocalEventBuffer",
-    "TraceLog",
-    "URLRequestContext",
-    "VpxVideoDecoder",
-    "cc::SoftwareImageDecodeCache",
-    "cc::StagingBufferPool",
-    "gpu::BufferManager",
-    "gpu::MappedMemoryManager",
-    "gpu::RenderbufferManager",
-    "BlacklistTestDumpProvider"  // for testing
-};
-
 // Callback wrapper to hook upon the completion of RequestGlobalDump() and
 // inject trace markers.
 void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -136,6 +110,8 @@
 const char* const MemoryDumpManager::kSystemAllocatorPoolName =
 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
     MallocDumpProvider::kAllocatedObjects;
+#elif defined(OS_WIN)
+    WinHeapDumpProvider::kAllocatedObjects;
 #else
     nullptr;
 #endif
@@ -166,9 +142,6 @@
   // At this point the command line may not be initialized but we try to
   // enable the heap profiler to capture allocations as soon as possible.
   EnableHeapProfilingIfNeeded();
-
-  strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist),
-                                        std::end(kStrictThreadCheckBlacklist));
 }
 
 MemoryDumpManager::~MemoryDumpManager() {
@@ -189,20 +162,18 @@
   if (profiling_mode == "") {
     AllocationContextTracker::SetCaptureMode(
         AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+  }
+  else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
 #if HAVE_TRACE_STACK_FRAME_POINTERS && \
     (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
-  } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
     // We need frame pointers for native tracing to work, and they are
     // enabled in profiling and debug builds.
     AllocationContextTracker::SetCaptureMode(
         AllocationContextTracker::CaptureMode::NATIVE_STACK);
-#endif
-#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
-  } else if (profiling_mode == switches::kEnableHeapProfilingTaskProfiler) {
-    // Enable heap tracking, which in turn enables capture of heap usage
-    // tracking in tracked_objects.cc.
-    if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
-      base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
+#else
+    CHECK(false) << "'" << profiling_mode << "' mode for "
+                 << switches::kEnableHeapProfiling << " flag is not supported "
+                 << "for this platform / build type.";
 #endif
   } else {
     CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
@@ -235,33 +206,14 @@
                        nullptr);
 #endif
 
-  TRACE_EVENT_WARMUP_CATEGORY(kTraceCategory);
-
-  // TODO(ssid): This should be done in EnableHeapProfiling so that we capture
-  // more allocations (crbug.com/625170).
-  if (AllocationContextTracker::capture_mode() ==
-          AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
-      !(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
-    // Create trace config with heap profiling filter.
-    TraceConfig::EventFilterConfig heap_profiler_filter_config(
-        HeapProfilerEventFilter::kName);
-    heap_profiler_filter_config.AddIncludedCategory("*");
-    heap_profiler_filter_config.AddIncludedCategory(
-        MemoryDumpManager::kTraceCategory);
-    TraceConfig::EventFilters filters;
-    filters.push_back(heap_profiler_filter_config);
-    TraceConfig filtering_trace_config;
-    filtering_trace_config.SetEventFilters(filters);
-
-    TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
-                                        TraceLog::FILTERING_MODE);
-  }
+#if defined(OS_WIN)
+  RegisterDumpProvider(WinHeapDumpProvider::GetInstance(), "WinHeap", nullptr);
+#endif
 
   // If tracing was enabled before initializing MemoryDumpManager, we missed the
   // OnTraceLogEnabled() event. Synthetize it so we can late-join the party.
-  // IsEnabled is called before adding observer to avoid calling
-  // OnTraceLogEnabled twice.
   bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
+  TRACE_EVENT0(kTraceCategory, "init");  // Add to trace-viewer category list.
   TraceLog::GetInstance()->AddEnabledStateObserver(this);
   if (is_tracing_already_enabled)
     OnTraceLogEnabled();
@@ -310,11 +262,6 @@
       new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
                                  whitelisted_for_background_mode);
 
-  if (options.is_fast_polling_supported) {
-    DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
-                                     "polling must NOT be thread bound.";
-  }
-
   {
     AutoLock lock(lock_);
     bool already_registered = !dump_providers_.insert(mdpinfo).second;
@@ -322,15 +269,6 @@
     // path for RenderThreadImpl::Init().
     if (already_registered)
       return;
-
-    // The list of polling MDPs is populated OnTraceLogEnabled(). This code
-    // deals with the case of a MDP capable of fast polling that is registered
-    // after the OnTraceLogEnabled()
-    if (options.is_fast_polling_supported && dump_thread_) {
-      dump_thread_->task_runner()->PostTask(
-          FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread,
-                          Unretained(this), mdpinfo));
-    }
   }
 
   if (heap_profiling_enabled_)
@@ -369,18 +307,9 @@
     // - At the end of this function, if no dump is in progress.
     // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
     //   removed from |pending_dump_providers|.
-    // - When the provider is removed from |dump_providers_for_polling_|.
     DCHECK(!(*mdp_iter)->owned_dump_provider);
     (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
-  } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
-             subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
-    // If dump provider's name is on |strict_thread_check_blacklist_|, then the
-    // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is
-    // fired even when tracing is not enabled (stricter).
-    // TODO(ssid): Remove this condition after removing all the dump providers
-    // in the blacklist and the buildbots are no longer flakily hitting the
-    // DCHECK, crbug.com/643438.
-
+  } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
     // If you hit this DCHECK, your dump provider has a bug.
     // Unregistration of a MemoryDumpProvider is safe only if:
     // - The MDP has specified a sequenced task runner affinity AND the
@@ -396,13 +325,6 @@
         << "unregister itself in a racy way. Please file a crbug.";
   }
 
-  if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) {
-    DCHECK(take_mdp_ownership_and_delete_async);
-    dump_thread_->task_runner()->PostTask(
-        FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread,
-                        Unretained(this), *mdp_iter));
-  }
-
   // The MDPInfo instance can still be referenced by the
   // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
   // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
@@ -412,28 +334,6 @@
   dump_providers_.erase(mdp_iter);
 }
 
-void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
-    scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
-  AutoLock lock(lock_);
-  dump_providers_for_polling_.insert(mdpinfo);
-
-  // Notify ready for polling when first polling supported provider is
-  // registered. This handles the case where OnTraceLogEnabled() did not notify
-  // ready since no polling supported mdp has yet been registered.
-  if (dump_providers_for_polling_.size() == 1)
-    dump_scheduler_->NotifyPollingSupported();
-}
-
-void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
-    scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
-  mdpinfo->dump_provider->SuspendFastMemoryPolling();
-
-  AutoLock lock(lock_);
-  dump_providers_for_polling_.erase(mdpinfo);
-  DCHECK(!dump_providers_for_polling_.empty())
-      << "All polling MDPs cannot be unregistered.";
-}
-
 void MemoryDumpManager::RequestGlobalDump(
     MemoryDumpType dump_type,
     MemoryDumpLevelOfDetail level_of_detail,
@@ -513,10 +413,8 @@
     // with disallowed modes. If |session_state_| is null then tracing is
     // disabled.
     CHECK(!session_state_ ||
-          session_state_->IsDumpModeAllowed(args.level_of_detail));
-
-    if (dump_scheduler_)
-      dump_scheduler_->NotifyDumpTriggered();
+          session_state_->memory_dump_config().allowed_dump_modes.count(
+              args.level_of_detail));
   }
 
   TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
@@ -672,16 +570,6 @@
                            TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                            "dump_provider.name", mdpinfo->name);
 
-    // A stack allocated string with dump provider name is useful to debug
-    // crashes while invoking dump after a |dump_provider| is not unregistered
-    // in safe way.
-    // TODO(ssid): Remove this after fixing crbug.com/643438.
-    char provider_name_for_debugging[16];
-    strncpy(provider_name_for_debugging, mdpinfo->name,
-            sizeof(provider_name_for_debugging) - 1);
-    provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
-    base::debug::Alias(provider_name_for_debugging);
-
     // Pid of the target process being dumped. Often kNullProcessId (= current
     // process), non-zero when the coordinator process creates dumps on behalf
     // of child processes (see crbug.com/461788).
@@ -699,28 +587,6 @@
   SetupNextMemoryDump(std::move(pmd_async_state));
 }
 
-bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
-#if DCHECK_IS_ON()
-  {
-    AutoLock lock(lock_);
-    if (dump_thread_)
-      DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
-  }
-#endif
-  if (dump_providers_for_polling_.empty())
-    return false;
-
-  *memory_total = 0;
-  // Note that we call PollFastMemoryTotal() even if the dump provider is
-  // disabled (unregistered). This is to avoid taking lock while polling.
-  for (const auto& mdpinfo : dump_providers_for_polling_) {
-    uint64_t value = 0;
-    mdpinfo->dump_provider->PollFastMemoryTotal(&value);
-    *memory_total += value;
-  }
-  return true;
-}
-
 // static
 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
     std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
@@ -797,15 +663,11 @@
     return;
   }
 
-  const TraceConfig& trace_config =
+  const TraceConfig trace_config =
       TraceLog::GetInstance()->GetCurrentTraceConfig();
-  const TraceConfig::MemoryDumpConfig& memory_dump_config =
-      trace_config.memory_dump_config();
   scoped_refptr<MemoryDumpSessionState> session_state =
       new MemoryDumpSessionState;
-  session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
-  session_state->set_heap_profiler_breakdown_threshold_bytes(
-      memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
+  session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
   if (heap_profiling_enabled_) {
     // If heap profiling is enabled, the stack frame deduplicator and type name
     // deduplicator will be in use. Add a metadata events to write the frames
@@ -819,26 +681,14 @@
     TRACE_EVENT_API_ADD_METADATA_EVENT(
         TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
         "stackFrames",
-        MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
-            session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
+        WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
+            session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
 
     TRACE_EVENT_API_ADD_METADATA_EVENT(
         TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
         "typeNames",
-        MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
-            session_state, &MemoryDumpSessionState::type_name_deduplicator));
-  }
-
-  std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
-      new MemoryDumpScheduler(this, dump_thread->task_runner()));
-  DCHECK_LE(memory_dump_config.triggers.size(), 3u);
-  for (const auto& trigger : memory_dump_config.triggers) {
-    if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
-      NOTREACHED();
-      continue;
-    }
-    dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
-                               trigger.min_time_between_dumps_ms);
+        WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
+            session_state, &MemoryDumpSessionState::type_name_deduplicator)));
   }
 
   {
@@ -849,65 +699,48 @@
 
     DCHECK(!dump_thread_);
     dump_thread_ = std::move(dump_thread);
-    dump_scheduler_ = std::move(dump_scheduler);
 
     subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
 
-    dump_providers_for_polling_.clear();
-    for (const auto& mdpinfo : dump_providers_) {
-      if (mdpinfo->options.is_fast_polling_supported)
-        dump_providers_for_polling_.insert(mdpinfo);
+    // TODO(primiano): This is a temporary hack to disable periodic memory dumps
+    // when running memory benchmarks until telemetry uses TraceConfig to
+    // enable/disable periodic dumps. See crbug.com/529184 .
+    if (!is_coordinator_ ||
+        CommandLine::ForCurrentProcess()->HasSwitch(
+            "enable-memory-benchmarking")) {
+      return;
     }
-    // Notify polling supported only if some polling supported provider was
-    // registered, else RegisterPollingMDPOnDumpThread() will notify when first
-    // polling MDP registers.
-    if (!dump_providers_for_polling_.empty())
-      dump_scheduler_->NotifyPollingSupported();
-
-    // Only coordinator process triggers periodic global memory dumps.
-    if (is_coordinator_)
-      dump_scheduler_->NotifyPeriodicTriggerSupported();
   }
 
+  // Enable periodic dumps if necessary.
+  periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
 }
 
 void MemoryDumpManager::OnTraceLogDisabled() {
   // There might be a memory dump in progress while this happens. Therefore,
   // ensure that the MDM state which depends on the tracing enabled / disabled
   // state is always accessed by the dumping methods holding the |lock_|.
-  if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
-    return;
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
   std::unique_ptr<Thread> dump_thread;
-  std::unique_ptr<MemoryDumpScheduler> scheduler;
   {
     AutoLock lock(lock_);
     dump_thread = std::move(dump_thread_);
     session_state_ = nullptr;
-    scheduler = std::move(dump_scheduler_);
   }
-  scheduler->DisableAllTriggers();
 
   // Thread stops are blocking and must be performed outside of the |lock_|
   // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
+  periodic_dump_timer_.Stop();
   if (dump_thread)
     dump_thread->Stop();
-
-  // |dump_providers_for_polling_| must be cleared only after the dump thread is
-  // stopped (polling tasks are done).
-  {
-    AutoLock lock(lock_);
-    for (const auto& mdpinfo : dump_providers_for_polling_)
-      mdpinfo->dump_provider->SuspendFastMemoryPolling();
-    dump_providers_for_polling_.clear();
-  }
 }
 
 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
   AutoLock lock(lock_);
   if (!session_state_)
     return false;
-  return session_state_->IsDumpModeAllowed(dump_mode);
+  return session_state_->memory_dump_config().allowed_dump_modes.count(
+             dump_mode) != 0;
 }
 
 uint64_t MemoryDumpManager::GetTracingProcessId() const {
@@ -973,5 +806,78 @@
   return iter->second.get();
 }
 
+MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
+
+MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
+  Stop();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
+    const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
+  if (triggers_list.empty())
+    return;
+
+  // At the moment the periodic support is limited to at most one periodic
+  // trigger per dump mode. All intervals should be an integer multiple of the
+  // smallest interval specified.
+  periodic_dumps_count_ = 0;
+  uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
+  uint32_t light_dump_period_ms = 0;
+  uint32_t heavy_dump_period_ms = 0;
+  DCHECK_LE(triggers_list.size(), 3u);
+  auto* mdm = MemoryDumpManager::GetInstance();
+  for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
+    DCHECK_NE(0u, config.periodic_interval_ms);
+    switch (config.level_of_detail) {
+      case MemoryDumpLevelOfDetail::BACKGROUND:
+        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
+        break;
+      case MemoryDumpLevelOfDetail::LIGHT:
+        DCHECK_EQ(0u, light_dump_period_ms);
+        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
+        light_dump_period_ms = config.periodic_interval_ms;
+        break;
+      case MemoryDumpLevelOfDetail::DETAILED:
+        DCHECK_EQ(0u, heavy_dump_period_ms);
+        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
+        heavy_dump_period_ms = config.periodic_interval_ms;
+        break;
+    }
+    min_timer_period_ms =
+        std::min(min_timer_period_ms, config.periodic_interval_ms);
+  }
+
+  DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
+  light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
+  DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
+  heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
+
+  timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
+               base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
+                          base::Unretained(this)));
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
+  if (IsRunning()) {
+    timer_.Stop();
+  }
+}
+
+bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
+  return timer_.IsRunning();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
+  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+  if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
+    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+  if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
+    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+  ++periodic_dumps_count_;
+
+  MemoryDumpManager::GetInstance()->RequestGlobalDump(
+      MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index 92cc2f4..06b772c 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -18,6 +18,7 @@
 #include "base/memory/ref_counted.h"
 #include "base/memory/singleton.h"
 #include "base/synchronization/lock.h"
+#include "base/timer/timer.h"
 #include "base/trace_event/memory_dump_request_args.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event.h"
@@ -32,7 +33,6 @@
 class MemoryDumpManagerDelegate;
 class MemoryDumpProvider;
 class MemoryDumpSessionState;
-class MemoryDumpScheduler;
 
 // This is the interface exposed to the rest of the codebase to deal with
 // memory tracing. The main entry point for clients is represented by
@@ -94,8 +94,7 @@
   // This method takes ownership of the dump provider and guarantees that:
   //  - The |mdp| will be deleted at some point in the near future.
   //  - Its deletion will not happen concurrently with the OnMemoryDump() call.
-  // Note that OnMemoryDump() and PollFastMemoryTotal() calls can still happen
-  // after this method returns.
+  // Note that OnMemoryDump() calls can still happen after this method returns.
   void UnregisterAndDeleteDumpProviderSoon(
       std::unique_ptr<MemoryDumpProvider> mdp);
 
@@ -117,9 +116,6 @@
   void OnTraceLogEnabled() override;
   void OnTraceLogDisabled() override;
 
-  // Enable heap profiling if kEnableHeapProfiling is specified.
-  void EnableHeapProfilingIfNeeded();
-
   // Returns true if the dump mode is allowed for current tracing session.
   bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
 
@@ -155,7 +151,6 @@
   friend struct DefaultSingletonTraits<MemoryDumpManager>;
   friend class MemoryDumpManagerDelegate;
   friend class MemoryDumpManagerTest;
-  friend class MemoryDumpScheduler;
 
   // Descriptor used to hold information about registered MDPs.
   // Some important considerations about lifetime of this object:
@@ -278,6 +273,31 @@
     DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
   };
 
+  // Sets up periodic memory dump timers to start global dump requests based on
+  // the dump triggers from trace config.
+  class BASE_EXPORT PeriodicGlobalDumpTimer {
+   public:
+    PeriodicGlobalDumpTimer();
+    ~PeriodicGlobalDumpTimer();
+
+    void Start(const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
+                   triggers_list);
+    void Stop();
+
+    bool IsRunning();
+
+   private:
+    // Periodically called by the timer.
+    void RequestPeriodicGlobalDump();
+
+    RepeatingTimer timer_;
+    uint32_t periodic_dumps_count_;
+    uint32_t light_dump_rate_;
+    uint32_t heavy_dump_rate_;
+
+    DISALLOW_COPY_AND_ASSIGN(PeriodicGlobalDumpTimer);
+  };
+
   static const int kMaxConsecutiveFailuresCount;
   static const char* const kSystemAllocatorPoolName;
 
@@ -288,6 +308,9 @@
   static void FinalizeDumpAndAddToTrace(
       std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
 
+  // Enable heap profiling if kEnableHeapProfiling is specified.
+  void EnableHeapProfilingIfNeeded();
+
   // Internal, used only by MemoryDumpManagerDelegate.
   // Creates a memory dump for the current process and appends it to the trace.
   // |callback| will be invoked asynchronously upon completion on the same
@@ -306,14 +329,6 @@
   // runner.
   void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
 
-  // Records a quick total memory usage in |memory_total|. This is used to track
-  // and detect peaks in the memory usage of the process without having to
-  // record all data from dump providers. This value is approximate to trade-off
-  // speed, and not consistent with the rest of the memory-infra metrics. Must
-  // be called on the dump thread.
-  // Returns true if |memory_total| was updated by polling at least 1 MDP.
-  bool PollFastMemoryTotal(uint64_t* memory_total);
-
   // Helper for RegierDumpProvider* functions.
   void RegisterDumpProviderInternal(
       MemoryDumpProvider* mdp,
@@ -325,29 +340,13 @@
   void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
                                       bool take_mdp_ownership_and_delete_async);
 
-  // Adds / removes provider that supports polling to
-  // |dump_providers_for_polling_|.
-  void RegisterPollingMDPOnDumpThread(
-      scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
-  void UnregisterPollingMDPOnDumpThread(
-      scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
-
   // An ordererd set of registered MemoryDumpProviderInfo(s), sorted by task
   // runner affinity (MDPs belonging to the same task runners are adjacent).
   MemoryDumpProviderInfo::OrderedSet dump_providers_;
 
-  // A copy of mdpinfo list that support polling. It must be accessed only on
-  // the dump thread if dump thread exists.
-  MemoryDumpProviderInfo::OrderedSet dump_providers_for_polling_;
-
   // Shared among all the PMDs to keep state scoped to the tracing session.
   scoped_refptr<MemoryDumpSessionState> session_state_;
 
-  // The list of names of dump providers that are blacklisted from strict thread
-  // affinity check on unregistration.
-  std::unordered_set<StringPiece, StringPieceHash>
-      strict_thread_check_blacklist_;
-
   MemoryDumpManagerDelegate* delegate_;  // Not owned.
 
   // When true, this instance is in charge of coordinating periodic dumps.
@@ -361,8 +360,8 @@
   // dump_providers_enabled_ list) when tracing is not enabled.
   subtle::AtomicWord memory_tracing_enabled_;
 
-  // For triggering memory dumps.
-  std::unique_ptr<MemoryDumpScheduler> dump_scheduler_;
+  // For time-triggered periodic dumps.
+  PeriodicGlobalDumpTimer periodic_dump_timer_;
 
   // Thread used for MemoryDumpProviders which don't specify a task runner
   // affinity.
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 51d4194..d14093c 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -16,16 +16,13 @@
 #include "base/run_loop.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/test/sequenced_worker_pool_owner.h"
 #include "base/test/test_io_thread.h"
 #include "base/test/trace_event_analyzer.h"
 #include "base/threading/platform_thread.h"
-#include "base/threading/sequenced_task_runner_handle.h"
 #include "base/threading/sequenced_worker_pool.h"
 #include "base/threading/thread.h"
 #include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/memory_dump_scheduler.h"
 #include "base/trace_event/memory_infra_background_whitelist.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_buffer.h"
@@ -73,10 +70,8 @@
   mdm->set_dumper_registrations_ignored_for_testing(true);
 }
 
-void RegisterDumpProvider(
-    MemoryDumpProvider* mdp,
-    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
-  RegisterDumpProvider(mdp, task_runner, MemoryDumpProvider::Options());
+void RegisterDumpProvider(MemoryDumpProvider* mdp) {
+  RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
 }
 
 void RegisterDumpProviderWithSequencedTaskRunner(
@@ -99,20 +94,6 @@
     quit_closure.Run();
 }
 
-// Posts |task| to |task_runner| and blocks until it is executed.
-void PostTaskAndWait(const tracked_objects::Location& from_here,
-                     SequencedTaskRunner* task_runner,
-                     const base::Closure& task) {
-  base::WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
-                            WaitableEvent::InitialState::NOT_SIGNALED);
-  task_runner->PostTask(from_here, task);
-  task_runner->PostTask(
-      FROM_HERE, base::Bind(&WaitableEvent::Signal, base::Unretained(&event)));
-  // The SequencedTaskRunner guarantees that |event| will only be signaled after
-  // |task| is executed.
-  event.Wait();
-}
-
 }  // namespace
 
 // Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
@@ -143,8 +124,6 @@
   MOCK_METHOD0(Destructor, void());
   MOCK_METHOD2(OnMemoryDump,
                bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
-  MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
-  MOCK_METHOD0(SuspendFastMemoryPolling, void());
 
   MockMemoryDumpProvider() : enable_mock_destructor(false) {
     ON_CALL(*this, OnMemoryDump(_, _))
@@ -156,10 +135,6 @@
           EXPECT_TRUE(pmd->session_state().get() != nullptr);
           return true;
         }));
-
-    ON_CALL(*this, PollFastMemoryTotal(_))
-        .WillByDefault(
-            Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
   }
   ~MockMemoryDumpProvider() override {
     if (enable_mock_destructor)
@@ -172,7 +147,8 @@
 class TestSequencedTaskRunner : public SequencedTaskRunner {
  public:
   TestSequencedTaskRunner()
-      : worker_pool_(2 /* max_threads */, "Test Task Runner"),
+      : worker_pool_(
+            new SequencedWorkerPool(2 /* max_threads */, "Test Task Runner")),
         enabled_(true),
         num_of_post_tasks_(0) {}
 
@@ -190,21 +166,19 @@
                        const Closure& task,
                        TimeDelta delay) override {
     num_of_post_tasks_++;
-    if (enabled_) {
-      return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
-                                                          task);
-    }
+    if (enabled_)
+      return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
     return false;
   }
 
   bool RunsTasksOnCurrentThread() const override {
-    return worker_pool_.pool()->RunsTasksOnCurrentThread();
+    return worker_pool_->IsRunningSequenceOnCurrentThread(token_);
   }
 
  private:
   ~TestSequencedTaskRunner() override {}
 
-  SequencedWorkerPoolOwner worker_pool_;
+  scoped_refptr<SequencedWorkerPool> worker_pool_;
   const SequencedWorkerPool::SequenceToken token_;
   bool enabled_;
   unsigned num_of_post_tasks_;
@@ -241,10 +215,6 @@
     task_runner->PostTask(FROM_HERE, closure);
   }
 
-  void PollFastMemoryTotal(uint64_t* memory_total) {
-    mdm_->PollFastMemoryTotal(memory_total);
-  }
-
  protected:
   void InitializeMemoryDumpManager(bool is_coordinator) {
     mdm_->set_dumper_registrations_ignored_for_testing(true);
@@ -274,7 +244,7 @@
   void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
 
   bool IsPeriodicDumpingEnabled() const {
-    return mdm_->dump_scheduler_->IsPeriodicTimerRunningForTesting();
+    return mdm_->periodic_dump_timer_.IsRunning();
   }
 
   int GetMaxConsecutiveFailuresCount() const {
@@ -298,7 +268,7 @@
 TEST_F(MemoryDumpManagerTest, SingleDumper) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
   MockMemoryDumpProvider mdp;
-  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  RegisterDumpProvider(&mdp);
 
   // Check that the dumper is not called if the memory category is not enabled.
   EnableTracingWithLegacyCategories("foobar-but-not-memory");
@@ -339,7 +309,7 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
   MockMemoryDumpProvider mdp;
 
-  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  RegisterDumpProvider(&mdp);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
   EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
@@ -350,7 +320,7 @@
 
   // Check that requesting dumps with low level of detail actually propagates to
   // OnMemoryDump() call on dump providers.
-  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  RegisterDumpProvider(&mdp);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
   EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
@@ -365,8 +335,8 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
   MockMemoryDumpProvider mdp1;
   MockMemoryDumpProvider mdp2;
-  RegisterDumpProvider(&mdp1, nullptr);
-  RegisterDumpProvider(&mdp2, nullptr);
+  RegisterDumpProvider(&mdp1);
+  RegisterDumpProvider(&mdp2);
 
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
   const MemoryDumpSessionState* session_state =
@@ -402,7 +372,7 @@
   MockMemoryDumpProvider mdp2;
 
   // Enable only mdp1.
-  RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get());
+  RegisterDumpProvider(&mdp1);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
   EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
@@ -413,7 +383,7 @@
 
   // Invert: enable mdp1 and disable mdp2.
   mdm_->UnregisterDumpProvider(&mdp1);
-  RegisterDumpProvider(&mdp2, nullptr);
+  RegisterDumpProvider(&mdp2);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
   EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
@@ -423,7 +393,7 @@
   DisableTracing();
 
   // Enable both mdp1 and mdp2.
-  RegisterDumpProvider(&mdp1, nullptr);
+  RegisterDumpProvider(&mdp1);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
   EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
@@ -439,7 +409,7 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
   MockMemoryDumpProvider mdp;
 
-  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  RegisterDumpProvider(&mdp);
 
   {
     EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
@@ -461,7 +431,7 @@
     DisableTracing();
   }
 
-  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  RegisterDumpProvider(&mdp);
   mdm_->UnregisterDumpProvider(&mdp);
 
   {
@@ -473,9 +443,9 @@
     DisableTracing();
   }
 
-  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  RegisterDumpProvider(&mdp);
   mdm_->UnregisterDumpProvider(&mdp);
-  RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
+  RegisterDumpProvider(&mdp);
 
   {
     EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
@@ -597,8 +567,8 @@
   MockMemoryDumpProvider mdp1;
   MockMemoryDumpProvider mdp2;
 
-  RegisterDumpProvider(&mdp1, nullptr);
-  RegisterDumpProvider(&mdp2, nullptr);
+  RegisterDumpProvider(&mdp1);
+  RegisterDumpProvider(&mdp2);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
 
   const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
@@ -631,7 +601,7 @@
   MockMemoryDumpProvider mdp1;
   MockMemoryDumpProvider mdp2;
 
-  RegisterDumpProvider(&mdp1, nullptr);
+  RegisterDumpProvider(&mdp1);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
 
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
@@ -641,7 +611,7 @@
       .WillOnce(Return(true))
       .WillOnce(
           Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
-            RegisterDumpProvider(&mdp2, nullptr);
+            RegisterDumpProvider(&mdp2);
             return true;
           }))
       .WillRepeatedly(Return(true));
@@ -717,16 +687,13 @@
   // unregister the other one.
   for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
     int other_idx = (mdps.front() == mdp);
-    // TestIOThread's task runner must be obtained from the main thread but can
-    // then be used from other threads.
-    scoped_refptr<SingleThreadTaskRunner> other_runner =
-        threads[other_idx]->task_runner();
+    TestIOThread* other_thread = threads[other_idx].get();
     MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
-    auto on_dump = [this, other_runner, other_mdp, &on_memory_dump_call_count](
+    auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
         const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
-      PostTaskAndWait(FROM_HERE, other_runner.get(),
-                      base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
-                                 base::Unretained(&*mdm_), other_mdp));
+      other_thread->PostTaskAndWait(
+          FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
+                                base::Unretained(&*mdm_), other_mdp));
       on_memory_dump_call_count++;
       return true;
     };
@@ -749,75 +716,6 @@
   DisableTracing();
 }
 
-TEST_F(MemoryDumpManagerTest, TestPollingOnDumpThread) {
-  InitializeMemoryDumpManager(false /* is_coordinator */);
-  std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider());
-  std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider());
-  mdp1->enable_mock_destructor = true;
-  mdp2->enable_mock_destructor = true;
-
-  EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1);
-  EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(1);
-  EXPECT_CALL(*mdp1, Destructor());
-  EXPECT_CALL(*mdp2, Destructor());
-
-  MemoryDumpProvider::Options options;
-  options.is_fast_polling_supported = true;
-  RegisterDumpProvider(mdp1.get(), nullptr, options);
-
-  RunLoop run_loop;
-  scoped_refptr<SingleThreadTaskRunner> test_task_runner =
-      ThreadTaskRunnerHandle::Get();
-  auto quit_closure = run_loop.QuitClosure();
-
-  const int kPollsToQuit = 10;
-  int call_count = 0;
-  MemoryDumpManager* mdm = mdm_.get();
-  const auto poll_function1 = [&call_count, &test_task_runner, quit_closure,
-                               &mdp2, mdm, &options, kPollsToQuit,
-                               this](uint64_t* total) -> void {
-    ++call_count;
-    if (call_count == 1)
-      RegisterDumpProvider(mdp2.get(), nullptr, options, kMDPName);
-    else if (call_count == 4)
-      mdm->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2));
-    else if (call_count == kPollsToQuit)
-      test_task_runner->PostTask(FROM_HERE, quit_closure);
-
-    // Record increase of 1 GiB of memory at each call.
-    *total = static_cast<uint64_t>(call_count) * 1024 * 1024 * 1024;
-  };
-  EXPECT_CALL(*mdp1, PollFastMemoryTotal(_))
-      .Times(testing::AtLeast(kPollsToQuit))
-      .WillRepeatedly(Invoke(poll_function1));
-
-  // Depending on the order of PostTask calls the mdp2 might be registered after
-  // all polls or in between polls.
-  EXPECT_CALL(*mdp2, PollFastMemoryTotal(_))
-      .Times(Between(0, kPollsToQuit - 1))
-      .WillRepeatedly(Return());
-
-  MemoryDumpScheduler::SetPollingIntervalForTesting(1);
-  EnableTracingWithTraceConfig(
-      TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(3));
-
-  int last_poll_to_request_dump = -2;
-  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
-      .Times(testing::AtLeast(2))
-      .WillRepeatedly(Invoke([&last_poll_to_request_dump, &call_count](
-                                 const MemoryDumpRequestArgs& args,
-                                 const MemoryDumpCallback& callback) -> void {
-        // Minimum number of polls between dumps must be 3 (polling interval is
-        // 1ms).
-        EXPECT_GE(call_count - last_poll_to_request_dump, 3);
-        last_poll_to_request_dump = call_count;
-      }));
-
-  run_loop.Run();
-  DisableTracing();
-  mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1));
-}
-
 // If a thread (with a dump provider living on it) is torn down during a dump
 // its dump provider should be skipped but the dump itself should succeed.
 TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
@@ -840,14 +738,9 @@
   for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
     int other_idx = (mdps.front() == mdp);
     TestIOThread* other_thread = threads[other_idx].get();
-    // TestIOThread isn't thread-safe and must be stopped on the |main_runner|.
-    scoped_refptr<SequencedTaskRunner> main_runner =
-        SequencedTaskRunnerHandle::Get();
-    auto on_dump = [other_thread, main_runner, &on_memory_dump_call_count](
+    auto on_dump = [other_thread, &on_memory_dump_call_count](
         const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
-      PostTaskAndWait(
-          FROM_HERE, main_runner.get(),
-          base::Bind(&TestIOThread::Stop, base::Unretained(other_thread)));
+      other_thread->Stop();
       on_memory_dump_call_count++;
       return true;
     };
@@ -875,7 +768,7 @@
 TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
   MockMemoryDumpProvider mdp1;
-  RegisterDumpProvider(&mdp1, nullptr);
+  RegisterDumpProvider(&mdp1);
 
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
   EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
@@ -890,7 +783,7 @@
 // began, it will still late-join the party (real use case: startup tracing).
 TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
   MockMemoryDumpProvider mdp;
-  RegisterDumpProvider(&mdp, nullptr);
+  RegisterDumpProvider(&mdp);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
 
   // First check that a RequestGlobalDump() issued before the MemoryDumpManager
@@ -1073,7 +966,7 @@
 
   // Create both same-thread MDP and another MDP with dedicated thread
   MockMemoryDumpProvider mdp1;
-  RegisterDumpProvider(&mdp1, nullptr);
+  RegisterDumpProvider(&mdp1);
   MockMemoryDumpProvider mdp2;
   RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
@@ -1192,8 +1085,8 @@
       const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
     thread_ref = PlatformThread::CurrentRef();
     TestIOThread thread_for_unregistration(TestIOThread::kAutoStart);
-    PostTaskAndWait(
-        FROM_HERE, thread_for_unregistration.task_runner().get(),
+    thread_for_unregistration.PostTaskAndWait(
+        FROM_HERE,
         base::Bind(
             &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
             base::Unretained(MemoryDumpManager::GetInstance()),
@@ -1223,7 +1116,7 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
   SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
   std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
-  RegisterDumpProvider(mdp1.get(), nullptr);
+  RegisterDumpProvider(mdp1.get());
   std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
   RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
                        kWhitelistedMDPName);
@@ -1274,22 +1167,5 @@
   DisableTracing();
 }
 
-TEST_F(MemoryDumpManagerTest, TestBlacklistedUnsafeUnregistration) {
-  InitializeMemoryDumpManager(false /* is_coordinator */);
-  MockMemoryDumpProvider mdp1;
-  RegisterDumpProvider(&mdp1, nullptr, kDefaultOptions,
-                       "BlacklistTestDumpProvider");
-  // Not calling UnregisterAndDeleteDumpProviderSoon() should not crash.
-  mdm_->UnregisterDumpProvider(&mdp1);
-
-  Thread thread("test thread");
-  thread.Start();
-  RegisterDumpProvider(&mdp1, thread.task_runner(), kDefaultOptions,
-                       "BlacklistTestDumpProvider");
-  // Unregistering on wrong thread should not crash.
-  mdm_->UnregisterDumpProvider(&mdp1);
-  thread.Stop();
-}
-
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index 76c2969..2c50286 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -22,8 +22,7 @@
   struct Options {
     Options()
         : target_pid(kNullProcessId),
-          dumps_on_single_thread_task_runner(false),
-          is_fast_polling_supported(false) {}
+          dumps_on_single_thread_task_runner(false) {}
 
     // If the dump provider generates dumps on behalf of another process,
     // |target_pid| contains the pid of that process.
@@ -35,11 +34,6 @@
     // a SingleThreadTaskRunner, which is usually the case. It is faster to run
     // all providers that run on the same thread together without thread hops.
     bool dumps_on_single_thread_task_runner;
-
-    // Set to true if the dump provider implementation supports high frequency
-    // polling. Only providers running without task runner affinity are
-    // supported.
-    bool is_fast_polling_supported;
   };
 
   virtual ~MemoryDumpProvider() {}
@@ -58,18 +52,6 @@
   // collecting extensive allocation data, if supported.
   virtual void OnHeapProfilingEnabled(bool) {}
 
-  // Quickly record the total memory usage in |memory_total|. This method will
-  // be called only when the dump provider registration has
-  // |is_fast_polling_supported| set to true. This method is used for polling at
-  // high frequency for detecting peaks. See comment on
-  // |is_fast_polling_supported| option if you need to override this method.
-  virtual void PollFastMemoryTotal(uint64_t* /* memory_total */) {}
-
-  // Indicates that fast memory polling is not going to be used in the near
-  // future and the MDP can tear down any resource kept around for fast memory
-  // polling.
-  virtual void SuspendFastMemoryPolling() {}
-
  protected:
   MemoryDumpProvider() {}
 
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index bf72bef..e6c5b87 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -12,28 +12,19 @@
 // static
 const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
   switch (dump_type) {
+    case MemoryDumpType::TASK_BEGIN:
+      return "task_begin";
+    case MemoryDumpType::TASK_END:
+      return "task_end";
     case MemoryDumpType::PERIODIC_INTERVAL:
       return "periodic_interval";
     case MemoryDumpType::EXPLICITLY_TRIGGERED:
       return "explicitly_triggered";
-    case MemoryDumpType::PEAK_MEMORY_USAGE:
-      return "peak_memory_usage";
   }
   NOTREACHED();
   return "unknown";
 }
 
-MemoryDumpType StringToMemoryDumpType(const std::string& str) {
-  if (str == "periodic_interval")
-    return MemoryDumpType::PERIODIC_INTERVAL;
-  if (str == "explicitly_triggered")
-    return MemoryDumpType::EXPLICITLY_TRIGGERED;
-  if (str == "peak_memory_usage")
-    return MemoryDumpType::PEAK_MEMORY_USAGE;
-  NOTREACHED();
-  return MemoryDumpType::LAST;
-}
-
 const char* MemoryDumpLevelOfDetailToString(
     const MemoryDumpLevelOfDetail& level_of_detail) {
   switch (level_of_detail) {
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index 90a866f..f3ff9d8 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -18,19 +18,16 @@
 namespace trace_event {
 
 // Captures the reason why a memory dump is being requested. This is to allow
-// selective enabling of dumps, filtering and post-processing. Important: this
-// must be kept consistent with
-// services/resource_coordinator/public/cpp/memory/memory_infra_traits.cc.
+// selective enabling of dumps, filtering and post-processing.
 enum class MemoryDumpType {
-  PERIODIC_INTERVAL,     // Dumping memory at periodic intervals.
+  TASK_BEGIN,         // Dumping memory at the beginning of a message-loop task.
+  TASK_END,           // Dumping memory at the ending of a message-loop task.
+  PERIODIC_INTERVAL,  // Dumping memory at periodic intervals.
   EXPLICITLY_TRIGGERED,  // Non maskable dump request.
-  PEAK_MEMORY_USAGE,     // Dumping memory at detected peak total memory usage.
-  LAST = PEAK_MEMORY_USAGE  // For IPC macros.
+  LAST = EXPLICITLY_TRIGGERED // For IPC macros.
 };
 
 // Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
-// Important: this must be kept consistent with
-// services/resource_Coordinator/public/cpp/memory/memory_infra_traits.cc.
 enum class MemoryDumpLevelOfDetail : uint32_t {
   FIRST,
 
@@ -53,8 +50,7 @@
 };
 
 // Initial request arguments for a global memory dump. (see
-// MemoryDumpManager::RequestGlobalMemoryDump()). Important: this must be kept
-// consistent with services/memory_infra/public/cpp/memory_infra_traits.cc.
+// MemoryDumpManager::RequestGlobalMemoryDump()).
 struct BASE_EXPORT MemoryDumpRequestArgs {
   // Globally unique identifier. In multi-process dumps, all processes issue a
   // local dump with the same guid. This allows the trace importers to
@@ -76,8 +72,6 @@
 
 BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
 
-BASE_EXPORT MemoryDumpType StringToMemoryDumpType(const std::string& str);
-
 BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
     const MemoryDumpLevelOfDetail& level_of_detail);
 
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
deleted file mode 100644
index eaa8d63..0000000
--- a/base/trace_event/memory_dump_scheduler.cc
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_dump_scheduler.h"
-
-#include "base/process/process_metrics.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "build/build_config.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-// Threshold on increase in memory from last dump beyond which a new dump must
-// be triggered.
-int64_t kDefaultMemoryIncreaseThreshold = 50 * 1024 * 1024;  // 50MiB
-const uint32_t kMemoryTotalsPollingInterval = 25;
-uint32_t g_polling_interval_ms_for_testing = 0;
-}  // namespace
-
-MemoryDumpScheduler::MemoryDumpScheduler(
-    MemoryDumpManager* mdm,
-    scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
-    : mdm_(mdm), polling_state_(polling_task_runner) {}
-
-MemoryDumpScheduler::~MemoryDumpScheduler() {}
-
-void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
-                                     MemoryDumpLevelOfDetail level_of_detail,
-                                     uint32_t min_time_between_dumps_ms) {
-  if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
-    DCHECK(!periodic_state_.is_configured);
-    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
-    DCHECK_NE(0u, min_time_between_dumps_ms);
-
-    polling_state_.level_of_detail = level_of_detail;
-    polling_state_.min_polls_between_dumps =
-        (min_time_between_dumps_ms + polling_state_.polling_interval_ms - 1) /
-        polling_state_.polling_interval_ms;
-    polling_state_.current_state = PollingTriggerState::CONFIGURED;
-  } else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
-    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_.current_state);
-    periodic_state_.is_configured = true;
-    DCHECK_NE(0u, min_time_between_dumps_ms);
-    switch (level_of_detail) {
-      case MemoryDumpLevelOfDetail::BACKGROUND:
-        break;
-      case MemoryDumpLevelOfDetail::LIGHT:
-        DCHECK_EQ(0u, periodic_state_.light_dump_period_ms);
-        periodic_state_.light_dump_period_ms = min_time_between_dumps_ms;
-        break;
-      case MemoryDumpLevelOfDetail::DETAILED:
-        DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms);
-        periodic_state_.heavy_dump_period_ms = min_time_between_dumps_ms;
-        break;
-    }
-
-    periodic_state_.min_timer_period_ms = std::min(
-        periodic_state_.min_timer_period_ms, min_time_between_dumps_ms);
-    DCHECK_EQ(0u, periodic_state_.light_dump_period_ms %
-                      periodic_state_.min_timer_period_ms);
-    DCHECK_EQ(0u, periodic_state_.heavy_dump_period_ms %
-                      periodic_state_.min_timer_period_ms);
-  }
-}
-
-void MemoryDumpScheduler::NotifyPeriodicTriggerSupported() {
-  if (!periodic_state_.is_configured || periodic_state_.timer.IsRunning())
-    return;
-  periodic_state_.light_dumps_rate = periodic_state_.light_dump_period_ms /
-                                     periodic_state_.min_timer_period_ms;
-  periodic_state_.heavy_dumps_rate = periodic_state_.heavy_dump_period_ms /
-                                     periodic_state_.min_timer_period_ms;
-
-  periodic_state_.dump_count = 0;
-  periodic_state_.timer.Start(
-      FROM_HERE,
-      TimeDelta::FromMilliseconds(periodic_state_.min_timer_period_ms),
-      Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
-}
-
-void MemoryDumpScheduler::NotifyPollingSupported() {
-  if (polling_state_.current_state != PollingTriggerState::CONFIGURED)
-    return;
-
-  polling_state_.current_state = PollingTriggerState::ENABLED;
-  polling_state_.ResetTotals();
-
-  polling_state_.polling_task_runner->PostTask(
-      FROM_HERE,
-      Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
-}
-
-void MemoryDumpScheduler::NotifyDumpTriggered() {
-  if (polling_state_.polling_task_runner &&
-      polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
-    polling_state_.polling_task_runner->PostTask(
-        FROM_HERE,
-        Bind(&MemoryDumpScheduler::NotifyDumpTriggered, Unretained(this)));
-    return;
-  }
-  if (polling_state_.current_state != PollingTriggerState::ENABLED)
-    return;
-
-  polling_state_.ResetTotals();
-}
-
-void MemoryDumpScheduler::DisableAllTriggers() {
-  if (periodic_state_.timer.IsRunning())
-    periodic_state_.timer.Stop();
-  DisablePolling();
-}
-
-void MemoryDumpScheduler::DisablePolling() {
-  if (polling_state_.polling_task_runner->RunsTasksOnCurrentThread()) {
-    if (polling_state_.polling_task_runner->PostTask(
-            FROM_HERE,
-            Bind(&MemoryDumpScheduler::DisablePolling, Unretained(this))))
-      return;
-  }
-  polling_state_.current_state = PollingTriggerState::DISABLED;
-  polling_state_.polling_task_runner = nullptr;
-}
-
-// static
-void MemoryDumpScheduler::SetPollingIntervalForTesting(uint32_t interval) {
-  g_polling_interval_ms_for_testing = interval;
-}
-
-bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
-  return periodic_state_.timer.IsRunning();
-}
-
-void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
-  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
-  if (periodic_state_.light_dumps_rate > 0 &&
-      periodic_state_.dump_count % periodic_state_.light_dumps_rate == 0)
-    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
-  if (periodic_state_.heavy_dumps_rate > 0 &&
-      periodic_state_.dump_count % periodic_state_.heavy_dumps_rate == 0)
-    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
-  ++periodic_state_.dump_count;
-
-  mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
-void MemoryDumpScheduler::PollMemoryOnPollingThread() {
-  if (polling_state_.current_state != PollingTriggerState::ENABLED)
-    return;
-
-  uint64_t polled_memory = 0;
-  bool res = mdm_->PollFastMemoryTotal(&polled_memory);
-  DCHECK(res);
-  if (polling_state_.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
-    TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
-                   polled_memory / 1024 / 1024);
-  }
-
-  if (ShouldTriggerDump(polled_memory)) {
-    TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
-                         "Peak memory dump Triggered",
-                         TRACE_EVENT_SCOPE_PROCESS, "total_usage_MB",
-                         polled_memory / 1024 / 1024);
-
-    mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
-                            polling_state_.level_of_detail);
-  }
-
-  // TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
-  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
-      TimeDelta::FromMilliseconds(polling_state_.polling_interval_ms));
-}
-
-bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
-  // This function tries to detect peak memory usage as discussed in
-  // https://goo.gl/0kOU4A.
-
-  if (current_memory_total == 0)
-    return false;
-
-  bool should_dump = false;
-  ++polling_state_.num_polls_from_last_dump;
-  if (polling_state_.last_dump_memory_total == 0) {
-    // If it's first sample then trigger memory dump.
-    should_dump = true;
-  } else if (polling_state_.min_polls_between_dumps >
-             polling_state_.num_polls_from_last_dump) {
-    return false;
-  }
-
-  int64_t increase_from_last_dump =
-      current_memory_total - polling_state_.last_dump_memory_total;
-  should_dump |=
-      increase_from_last_dump > polling_state_.memory_increase_threshold;
-  should_dump |= IsCurrentSamplePeak(current_memory_total);
-  if (should_dump)
-    polling_state_.ResetTotals();
-  return should_dump;
-}
-
-bool MemoryDumpScheduler::IsCurrentSamplePeak(
-    uint64_t current_memory_total_bytes) {
-  uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
-  polling_state_.last_memory_totals_kb_index =
-      (polling_state_.last_memory_totals_kb_index + 1) %
-      PollingTriggerState::kMaxNumMemorySamples;
-  uint64_t mean = 0;
-  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
-    if (polling_state_.last_memory_totals_kb[i] == 0) {
-      // Not enough samples to detect peaks.
-      polling_state_
-          .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
-          current_memory_total_kb;
-      return false;
-    }
-    mean += polling_state_.last_memory_totals_kb[i];
-  }
-  mean = mean / PollingTriggerState::kMaxNumMemorySamples;
-  uint64_t variance = 0;
-  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
-    variance += (polling_state_.last_memory_totals_kb[i] - mean) *
-                (polling_state_.last_memory_totals_kb[i] - mean);
-  }
-  variance = variance / PollingTriggerState::kMaxNumMemorySamples;
-
-  polling_state_
-      .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
-      current_memory_total_kb;
-
-  // If stddev is less than 0.2% then we consider that the process is inactive.
-  bool is_stddev_low = variance < mean / 500 * mean / 500;
-  if (is_stddev_low)
-    return false;
-
-  // (mean + 3.69 * stddev) corresponds to a value that is higher than current
-  // sample with 99.99% probability.
-  return (current_memory_total_kb - mean) * (current_memory_total_kb - mean) >
-         (3.69 * 3.69 * variance);
-}
-
-MemoryDumpScheduler::PeriodicTriggerState::PeriodicTriggerState()
-    : is_configured(false),
-      dump_count(0),
-      min_timer_period_ms(std::numeric_limits<uint32_t>::max()),
-      light_dumps_rate(0),
-      heavy_dumps_rate(0),
-      light_dump_period_ms(0),
-      heavy_dump_period_ms(0) {}
-
-MemoryDumpScheduler::PeriodicTriggerState::~PeriodicTriggerState() {
-  DCHECK(!timer.IsRunning());
-}
-
-MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
-    scoped_refptr<SingleThreadTaskRunner> polling_task_runner)
-    : current_state(DISABLED),
-      level_of_detail(MemoryDumpLevelOfDetail::FIRST),
-      polling_task_runner(polling_task_runner),
-      polling_interval_ms(g_polling_interval_ms_for_testing
-                              ? g_polling_interval_ms_for_testing
-                              : kMemoryTotalsPollingInterval),
-      min_polls_between_dumps(0),
-      num_polls_from_last_dump(-1),
-      last_dump_memory_total(0),
-      memory_increase_threshold(0),
-      last_memory_totals_kb_index(0) {}
-
-MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {
-  DCHECK(!polling_task_runner);
-}
-
-void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
-  if (!memory_increase_threshold) {
-    memory_increase_threshold = kDefaultMemoryIncreaseThreshold;
-#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
-    defined(OS_ANDROID)
-    // Set threshold to 1% of total system memory.
-    SystemMemoryInfoKB meminfo;
-    bool res = GetSystemMemoryInfo(&meminfo);
-    if (res)
-      memory_increase_threshold = (meminfo.total / 100) * 1024;
-#endif
-  }
-
-  // Update the |last_dump_memory_total|'s value from the totals if it's not
-  // first poll.
-  if (num_polls_from_last_dump >= 0 &&
-      last_memory_totals_kb[last_memory_totals_kb_index]) {
-    last_dump_memory_total =
-        last_memory_totals_kb[last_memory_totals_kb_index] * 1024;
-  }
-  num_polls_from_last_dump = 0;
-  for (uint32_t i = 0; i < kMaxNumMemorySamples; ++i)
-    last_memory_totals_kb[i] = 0;
-  last_memory_totals_kb_index = 0;
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/memory_dump_scheduler.h b/base/trace_event/memory_dump_scheduler.h
deleted file mode 100644
index fd21fce..0000000
--- a/base/trace_event/memory_dump_scheduler.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
-#define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
-
-#include "base/base_export.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/ref_counted.h"
-#include "base/timer/timer.h"
-#include "base/trace_event/memory_dump_request_args.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-
-namespace trace_event {
-
-class MemoryDumpManager;
-
-// Schedules global dump requests based on the triggers added.
-class BASE_EXPORT MemoryDumpScheduler {
- public:
-  MemoryDumpScheduler(
-      MemoryDumpManager* mdm_,
-      scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
-  ~MemoryDumpScheduler();
-
-  // Adds triggers for scheduling global dumps. Both periodic and peak triggers
-  // cannot be added together. At the moment the periodic support is limited to
-  // at most one periodic trigger per dump mode and peak triggers are limited to
-  // at most one. All intervals should be an integeral multiple of the smallest
-  // interval specified.
-  void AddTrigger(MemoryDumpType trigger_type,
-                  MemoryDumpLevelOfDetail level_of_detail,
-                  uint32_t min_time_between_dumps_ms);
-
-  // Starts periodic dumps.
-  void NotifyPeriodicTriggerSupported();
-
-  // Starts polling memory total.
-  void NotifyPollingSupported();
-
-  // Resets time for triggering dump to account for minimum time between the
-  // dumps.
-  void NotifyDumpTriggered();
-
-  // Disables all triggers.
-  void DisableAllTriggers();
-
- private:
-  friend class MemoryDumpManagerTest;
-  FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, TestPollingOnDumpThread);
-
-  // Helper class to schdule periodic memory dumps.
-  struct PeriodicTriggerState {
-    PeriodicTriggerState();
-    ~PeriodicTriggerState();
-
-    bool is_configured;
-
-    RepeatingTimer timer;
-    uint32_t dump_count;
-    uint32_t min_timer_period_ms;
-    uint32_t light_dumps_rate;
-    uint32_t heavy_dumps_rate;
-
-    uint32_t light_dump_period_ms;
-    uint32_t heavy_dump_period_ms;
-
-    DISALLOW_COPY_AND_ASSIGN(PeriodicTriggerState);
-  };
-
-  struct PollingTriggerState {
-    enum State {
-      CONFIGURED,  // Polling trigger was added.
-      ENABLED,     // Polling is running.
-      DISABLED     // Polling is disabled.
-    };
-
-    static const uint32_t kMaxNumMemorySamples = 50;
-
-    explicit PollingTriggerState(
-        scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
-    ~PollingTriggerState();
-
-    // Helper to clear the tracked memory totals and poll count from last dump.
-    void ResetTotals();
-
-    State current_state;
-    MemoryDumpLevelOfDetail level_of_detail;
-
-    scoped_refptr<SingleThreadTaskRunner> polling_task_runner;
-    uint32_t polling_interval_ms;
-
-    // Minimum numer of polls after the last dump at which next dump can be
-    // triggered.
-    int min_polls_between_dumps;
-    int num_polls_from_last_dump;
-
-    uint64_t last_dump_memory_total;
-    int64_t memory_increase_threshold;
-    uint64_t last_memory_totals_kb[kMaxNumMemorySamples];
-    uint32_t last_memory_totals_kb_index;
-
-    DISALLOW_COPY_AND_ASSIGN(PollingTriggerState);
-  };
-
-  // Helper to set polling disabled on the polling thread.
-  void DisablePolling();
-
-  // Periodically called by the timer.
-  void RequestPeriodicGlobalDump();
-
-  // Called for polling memory usage and trigger dumps if peak is detected.
-  void PollMemoryOnPollingThread();
-
-  // Returns true if peak memory value is detected.
-  bool ShouldTriggerDump(uint64_t current_memory_total);
-
-  // Helper to detect peaks in memory usage.
-  bool IsCurrentSamplePeak(uint64_t current_memory_total);
-
-  // Must be set before enabling tracing.
-  static void SetPollingIntervalForTesting(uint32_t interval);
-
-  // True if periodic dumping is enabled.
-  bool IsPeriodicTimerRunningForTesting();
-
-  MemoryDumpManager* mdm_;
-
-  PeriodicTriggerState periodic_state_;
-  PollingTriggerState polling_state_;
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
diff --git a/base/trace_event/memory_dump_session_state.cc b/base/trace_event/memory_dump_session_state.cc
index d26b82a..b3d9a8c 100644
--- a/base/trace_event/memory_dump_session_state.cc
+++ b/base/trace_event/memory_dump_session_state.cc
@@ -7,8 +7,8 @@
 namespace base {
 namespace trace_event {
 
-MemoryDumpSessionState::MemoryDumpSessionState()
-    : heap_profiler_breakdown_threshold_bytes_(0) {}
+MemoryDumpSessionState::MemoryDumpSessionState() {}
+
 MemoryDumpSessionState::~MemoryDumpSessionState() {}
 
 void MemoryDumpSessionState::SetStackFrameDeduplicator(
@@ -23,14 +23,9 @@
   type_name_deduplicator_ = std::move(type_name_deduplicator);
 }
 
-void MemoryDumpSessionState::SetAllowedDumpModes(
-    std::set<MemoryDumpLevelOfDetail> allowed_dump_modes) {
-  allowed_dump_modes_ = allowed_dump_modes;
-}
-
-bool MemoryDumpSessionState::IsDumpModeAllowed(
-    MemoryDumpLevelOfDetail dump_mode) const {
-  return allowed_dump_modes_.count(dump_mode) != 0;
+void MemoryDumpSessionState::SetMemoryDumpConfig(
+    const TraceConfig::MemoryDumpConfig& config) {
+  memory_dump_config_ = config;
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/memory_dump_session_state.h b/base/trace_event/memory_dump_session_state.h
index 46092cb..f199ec1 100644
--- a/base/trace_event/memory_dump_session_state.h
+++ b/base/trace_event/memory_dump_session_state.h
@@ -6,12 +6,11 @@
 #define BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
 
 #include <memory>
-#include <set>
 
 #include "base/base_export.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
-#include "base/trace_event/memory_dump_request_args.h"
+#include "base/trace_event/trace_config.h"
 
 namespace base {
 namespace trace_event {
@@ -41,18 +40,11 @@
   void SetTypeNameDeduplicator(
       std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator);
 
-  void SetAllowedDumpModes(
-      std::set<MemoryDumpLevelOfDetail> allowed_dump_modes);
-
-  bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) const;
-
-  void set_heap_profiler_breakdown_threshold_bytes(uint32_t value) {
-    heap_profiler_breakdown_threshold_bytes_ = value;
+  const TraceConfig::MemoryDumpConfig& memory_dump_config() const {
+    return memory_dump_config_;
   }
 
-  uint32_t heap_profiler_breakdown_threshold_bytes() const {
-    return heap_profiler_breakdown_threshold_bytes_;
-  }
+  void SetMemoryDumpConfig(const TraceConfig::MemoryDumpConfig& config);
 
  private:
   friend class RefCountedThreadSafe<MemoryDumpSessionState>;
@@ -66,9 +58,9 @@
   // trace is finalized.
   std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator_;
 
-  std::set<MemoryDumpLevelOfDetail> allowed_dump_modes_;
-
-  uint32_t heap_profiler_breakdown_threshold_bytes_;
+  // The memory dump config, copied at the time when the tracing session was
+  // started.
+  TraceConfig::MemoryDumpConfig memory_dump_config_;
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
index ae74322..aed187f 100644
--- a/base/trace_event/memory_infra_background_whitelist.cc
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -17,26 +17,20 @@
 // providers can be added here only if the background mode dump has very
 // less performance and memory overhead.
 const char* const kDumpProviderWhitelist[] = {
-    "android::ResourceManagerImpl",
     "BlinkGC",
-    "ClientDiscardableSharedMemoryManager",
+    "ChildDiscardableSharedMemoryManager",
     "DOMStorage",
-    "DiscardableSharedMemoryManager",
+    "HostDiscardableSharedMemoryManager",
     "IndexedDBBackingStore",
     "JavaHeap",
-    "LevelDB",
     "LeveldbValueStore",
     "Malloc",
-    "MemoryCache",
     "PartitionAlloc",
     "ProcessMemoryMetrics",
     "Skia",
     "Sql",
-    "URLRequestContext",
     "V8Isolate",
     "WinHeap",
-    "SyncDirectory",
-    "TabRestoreServiceHelper",
     nullptr  // End of list marker.
 };
 
@@ -52,7 +46,6 @@
     "java_heap",
     "java_heap/allocated_objects",
     "leveldb/index_db/0x?",
-    "leveldb/leveldb_proto/0x?",
     "leveldb/value_store/Extensions.Database.Open.Settings/0x?",
     "leveldb/value_store/Extensions.Database.Open.Rules/0x?",
     "leveldb/value_store/Extensions.Database.Open.State/0x?",
@@ -62,33 +55,14 @@
     "malloc",
     "malloc/allocated_objects",
     "malloc/metadata_fragmentation_caches",
-    "net/http_network_session_0x?",
-    "net/http_network_session_0x?/quic_stream_factory",
-    "net/http_network_session_0x?/socket_pool",
-    "net/http_network_session_0x?/spdy_session_pool",
-    "net/http_network_session_0x?/stream_factory",
-    "net/sdch_manager_0x?",
-    "net/ssl_session_cache",
-    "net/url_request_context_0x?",
-    "net/url_request_context_0x?/http_cache",
-    "net/url_request_context_0x?/http_network_session",
-    "net/url_request_context_0x?/sdch_manager",
-    "web_cache/Image_resources",
-    "web_cache/CSS stylesheet_resources",
-    "web_cache/Script_resources",
-    "web_cache/XSL stylesheet_resources",
-    "web_cache/Font_resources",
-    "web_cache/Other_resources",
     "partition_alloc/allocated_objects",
     "partition_alloc/partitions",
-    "partition_alloc/partitions/array_buffer",
     "partition_alloc/partitions/buffer",
     "partition_alloc/partitions/fast_malloc",
     "partition_alloc/partitions/layout",
     "skia/sk_glyph_cache",
     "skia/sk_resource_cache",
     "sqlite",
-    "ui/resource_manager_0x?",
     "v8/isolate_0x?/heap_spaces",
     "v8/isolate_0x?/heap_spaces/code_space",
     "v8/isolate_0x?/heap_spaces/large_object_space",
@@ -100,47 +74,6 @@
     "v8/isolate_0x?/zapped_for_debug",
     "winheap",
     "winheap/allocated_objects",
-    "sync/0x?/kernel",
-    "sync/0x?/store",
-    "sync/0x?/model_type/APP",
-    "sync/0x?/model_type/APP_LIST",
-    "sync/0x?/model_type/APP_NOTIFICATION",
-    "sync/0x?/model_type/APP_SETTING",
-    "sync/0x?/model_type/ARC_PACKAGE",
-    "sync/0x?/model_type/ARTICLE",
-    "sync/0x?/model_type/AUTOFILL",
-    "sync/0x?/model_type/AUTOFILL_PROFILE",
-    "sync/0x?/model_type/AUTOFILL_WALLET",
-    "sync/0x?/model_type/BOOKMARK",
-    "sync/0x?/model_type/DEVICE_INFO",
-    "sync/0x?/model_type/DICTIONARY",
-    "sync/0x?/model_type/EXPERIMENTS",
-    "sync/0x?/model_type/EXTENSION",
-    "sync/0x?/model_type/EXTENSION_SETTING",
-    "sync/0x?/model_type/FAVICON_IMAGE",
-    "sync/0x?/model_type/FAVICON_TRACKING",
-    "sync/0x?/model_type/HISTORY_DELETE_DIRECTIVE",
-    "sync/0x?/model_type/MANAGED_USER",
-    "sync/0x?/model_type/MANAGED_USER_SETTING",
-    "sync/0x?/model_type/MANAGED_USER_SHARED_SETTING",
-    "sync/0x?/model_type/MANAGED_USER_WHITELIST",
-    "sync/0x?/model_type/NIGORI",
-    "sync/0x?/model_type/PASSWORD",
-    "sync/0x?/model_type/PREFERENCE",
-    "sync/0x?/model_type/PRINTER",
-    "sync/0x?/model_type/PRIORITY_PREFERENCE",
-    "sync/0x?/model_type/READING_LIST",
-    "sync/0x?/model_type/SEARCH_ENGINE",
-    "sync/0x?/model_type/SESSION",
-    "sync/0x?/model_type/SYNCED_NOTIFICATION",
-    "sync/0x?/model_type/SYNCED_NOTIFICATION_APP_INFO",
-    "sync/0x?/model_type/THEME",
-    "sync/0x?/model_type/TYPED_URL",
-    "sync/0x?/model_type/WALLET_METADATA",
-    "sync/0x?/model_type/WIFI_CREDENTIAL",
-    "tab_restore/service_helper_0x?/entries",
-    "tab_restore/service_helper_0x?/entries/tab_0x?",
-    "tab_restore/service_helper_0x?/entries/window_0x?",
     nullptr  // End of list marker.
 };
 
diff --git a/base/trace_event/memory_usage_estimator.cc b/base/trace_event/memory_usage_estimator.cc
deleted file mode 100644
index c769d5b..0000000
--- a/base/trace_event/memory_usage_estimator.cc
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_usage_estimator.h"
-
-namespace base {
-namespace trace_event {
-
-template size_t EstimateMemoryUsage(const std::string&);
-template size_t EstimateMemoryUsage(const string16&);
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/memory_usage_estimator.h b/base/trace_event/memory_usage_estimator.h
deleted file mode 100644
index db4ea69..0000000
--- a/base/trace_event/memory_usage_estimator.h
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
-#define BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
-
-#include <stdint.h>
-
-#include <array>
-#include <deque>
-#include <list>
-#include <map>
-#include <memory>
-#include <queue>
-#include <set>
-#include <stack>
-#include <string>
-#include <type_traits>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/containers/linked_list.h"
-#include "base/strings/string16.h"
-#include "base/template_util.h"
-
-// Composable memory usage estimators.
-//
-// This file defines set of EstimateMemoryUsage(object) functions that return
-// approximate memory usage of their argument.
-//
-// The ultimate goal is to make memory usage estimation for a class simply a
-// matter of aggregating EstimateMemoryUsage() results over all fields.
-//
-// That is achieved via composability: if EstimateMemoryUsage() is defined
-// for T then EstimateMemoryUsage() is also defined for any combination of
-// containers holding T (e.g. std::map<int, std::vector<T>>).
-//
-// There are two ways of defining EstimateMemoryUsage() for a type:
-//
-// 1. As a global function 'size_t EstimateMemoryUsage(T)' in
-//    in base::trace_event namespace.
-//
-// 2. As 'size_t T::EstimateMemoryUsage() const' method. In this case
-//    EstimateMemoryUsage(T) function in base::trace_event namespace is
-//    provided automatically.
-//
-// Here is an example implementation:
-//
-// size_t foo::bar::MyClass::EstimateMemoryUsage() const {
-//   return base::trace_event::EstimateMemoryUsage(name_) +
-//          base::trace_event::EstimateMemoryUsage(id_) +
-//          base::trace_event::EstimateMemoryUsage(items_);
-// }
-//
-// The approach is simple: first call EstimateMemoryUsage() on all members,
-// then recursively fix compilation errors that are caused by types not
-// implementing EstimateMemoryUsage().
-
-namespace base {
-namespace trace_event {
-
-// Declarations
-
-// If T declares 'EstimateMemoryUsage() const' member function, then
-// global function EstimateMemoryUsage(T) is available, and just calls
-// the member function.
-template <class T>
-auto EstimateMemoryUsage(const T& object)
-    -> decltype(object.EstimateMemoryUsage());
-
-// String
-
-template <class C, class T, class A>
-size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string);
-
-// Arrays
-
-template <class T, size_t N>
-size_t EstimateMemoryUsage(const std::array<T, N>& array);
-
-template <class T, size_t N>
-size_t EstimateMemoryUsage(T (&array)[N]);
-
-template <class T>
-size_t EstimateMemoryUsage(const T* array, size_t array_length);
-
-// std::unique_ptr
-
-template <class T, class D>
-size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr);
-
-template <class T, class D>
-size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
-                           size_t array_length);
-
-// std::shared_ptr
-
-template <class T>
-size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr);
-
-// Containers
-
-template <class F, class S>
-size_t EstimateMemoryUsage(const std::pair<F, S>& pair);
-
-template <class T, class A>
-size_t EstimateMemoryUsage(const std::vector<T, A>& vector);
-
-template <class T, class A>
-size_t EstimateMemoryUsage(const std::list<T, A>& list);
-
-template <class T>
-size_t EstimateMemoryUsage(const base::LinkedList<T>& list);
-
-template <class T, class C, class A>
-size_t EstimateMemoryUsage(const std::set<T, C, A>& set);
-
-template <class T, class C, class A>
-size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set);
-
-template <class K, class V, class C, class A>
-size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map);
-
-template <class K, class V, class C, class A>
-size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map);
-
-template <class T, class H, class KE, class A>
-size_t EstimateMemoryUsage(const std::unordered_set<T, H, KE, A>& set);
-
-template <class T, class H, class KE, class A>
-size_t EstimateMemoryUsage(const std::unordered_multiset<T, H, KE, A>& set);
-
-template <class K, class V, class H, class KE, class A>
-size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map);
-
-template <class K, class V, class H, class KE, class A>
-size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map);
-
-template <class T, class A>
-size_t EstimateMemoryUsage(const std::deque<T, A>& deque);
-
-template <class T, class C>
-size_t EstimateMemoryUsage(const std::queue<T, C>& queue);
-
-template <class T, class C>
-size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue);
-
-template <class T, class C>
-size_t EstimateMemoryUsage(const std::stack<T, C>& stack);
-
-// TODO(dskiba):
-//   std::forward_list
-
-// Definitions
-
-namespace internal {
-
-// HasEMU<T>::value is true iff EstimateMemoryUsage(T) is available.
-// (This is the default version, which is false.)
-template <class T, class X = void>
-struct HasEMU : std::false_type {};
-
-// This HasEMU specialization is only picked up if there exists function
-// EstimateMemoryUsage(const T&) that returns size_t. Simpler ways to
-// achieve this don't work on MSVC.
-template <class T>
-struct HasEMU<
-    T,
-    typename std::enable_if<std::is_same<
-        size_t,
-        decltype(EstimateMemoryUsage(std::declval<const T&>()))>::value>::type>
-    : std::true_type {};
-
-// EMUCaller<T> does three things:
-// 1. Defines Call() method that calls EstimateMemoryUsage(T) if it's
-//    available.
-// 2. If EstimateMemoryUsage(T) is not available, but T has trivial dtor
-//    (i.e. it's POD, integer, pointer, enum, etc.) then it defines Call()
-//    method that returns 0. This is useful for containers, which allocate
-//    memory regardless of T (also for cases like std::map<int, MyClass>).
-// 3. Finally, if EstimateMemoryUsage(T) is not available, then it triggers
-//    a static_assert with a helpful message. That cuts numbers of errors
-//    considerably - if you just call EstimateMemoryUsage(T) but it's not
-//    available for T, then compiler will helpfully list *all* possible
-//    variants of it, with an explanation for each.
-template <class T, class X = void>
-struct EMUCaller {
-  // std::is_same<> below makes static_assert depend on T, in order to
-  // prevent it from asserting regardless instantiation.
-  static_assert(std::is_same<T, std::false_type>::value,
-                "Neither global function 'size_t EstimateMemoryUsage(T)' "
-                "nor member function 'size_t T::EstimateMemoryUsage() const' "
-                "is defined for the type.");
-
-  static size_t Call(const T&) { return 0; }
-};
-
-template <class T>
-struct EMUCaller<T, typename std::enable_if<HasEMU<T>::value>::type> {
-  static size_t Call(const T& value) { return EstimateMemoryUsage(value); }
-};
-
-template <class T>
-struct EMUCaller<
-    T,
-    typename std::enable_if<!HasEMU<T>::value &&
-                            is_trivially_destructible<T>::value>::type> {
-  static size_t Call(const T&) { return 0; }
-};
-
-// Returns reference to the underlying container of a container adapter.
-// Works for std::stack, std::queue and std::priority_queue.
-template <class A>
-const typename A::container_type& GetUnderlyingContainer(const A& adapter) {
-  struct ExposedAdapter : A {
-    using A::c;
-  };
-  return adapter.*&ExposedAdapter::c;
-}
-
-}  // namespace internal
-
-// Proxy that deducts T and calls EMUCaller<T>.
-// To be used by EstimateMemoryUsage() implementations for containers.
-template <class T>
-size_t EstimateItemMemoryUsage(const T& value) {
-  return internal::EMUCaller<T>::Call(value);
-}
-
-template <class I>
-size_t EstimateIterableMemoryUsage(const I& iterable) {
-  size_t memory_usage = 0;
-  for (const auto& item : iterable) {
-    memory_usage += EstimateItemMemoryUsage(item);
-  }
-  return memory_usage;
-}
-
-// Global EstimateMemoryUsage(T) that just calls T::EstimateMemoryUsage().
-template <class T>
-auto EstimateMemoryUsage(const T& object)
-    -> decltype(object.EstimateMemoryUsage()) {
-  static_assert(
-      std::is_same<decltype(object.EstimateMemoryUsage()), size_t>::value,
-      "'T::EstimateMemoryUsage() const' must return size_t.");
-  return object.EstimateMemoryUsage();
-}
-
-// String
-
-template <class C, class T, class A>
-size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string) {
-  using string_type = std::basic_string<C, T, A>;
-  using value_type = typename string_type::value_type;
-  // C++11 doesn't leave much room for implementors - std::string can
-  // use short string optimization, but that's about it. We detect SSO
-  // by checking that c_str() points inside |string|.
-  const uint8_t* cstr = reinterpret_cast<const uint8_t*>(string.c_str());
-  const uint8_t* inline_cstr = reinterpret_cast<const uint8_t*>(&string);
-  if (cstr >= inline_cstr && cstr < inline_cstr + sizeof(string)) {
-    // SSO string
-    return 0;
-  }
-  return (string.capacity() + 1) * sizeof(value_type);
-}
-
-// Use explicit instantiations from the .cc file (reduces bloat).
-extern template BASE_EXPORT size_t EstimateMemoryUsage(const std::string&);
-extern template BASE_EXPORT size_t EstimateMemoryUsage(const string16&);
-
-// Arrays
-
-template <class T, size_t N>
-size_t EstimateMemoryUsage(const std::array<T, N>& array) {
-  return EstimateIterableMemoryUsage(array);
-}
-
-template <class T, size_t N>
-size_t EstimateMemoryUsage(T (&array)[N]) {
-  return EstimateIterableMemoryUsage(array);
-}
-
-template <class T>
-size_t EstimateMemoryUsage(const T* array, size_t array_length) {
-  size_t memory_usage = sizeof(T) * array_length;
-  for (size_t i = 0; i != array_length; ++i) {
-    memory_usage += EstimateItemMemoryUsage(array[i]);
-  }
-  return memory_usage;
-}
-
-// std::unique_ptr
-
-template <class T, class D>
-size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr) {
-  return ptr ? (sizeof(T) + EstimateItemMemoryUsage(*ptr)) : 0;
-}
-
-template <class T, class D>
-size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
-                           size_t array_length) {
-  return EstimateMemoryUsage(array.get(), array_length);
-}
-
-// std::shared_ptr
-
-template <class T>
-size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr) {
-  auto use_count = ptr.use_count();
-  if (use_count == 0) {
-    return 0;
-  }
-  // Model shared_ptr after libc++,
-  // see __shared_ptr_pointer from include/memory
-  struct SharedPointer {
-    void* vtbl;
-    long shared_owners;
-    long shared_weak_owners;
-    T* value;
-  };
-  // If object of size S shared N > S times we prefer to (potentially)
-  // overestimate than to return 0.
-  return sizeof(SharedPointer) +
-         (EstimateItemMemoryUsage(*ptr) + (use_count - 1)) / use_count;
-}
-
-// std::pair
-
-template <class F, class S>
-size_t EstimateMemoryUsage(const std::pair<F, S>& pair) {
-  return EstimateItemMemoryUsage(pair.first) +
-         EstimateItemMemoryUsage(pair.second);
-}
-
-// std::vector
-
-template <class T, class A>
-size_t EstimateMemoryUsage(const std::vector<T, A>& vector) {
-  return sizeof(T) * vector.capacity() + EstimateIterableMemoryUsage(vector);
-}
-
-// std::list
-
-template <class T, class A>
-size_t EstimateMemoryUsage(const std::list<T, A>& list) {
-  using value_type = typename std::list<T, A>::value_type;
-  struct Node {
-    Node* prev;
-    Node* next;
-    value_type value;
-  };
-  return sizeof(Node) * list.size() +
-         EstimateIterableMemoryUsage(list);
-}
-
-template <class T>
-size_t EstimateMemoryUsage(const base::LinkedList<T>& list) {
-  size_t memory_usage = 0u;
-  for (base::LinkNode<T>* node = list.head(); node != list.end();
-       node = node->next()) {
-    // Since we increment by calling node = node->next() we know that node
-    // isn't nullptr.
-    memory_usage += EstimateMemoryUsage(*node->value()) + sizeof(T);
-  }
-  return memory_usage;
-}
-
-// Tree containers
-
-template <class V>
-size_t EstimateTreeMemoryUsage(size_t size) {
-  // Tree containers are modeled after libc++
-  // (__tree_node from include/__tree)
-  struct Node {
-    Node* left;
-    Node* right;
-    Node* parent;
-    bool is_black;
-    V value;
-  };
-  return sizeof(Node) * size;
-}
-
-template <class T, class C, class A>
-size_t EstimateMemoryUsage(const std::set<T, C, A>& set) {
-  using value_type = typename std::set<T, C, A>::value_type;
-  return EstimateTreeMemoryUsage<value_type>(set.size()) +
-         EstimateIterableMemoryUsage(set);
-}
-
-template <class T, class C, class A>
-size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set) {
-  using value_type = typename std::multiset<T, C, A>::value_type;
-  return EstimateTreeMemoryUsage<value_type>(set.size()) +
-         EstimateIterableMemoryUsage(set);
-}
-
-template <class K, class V, class C, class A>
-size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map) {
-  using value_type = typename std::map<K, V, C, A>::value_type;
-  return EstimateTreeMemoryUsage<value_type>(map.size()) +
-         EstimateIterableMemoryUsage(map);
-}
-
-template <class K, class V, class C, class A>
-size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map) {
-  using value_type = typename std::multimap<K, V, C, A>::value_type;
-  return EstimateTreeMemoryUsage<value_type>(map.size()) +
-         EstimateIterableMemoryUsage(map);
-}
-
-// HashMap containers
-
-namespace internal {
-
-// While hashtable containers model doesn't depend on STL implementation, one
-// detail still crept in: bucket_count. It's used in size estimation, but its
-// value after inserting N items is not predictable.
-// This function is specialized by unittests to return constant value, thus
-// excluding bucket_count from testing.
-template <class V>
-size_t HashMapBucketCountForTesting(size_t bucket_count) {
-  return bucket_count;
-}
-
-}  // namespace internal
-
-template <class V>
-size_t EstimateHashMapMemoryUsage(size_t bucket_count, size_t size) {
-  // Hashtable containers are modeled after libc++
-  // (__hash_node from include/__hash_table)
-  struct Node {
-    void* next;
-    size_t hash;
-    V value;
-  };
-  using Bucket = void*;
-  bucket_count = internal::HashMapBucketCountForTesting<V>(bucket_count);
-  return sizeof(Bucket) * bucket_count + sizeof(Node) * size;
-}
-
-template <class K, class H, class KE, class A>
-size_t EstimateMemoryUsage(const std::unordered_set<K, H, KE, A>& set) {
-  using value_type = typename std::unordered_set<K, H, KE, A>::value_type;
-  return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
-                                                set.size()) +
-         EstimateIterableMemoryUsage(set);
-}
-
-template <class K, class H, class KE, class A>
-size_t EstimateMemoryUsage(const std::unordered_multiset<K, H, KE, A>& set) {
-  using value_type = typename std::unordered_multiset<K, H, KE, A>::value_type;
-  return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
-                                                set.size()) +
-         EstimateIterableMemoryUsage(set);
-}
-
-template <class K, class V, class H, class KE, class A>
-size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map) {
-  using value_type = typename std::unordered_map<K, V, H, KE, A>::value_type;
-  return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
-                                                map.size()) +
-         EstimateIterableMemoryUsage(map);
-}
-
-template <class K, class V, class H, class KE, class A>
-size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map) {
-  using value_type =
-      typename std::unordered_multimap<K, V, H, KE, A>::value_type;
-  return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
-                                                map.size()) +
-         EstimateIterableMemoryUsage(map);
-}
-
-// std::deque
-
-template <class T, class A>
-size_t EstimateMemoryUsage(const std::deque<T, A>& deque) {
-// Since std::deque implementations are wildly different
-// (see crbug.com/674287), we can't have one "good enough"
-// way to estimate.
-
-// kBlockSize      - minimum size of a block, in bytes
-// kMinBlockLength - number of elements in a block
-//                   if sizeof(T) > kBlockSize
-#if defined(_LIBCPP_VERSION)
-  size_t kBlockSize = 4096;
-  size_t kMinBlockLength = 16;
-#elif defined(__GLIBCXX__)
-  size_t kBlockSize = 512;
-  size_t kMinBlockLength = 1;
-#elif defined(_MSC_VER)
-  size_t kBlockSize = 16;
-  size_t kMinBlockLength = 1;
-#else
-  size_t kBlockSize = 0;
-  size_t kMinBlockLength = 1;
-#endif
-
-  size_t block_length =
-      (sizeof(T) > kBlockSize) ? kMinBlockLength : kBlockSize / sizeof(T);
-
-  size_t blocks = (deque.size() + block_length - 1) / block_length;
-
-#if defined(__GLIBCXX__)
-  // libstdc++: deque always has at least one block
-  if (!blocks)
-    blocks = 1;
-#endif
-
-#if defined(_LIBCPP_VERSION)
-  // libc++: deque keeps at most two blocks when it shrinks,
-  // so even if the size is zero, deque might be holding up
-  // to 4096 * 2 bytes. One way to know whether deque has
-  // ever allocated (and hence has 1 or 2 blocks) is to check
-  // iterator's pointer. Non-zero value means that deque has
-  // at least one block.
-  if (!blocks && deque.begin().operator->())
-    blocks = 1;
-#endif
-
-  return (blocks * block_length * sizeof(T)) +
-         EstimateIterableMemoryUsage(deque);
-}
-
-// Container adapters
-
-template <class T, class C>
-size_t EstimateMemoryUsage(const std::queue<T, C>& queue) {
-  return EstimateMemoryUsage(internal::GetUnderlyingContainer(queue));
-}
-
-template <class T, class C>
-size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue) {
-  return EstimateMemoryUsage(internal::GetUnderlyingContainer(queue));
-}
-
-template <class T, class C>
-size_t EstimateMemoryUsage(const std::stack<T, C>& stack) {
-  return EstimateMemoryUsage(internal::GetUnderlyingContainer(stack));
-}
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
diff --git a/base/trace_event/memory_usage_estimator_unittest.cc b/base/trace_event/memory_usage_estimator_unittest.cc
deleted file mode 100644
index 80237c0..0000000
--- a/base/trace_event/memory_usage_estimator_unittest.cc
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_usage_estimator.h"
-
-#include <stdlib.h>
-
-#include "base/memory/ptr_util.h"
-#include "base/strings/string16.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(ARCH_CPU_64_BITS)
-#define EXPECT_EQ_32_64(_, e, a) EXPECT_EQ(e, a)
-#else
-#define EXPECT_EQ_32_64(e, _, a) EXPECT_EQ(e, a)
-#endif
-
-namespace base {
-namespace trace_event {
-
-namespace {
-
-// Test class with predictable memory usage.
-class Data {
- public:
-  explicit Data(size_t size = 17): size_(size) {
-  }
-
-  size_t size() const { return size_; }
-
-  size_t EstimateMemoryUsage() const {
-    return size_;
-  }
-
-  bool operator < (const Data& other) const {
-    return size_ < other.size_;
-  }
-  bool operator == (const Data& other) const {
-    return size_ == other.size_;
-  }
-
-  struct Hasher {
-    size_t operator () (const Data& data) const {
-      return data.size();
-    }
-  };
-
- private:
-  size_t size_;
-};
-
-}  // namespace
-
-namespace internal {
-
-// This kills variance of bucket_count across STL implementations.
-template <>
-size_t HashMapBucketCountForTesting<Data>(size_t) {
-  return 10;
-}
-template <>
-size_t HashMapBucketCountForTesting<std::pair<const Data, short>>(size_t) {
-  return 10;
-}
-
-}  // namespace internal
-
-TEST(EstimateMemoryUsageTest, String) {
-  std::string string(777, 'a');
-  EXPECT_EQ(string.capacity() + 1, EstimateMemoryUsage(string));
-}
-
-TEST(EstimateMemoryUsageTest, String16) {
-  string16 string(777, 'a');
-  EXPECT_EQ(sizeof(char16) * (string.capacity() + 1),
-            EstimateMemoryUsage(string));
-}
-
-TEST(EstimateMemoryUsageTest, Arrays) {
-  // std::array
-  {
-    std::array<Data, 10> array;
-    EXPECT_EQ(170u, EstimateMemoryUsage(array));
-  }
-
-  // T[N]
-  {
-    Data array[10];
-    EXPECT_EQ(170u, EstimateMemoryUsage(array));
-  }
-
-  // C array
-  {
-    struct Item {
-      char payload[10];
-    };
-    Item* array = new Item[7];
-    EXPECT_EQ(70u, EstimateMemoryUsage(array, 7));
-    delete[] array;
-  }
-}
-
-TEST(EstimateMemoryUsageTest, UniquePtr) {
-  // Empty
-  {
-    std::unique_ptr<Data> ptr;
-    EXPECT_EQ(0u, EstimateMemoryUsage(ptr));
-  }
-
-  // Not empty
-  {
-    std::unique_ptr<Data> ptr(new Data());
-    EXPECT_EQ_32_64(21u, 25u, EstimateMemoryUsage(ptr));
-  }
-
-  // With a pointer
-  {
-    std::unique_ptr<Data*> ptr(new Data*());
-    EXPECT_EQ(sizeof(void*), EstimateMemoryUsage(ptr));
-  }
-
-  // With an array
-  {
-    struct Item {
-      uint32_t payload[10];
-    };
-    std::unique_ptr<Item[]> ptr(new Item[7]);
-    EXPECT_EQ(280u, EstimateMemoryUsage(ptr, 7));
-  }
-}
-
-TEST(EstimateMemoryUsageTest, Vector) {
-  std::vector<Data> vector;
-  vector.reserve(1000);
-
-  // For an empty vector we should return memory usage of its buffer
-  size_t capacity = vector.capacity();
-  size_t expected_size = capacity * sizeof(Data);
-  EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
-
-  // If vector is not empty, its size should also include memory usages
-  // of all elements.
-  for (size_t i = 0; i != capacity / 2; ++i) {
-    vector.push_back(Data(i));
-    expected_size += EstimateMemoryUsage(vector.back());
-  }
-  EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
-}
-
-TEST(EstimateMemoryUsageTest, List) {
-  struct POD {
-    short data;
-  };
-  std::list<POD> list;
-  for (int i = 0; i != 1000; ++i) {
-    list.push_back(POD());
-  }
-  EXPECT_EQ_32_64(12000u, 24000u, EstimateMemoryUsage(list));
-}
-
-TEST(EstimateMemoryUsageTest, Set) {
-  std::set<std::pair<int, Data>> set;
-  for (int i = 0; i != 1000; ++i) {
-    set.insert({i, Data(i)});
-  }
-  EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(set));
-}
-
-TEST(EstimateMemoryUsageTest, MultiSet) {
-  std::multiset<bool> set;
-  for (int i = 0; i != 1000; ++i) {
-    set.insert((i & 1) != 0);
-  }
-  EXPECT_EQ_32_64(16000u, 32000u, EstimateMemoryUsage(set));
-}
-
-TEST(EstimateMemoryUsageTest, Map) {
-  std::map<Data, int> map;
-  for (int i = 0; i != 1000; ++i) {
-    map.insert({Data(i), i});
-  }
-  EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
-}
-
-TEST(EstimateMemoryUsageTest, MultiMap) {
-  std::multimap<char, Data> map;
-  for (int i = 0; i != 1000; ++i) {
-    map.insert({static_cast<char>(i), Data(i)});
-  }
-  EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
-}
-
-TEST(EstimateMemoryUsageTest, UnorderedSet) {
-  std::unordered_set<Data, Data::Hasher> set;
-  for (int i = 0; i != 1000; ++i) {
-    set.insert(Data(i));
-  }
-  EXPECT_EQ_32_64(511540u, 523580u, EstimateMemoryUsage(set));
-}
-
-TEST(EstimateMemoryUsageTest, UnorderedMultiSet) {
-  std::unordered_multiset<Data, Data::Hasher> set;
-  for (int i = 0; i != 500; ++i) {
-    set.insert(Data(i));
-    set.insert(Data(i));
-  }
-  EXPECT_EQ_32_64(261540u, 273580u, EstimateMemoryUsage(set));
-}
-
-TEST(EstimateMemoryUsageTest, UnorderedMap) {
-  std::unordered_map<Data, short, Data::Hasher> map;
-  for (int i = 0; i != 1000; ++i) {
-    map.insert({Data(i), static_cast<short>(i)});
-  }
-  EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
-}
-
-TEST(EstimateMemoryUsageTest, UnorderedMultiMap) {
-  std::unordered_multimap<Data, short, Data::Hasher> map;
-  for (int i = 0; i != 1000; ++i) {
-    map.insert({Data(i), static_cast<short>(i)});
-  }
-  EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
-}
-
-TEST(EstimateMemoryUsageTest, Deque) {
-  std::deque<Data> deque;
-
-  // Pick a large value so that platform-specific accounting
-  // for deque's blocks is small compared to usage of all items.
-  constexpr size_t kDataSize = 100000;
-  for (int i = 0; i != 1500; ++i) {
-    deque.push_back(Data(kDataSize));
-  }
-
-  // Compare against a reasonable minimum (i.e. no overhead).
-  size_t min_expected_usage = deque.size() * (sizeof(Data) + kDataSize);
-  EXPECT_LE(min_expected_usage, EstimateMemoryUsage(deque));
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index 63d1340..8269892 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -18,7 +18,7 @@
 #include "build/build_config.h"
 
 #if defined(OS_IOS)
-#include <mach/vm_page_size.h>
+#include <sys/sysctl.h>
 #endif
 
 #if defined(OS_POSIX)
@@ -57,13 +57,19 @@
 size_t ProcessMemoryDump::GetSystemPageSize() {
 #if defined(OS_IOS)
   // On iOS, getpagesize() returns the user page sizes, but for allocating
-  // arrays for mincore(), kernel page sizes is needed. Use vm_kernel_page_size
-  // as recommended by Apple, https://forums.developer.apple.com/thread/47532/.
-  // Refer to http://crbug.com/542671 and Apple rdar://23651782
-  return vm_kernel_page_size;
-#else
-  return base::GetPageSize();
+  // arrays for mincore(), kernel page sizes is needed. sysctlbyname() should
+  // be used for this. Refer to crbug.com/542671 and Apple rdar://23651782
+  int pagesize;
+  size_t pagesize_len;
+  int status = sysctlbyname("vm.pagesize", NULL, &pagesize_len, nullptr, 0);
+  if (!status && pagesize_len == sizeof(pagesize)) {
+    if (!sysctlbyname("vm.pagesize", &pagesize, &pagesize_len, nullptr, 0))
+      return pagesize;
+  }
+  LOG(ERROR) << "sysctlbyname(\"vm.pagesize\") failed.";
+  // Falls back to getpagesize() although it may be wrong in certain cases.
 #endif  // defined(OS_IOS)
+  return base::GetPageSize();
 }
 
 // static
@@ -158,14 +164,14 @@
 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
     const std::string& absolute_name) {
   return AddAllocatorDumpInternal(
-      MakeUnique<MemoryAllocatorDump>(absolute_name, this));
+      WrapUnique(new MemoryAllocatorDump(absolute_name, this)));
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
     const std::string& absolute_name,
     const MemoryAllocatorDumpGuid& guid) {
   return AddAllocatorDumpInternal(
-      MakeUnique<MemoryAllocatorDump>(absolute_name, this, guid));
+      WrapUnique(new MemoryAllocatorDump(absolute_name, this, guid)));
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
diff --git a/base/trace_event/process_memory_dump.h b/base/trace_event/process_memory_dump.h
index 6f8d167..d020c7d 100644
--- a/base/trace_event/process_memory_dump.h
+++ b/base/trace_event/process_memory_dump.h
@@ -31,6 +31,7 @@
 namespace base {
 namespace trace_event {
 
+class MemoryDumpManager;
 class MemoryDumpSessionState;
 class TracedValue;
 
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
index e26e9fd..d40f430 100644
--- a/base/trace_event/trace_buffer.cc
+++ b/base/trace_event/trace_buffer.cc
@@ -168,8 +168,7 @@
     // have to add the metadata events and flush thread-local buffers even if
     // the buffer is full.
     *index = chunks_.size();
-    // Put nullptr in the slot of a in-flight chunk.
-    chunks_.push_back(nullptr);
+    chunks_.push_back(NULL);  // Put NULL in the slot of a in-flight chunk.
     ++in_flight_chunk_count_;
     // + 1 because zero chunk_seq is not allowed.
     return std::unique_ptr<TraceBufferChunk>(
@@ -182,7 +181,7 @@
     DCHECK_LT(index, chunks_.size());
     DCHECK(!chunks_[index]);
     --in_flight_chunk_count_;
-    chunks_[index] = std::move(chunk);
+    chunks_[index] = chunk.release();
   }
 
   bool IsFull() const override { return chunks_.size() >= max_chunks_; }
@@ -199,7 +198,7 @@
   TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
     if (handle.chunk_index >= chunks_.size())
       return NULL;
-    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
+    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
     if (!chunk || chunk->seq() != handle.chunk_seq)
       return NULL;
     return chunk->GetEventAt(handle.event_index);
@@ -208,7 +207,7 @@
   const TraceBufferChunk* NextChunk() override {
     while (current_iteration_index_ < chunks_.size()) {
       // Skip in-flight chunks.
-      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
+      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
       if (chunk)
         return chunk;
     }
@@ -224,7 +223,7 @@
     overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
                   chunks_ptr_vector_resident_size);
     for (size_t i = 0; i < chunks_.size(); ++i) {
-      TraceBufferChunk* chunk = chunks_[i].get();
+      TraceBufferChunk* chunk = chunks_[i];
       // Skip the in-flight (nullptr) chunks. They will be accounted by the
       // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
       if (chunk)
@@ -236,7 +235,7 @@
   size_t in_flight_chunk_count_;
   size_t current_iteration_index_;
   size_t max_chunks_;
-  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
+  ScopedVector<TraceBufferChunk> chunks_;
 
   DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
 };
diff --git a/base/trace_event/trace_category.h b/base/trace_event/trace_category.h
deleted file mode 100644
index 5a7915a..0000000
--- a/base/trace_event/trace_category.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_TRACE_CATEGORY_H_
-#define BASE_TRACE_EVENT_TRACE_CATEGORY_H_
-
-#include <stdint.h>
-
-namespace base {
-namespace trace_event {
-
-// Captures the state of an invidivual trace category. Nothing except tracing
-// internals (e.g., TraceLog) is supposed to have non-const Category pointers.
-struct TraceCategory {
-  // The TRACE_EVENT macros should only use this value as a bool.
-  // These enum values are effectively a public API and third_party projects
-  // depend on their value. Hence, never remove or recycle existing bits, unless
-  // you are sure that all the third-party projects that depend on this have
-  // been updated.
-  enum StateFlags : uint8_t {
-    ENABLED_FOR_RECORDING = 1 << 0,
-
-    // Not used anymore.
-    DEPRECATED_ENABLED_FOR_MONITORING = 1 << 1,
-    DEPRECATED_ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
-
-    ENABLED_FOR_ETW_EXPORT = 1 << 3,
-    ENABLED_FOR_FILTERING = 1 << 4
-  };
-
-  static const TraceCategory* FromStatePtr(const uint8_t* state_ptr) {
-    static_assert(
-        offsetof(TraceCategory, state_) == 0,
-        "|state_| must be the first field of the TraceCategory class.");
-    return reinterpret_cast<const TraceCategory*>(state_ptr);
-  }
-
-  bool is_valid() const { return name_ != nullptr; }
-  void set_name(const char* name) { name_ = name; }
-  const char* name() const {
-    DCHECK(is_valid());
-    return name_;
-  }
-
-  // TODO(primiano): This is an intermediate solution to deal with the fact that
-  // today TRACE_EVENT* macros cache the state ptr. They should just cache the
-  // full TraceCategory ptr, which is immutable, and use these helper function
-  // here. This will get rid of the need of this awkward ptr getter completely.
-  const uint8_t* state_ptr() const {
-    return const_cast<const uint8_t*>(&state_);
-  }
-
-  uint8_t state() const {
-    return *const_cast<volatile const uint8_t*>(&state_);
-  }
-
-  bool is_enabled() const { return state() != 0; }
-
-  void set_state(uint8_t state) {
-    *const_cast<volatile uint8_t*>(&state_) = state;
-  }
-
-  void clear_state_flag(StateFlags flag) { set_state(state() & (~flag)); }
-  void set_state_flag(StateFlags flag) { set_state(state() | flag); }
-
-  uint32_t enabled_filters() const {
-    return *const_cast<volatile const uint32_t*>(&enabled_filters_);
-  }
-
-  bool is_filter_enabled(size_t index) const {
-    DCHECK(index < sizeof(enabled_filters_) * 8);
-    return (enabled_filters() & (1 << index)) != 0;
-  }
-
-  void set_enabled_filters(uint32_t enabled_filters) {
-    *const_cast<volatile uint32_t*>(&enabled_filters_) = enabled_filters;
-  }
-
-  void reset_for_testing() {
-    set_state(0);
-    set_enabled_filters(0);
-  }
-
-  // These fields should not be accessed directly, not even by tracing code.
-  // The only reason why these are not private is because it makes it impossible
-  // to have a global array of TraceCategory in category_registry.cc without
-  // creating initializers. See discussion on goo.gl/qhZN94 and
-  // crbug.com/{660967,660828}.
-
-  // The enabled state. TRACE_EVENT* macros will capture events if any of the
-  // flags here are set. Since TRACE_EVENTx macros are used in a lot of
-  // fast-paths, accesses to this field are non-barriered and racy by design.
-  // This field is mutated when starting/stopping tracing and we don't care
-  // about missing some events.
-  uint8_t state_;
-
-  // When ENABLED_FOR_FILTERING is set, this contains a bitmap to the
-  // coressponding filter (see event_filters.h).
-  uint32_t enabled_filters_;
-
-  // TraceCategory group names are long lived static strings.
-  const char* name_;
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_TRACE_CATEGORY_H_
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 36de107..b343ea0 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -30,11 +30,13 @@
 const char kRecordContinuously[] = "record-continuously";
 const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
 const char kTraceToConsole[] = "trace-to-console";
+const char kEnableSampling[] = "enable-sampling";
 const char kEnableSystrace[] = "enable-systrace";
 const char kEnableArgumentFilter[] = "enable-argument-filter";
 
 // String parameters that can be used to parse the trace config string.
 const char kRecordModeParam[] = "record_mode";
+const char kEnableSamplingParam[] = "enable_sampling";
 const char kEnableSystraceParam[] = "enable_systrace";
 const char kEnableArgumentFilterParam[] = "enable_argument_filter";
 const char kIncludedCategoriesParam[] = "included_categories";
@@ -48,32 +50,24 @@
 const char kMemoryDumpConfigParam[] = "memory_dump_config";
 const char kAllowedDumpModesParam[] = "allowed_dump_modes";
 const char kTriggersParam[] = "triggers";
-const char kTriggerModeParam[] = "mode";
-const char kMinTimeBetweenDumps[] = "min_time_between_dumps_ms";
-const char kTriggerTypeParam[] = "type";
-const char kPeriodicIntervalLegacyParam[] = "periodic_interval_ms";
+const char kPeriodicIntervalParam[] = "periodic_interval_ms";
+const char kModeParam[] = "mode";
 const char kHeapProfilerOptions[] = "heap_profiler_options";
 const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
 
-// String parameters used to parse category event filters.
-const char kEventFiltersParam[] = "event_filters";
-const char kFilterPredicateParam[] = "filter_predicate";
-const char kFilterArgsParam[] = "filter_args";
-
 // Default configuration of memory dumps.
 const TraceConfig::MemoryDumpConfig::Trigger kDefaultHeavyMemoryDumpTrigger = {
-    2000,  // min_time_between_dumps_ms
-    MemoryDumpLevelOfDetail::DETAILED, MemoryDumpType::PERIODIC_INTERVAL};
+    2000,  // periodic_interval_ms
+    MemoryDumpLevelOfDetail::DETAILED};
 const TraceConfig::MemoryDumpConfig::Trigger kDefaultLightMemoryDumpTrigger = {
-    250,  // min_time_between_dumps_ms
-    MemoryDumpLevelOfDetail::LIGHT, MemoryDumpType::PERIODIC_INTERVAL};
+    250,  // periodic_interval_ms
+    MemoryDumpLevelOfDetail::LIGHT};
 
 class ConvertableTraceConfigToTraceFormat
     : public base::trace_event::ConvertableToTraceFormat {
  public:
   explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
       : trace_config_(trace_config) {}
-
   ~ConvertableTraceConfigToTraceFormat() override {}
 
   void AppendAsTraceFormat(std::string* out) const override {
@@ -121,94 +115,6 @@
   heap_profiler_options.Clear();
 }
 
-void TraceConfig::MemoryDumpConfig::Merge(
-    const TraceConfig::MemoryDumpConfig& config) {
-  triggers.insert(triggers.end(), config.triggers.begin(),
-                  config.triggers.end());
-  allowed_dump_modes.insert(config.allowed_dump_modes.begin(),
-                            config.allowed_dump_modes.end());
-  heap_profiler_options.breakdown_threshold_bytes =
-      std::min(heap_profiler_options.breakdown_threshold_bytes,
-               config.heap_profiler_options.breakdown_threshold_bytes);
-}
-
-TraceConfig::EventFilterConfig::EventFilterConfig(
-    const std::string& predicate_name)
-    : predicate_name_(predicate_name) {}
-
-TraceConfig::EventFilterConfig::~EventFilterConfig() {}
-
-TraceConfig::EventFilterConfig::EventFilterConfig(const EventFilterConfig& tc) {
-  *this = tc;
-}
-
-TraceConfig::EventFilterConfig& TraceConfig::EventFilterConfig::operator=(
-    const TraceConfig::EventFilterConfig& rhs) {
-  if (this == &rhs)
-    return *this;
-
-  predicate_name_ = rhs.predicate_name_;
-  included_categories_ = rhs.included_categories_;
-  excluded_categories_ = rhs.excluded_categories_;
-  if (rhs.args_)
-    args_ = rhs.args_->CreateDeepCopy();
-
-  return *this;
-}
-
-void TraceConfig::EventFilterConfig::AddIncludedCategory(
-    const std::string& category) {
-  included_categories_.push_back(category);
-}
-
-void TraceConfig::EventFilterConfig::AddExcludedCategory(
-    const std::string& category) {
-  excluded_categories_.push_back(category);
-}
-
-void TraceConfig::EventFilterConfig::SetArgs(
-    std::unique_ptr<base::DictionaryValue> args) {
-  args_ = std::move(args);
-}
-
-bool TraceConfig::EventFilterConfig::GetArgAsSet(
-    const char* key,
-    std::unordered_set<std::string>* out_set) const {
-  const ListValue* list = nullptr;
-  if (!args_->GetList(key, &list))
-    return false;
-  for (size_t i = 0; i < list->GetSize(); ++i) {
-    std::string value;
-    if (list->GetString(i, &value))
-      out_set->insert(value);
-  }
-  return true;
-}
-
-bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
-    const char* category_group_name) const {
-  CStringTokenizer category_group_tokens(
-      category_group_name, category_group_name + strlen(category_group_name),
-      ",");
-  while (category_group_tokens.GetNext()) {
-    std::string category_group_token = category_group_tokens.token();
-
-    for (const auto& excluded_category : excluded_categories_) {
-      if (base::MatchPattern(category_group_token, excluded_category)) {
-        return false;
-      }
-    }
-
-    for (const auto& included_category : included_categories_) {
-      if (base::MatchPattern(category_group_token, included_category)) {
-        return true;
-      }
-    }
-  }
-
-  return false;
-}
-
 TraceConfig::TraceConfig() {
   InitializeDefault();
 }
@@ -253,14 +159,14 @@
 
 TraceConfig::TraceConfig(const TraceConfig& tc)
     : record_mode_(tc.record_mode_),
+      enable_sampling_(tc.enable_sampling_),
       enable_systrace_(tc.enable_systrace_),
       enable_argument_filter_(tc.enable_argument_filter_),
       memory_dump_config_(tc.memory_dump_config_),
       included_categories_(tc.included_categories_),
       disabled_categories_(tc.disabled_categories_),
       excluded_categories_(tc.excluded_categories_),
-      synthetic_delays_(tc.synthetic_delays_),
-      event_filters_(tc.event_filters_) {}
+      synthetic_delays_(tc.synthetic_delays_) {}
 
 TraceConfig::~TraceConfig() {
 }
@@ -270,6 +176,7 @@
     return *this;
 
   record_mode_ = rhs.record_mode_;
+  enable_sampling_ = rhs.enable_sampling_;
   enable_systrace_ = rhs.enable_systrace_;
   enable_argument_filter_ = rhs.enable_argument_filter_;
   memory_dump_config_ = rhs.memory_dump_config_;
@@ -277,7 +184,6 @@
   disabled_categories_ = rhs.disabled_categories_;
   excluded_categories_ = rhs.excluded_categories_;
   synthetic_delays_ = rhs.synthetic_delays_;
-  event_filters_ = rhs.event_filters_;
   return *this;
 }
 
@@ -294,7 +200,7 @@
 
 std::unique_ptr<ConvertableToTraceFormat>
 TraceConfig::AsConvertableToTraceFormat() const {
-  return MakeUnique<ConvertableTraceConfigToTraceFormat>(*this);
+  return WrapUnique(new ConvertableTraceConfigToTraceFormat(*this));
 }
 
 std::string TraceConfig::ToCategoryFilterString() const {
@@ -365,6 +271,7 @@
 
 void TraceConfig::Merge(const TraceConfig& config) {
   if (record_mode_ != config.record_mode_
+      || enable_sampling_ != config.enable_sampling_
       || enable_systrace_ != config.enable_systrace_
       || enable_argument_filter_ != config.enable_argument_filter_) {
     DLOG(ERROR) << "Attempting to merge trace config with a different "
@@ -382,7 +289,9 @@
     included_categories_.clear();
   }
 
-  memory_dump_config_.Merge(config.memory_dump_config_);
+  memory_dump_config_.triggers.insert(memory_dump_config_.triggers.end(),
+                             config.memory_dump_config_.triggers.begin(),
+                             config.memory_dump_config_.triggers.end());
 
   disabled_categories_.insert(disabled_categories_.end(),
                               config.disabled_categories_.begin(),
@@ -393,12 +302,11 @@
   synthetic_delays_.insert(synthetic_delays_.end(),
                            config.synthetic_delays_.begin(),
                            config.synthetic_delays_.end());
-  event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
-                        config.event_filters().end());
 }
 
 void TraceConfig::Clear() {
   record_mode_ = RECORD_UNTIL_FULL;
+  enable_sampling_ = false;
   enable_systrace_ = false;
   enable_argument_filter_ = false;
   included_categories_.clear();
@@ -406,11 +314,11 @@
   excluded_categories_.clear();
   synthetic_delays_.clear();
   memory_dump_config_.Clear();
-  event_filters_.clear();
 }
 
 void TraceConfig::InitializeDefault() {
   record_mode_ = RECORD_UNTIL_FULL;
+  enable_sampling_ = false;
   enable_systrace_ = false;
   enable_argument_filter_ = false;
 }
@@ -431,6 +339,7 @@
   }
 
   bool val;
+  enable_sampling_ = dict.GetBoolean(kEnableSamplingParam, &val) ? val : false;
   enable_systrace_ = dict.GetBoolean(kEnableSystraceParam, &val) ? val : false;
   enable_argument_filter_ =
       dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
@@ -443,10 +352,6 @@
   if (dict.GetList(kSyntheticDelaysParam, &category_list))
     SetSyntheticDelaysFromList(*category_list);
 
-  const base::ListValue* category_event_filters = nullptr;
-  if (dict.GetList(kEventFiltersParam, &category_event_filters))
-    SetEventFiltersFromConfigList(*category_event_filters);
-
   if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
     // If dump triggers not set, the client is using the legacy with just
     // category enabled. So, use the default periodic dump config.
@@ -501,6 +406,7 @@
   }
 
   record_mode_ = RECORD_UNTIL_FULL;
+  enable_sampling_ = false;
   enable_systrace_ = false;
   enable_argument_filter_ = false;
   if (!trace_options_string.empty()) {
@@ -515,6 +421,8 @@
         record_mode_ = ECHO_TO_CONSOLE;
       } else if (token == kRecordAsMuchAsPossible) {
         record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
+      } else if (token == kEnableSampling) {
+        enable_sampling_ = true;
       } else if (token == kEnableSystrace) {
         enable_systrace_ = true;
       } else if (token == kEnableArgumentFilter) {
@@ -608,26 +516,17 @@
       if (!trigger_list->GetDictionary(i, &trigger))
         continue;
 
-      MemoryDumpConfig::Trigger dump_config;
       int interval = 0;
-      if (!trigger->GetInteger(kMinTimeBetweenDumps, &interval)) {
-        // If "min_time_between_dumps_ms" param was not given, then the trace
-        // config uses old format where only periodic dumps are supported.
-        trigger->GetInteger(kPeriodicIntervalLegacyParam, &interval);
-        dump_config.trigger_type = MemoryDumpType::PERIODIC_INTERVAL;
-      } else {
-        std::string trigger_type_str;
-        trigger->GetString(kTriggerTypeParam, &trigger_type_str);
-        dump_config.trigger_type = StringToMemoryDumpType(trigger_type_str);
-      }
-      DCHECK_GT(interval, 0);
-      dump_config.min_time_between_dumps_ms = static_cast<uint32_t>(interval);
+      if (!trigger->GetInteger(kPeriodicIntervalParam, &interval))
+        continue;
 
+      DCHECK_GT(interval, 0);
+      MemoryDumpConfig::Trigger dump_config;
+      dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
       std::string level_of_detail_str;
-      trigger->GetString(kTriggerModeParam, &level_of_detail_str);
+      trigger->GetString(kModeParam, &level_of_detail_str);
       dump_config.level_of_detail =
           StringToMemoryDumpLevelOfDetail(level_of_detail_str);
-
       memory_dump_config_.triggers.push_back(dump_config);
     }
   }
@@ -656,50 +555,6 @@
   memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
 }
 
-void TraceConfig::SetEventFiltersFromConfigList(
-    const base::ListValue& category_event_filters) {
-  event_filters_.clear();
-
-  for (size_t event_filter_index = 0;
-       event_filter_index < category_event_filters.GetSize();
-       ++event_filter_index) {
-    const base::DictionaryValue* event_filter = nullptr;
-    if (!category_event_filters.GetDictionary(event_filter_index,
-                                              &event_filter))
-      continue;
-
-    std::string predicate_name;
-    CHECK(event_filter->GetString(kFilterPredicateParam, &predicate_name))
-        << "Invalid predicate name in category event filter.";
-
-    EventFilterConfig new_config(predicate_name);
-    const base::ListValue* included_list = nullptr;
-    CHECK(event_filter->GetList(kIncludedCategoriesParam, &included_list))
-        << "Missing included_categories in category event filter.";
-
-    for (size_t i = 0; i < included_list->GetSize(); ++i) {
-      std::string category;
-      if (included_list->GetString(i, &category))
-        new_config.AddIncludedCategory(category);
-    }
-
-    const base::ListValue* excluded_list = nullptr;
-    if (event_filter->GetList(kExcludedCategoriesParam, &excluded_list)) {
-      for (size_t i = 0; i < excluded_list->GetSize(); ++i) {
-        std::string category;
-        if (excluded_list->GetString(i, &category))
-          new_config.AddExcludedCategory(category);
-      }
-    }
-
-    const base::DictionaryValue* args_dict = nullptr;
-    if (event_filter->GetDictionary(kFilterArgsParam, &args_dict))
-      new_config.SetArgs(args_dict->CreateDeepCopy());
-
-    event_filters_.push_back(new_config);
-  }
-}
-
 std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
   auto dict = MakeUnique<DictionaryValue>();
   switch (record_mode_) {
@@ -719,6 +574,7 @@
       NOTREACHED();
   }
 
+  dict->SetBoolean(kEnableSamplingParam, enable_sampling_);
   dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
   dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
 
@@ -730,41 +586,6 @@
   AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
   AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
 
-  if (!event_filters_.empty()) {
-    std::unique_ptr<base::ListValue> filter_list(new base::ListValue());
-    for (const EventFilterConfig& filter : event_filters_) {
-      std::unique_ptr<base::DictionaryValue> filter_dict(
-          new base::DictionaryValue());
-      filter_dict->SetString(kFilterPredicateParam, filter.predicate_name());
-
-      std::unique_ptr<base::ListValue> included_categories_list(
-          new base::ListValue());
-      for (const std::string& included_category : filter.included_categories())
-        included_categories_list->AppendString(included_category);
-
-      filter_dict->Set(kIncludedCategoriesParam,
-                       std::move(included_categories_list));
-
-      if (!filter.excluded_categories().empty()) {
-        std::unique_ptr<base::ListValue> excluded_categories_list(
-            new base::ListValue());
-        for (const std::string& excluded_category :
-             filter.excluded_categories())
-          excluded_categories_list->AppendString(excluded_category);
-
-        filter_dict->Set(kExcludedCategoriesParam,
-                         std::move(excluded_categories_list));
-      }
-
-      if (filter.filter_args())
-        filter_dict->Set(kFilterArgsParam,
-                         filter.filter_args()->CreateDeepCopy());
-
-      filter_list->Append(std::move(filter_dict));
-    }
-    dict->Set(kEventFiltersParam, std::move(filter_list));
-  }
-
   if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
     auto allowed_modes = MakeUnique<ListValue>();
     for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
@@ -776,14 +597,10 @@
     auto triggers_list = MakeUnique<ListValue>();
     for (const auto& config : memory_dump_config_.triggers) {
       auto trigger_dict = MakeUnique<DictionaryValue>();
-      trigger_dict->SetString(kTriggerTypeParam,
-                              MemoryDumpTypeToString(config.trigger_type));
-      trigger_dict->SetInteger(
-          kMinTimeBetweenDumps,
-          static_cast<int>(config.min_time_between_dumps_ms));
+      trigger_dict->SetInteger(kPeriodicIntervalParam,
+                               static_cast<int>(config.periodic_interval_ms));
       trigger_dict->SetString(
-          kTriggerModeParam,
-          MemoryDumpLevelOfDetailToString(config.level_of_detail));
+          kModeParam, MemoryDumpLevelOfDetailToString(config.level_of_detail));
       triggers_list->Append(std::move(trigger_dict));
     }
 
@@ -822,6 +639,8 @@
     default:
       NOTREACHED();
   }
+  if (enable_sampling_)
+    ret = ret + "," + kEnableSampling;
   if (enable_systrace_)
     ret = ret + "," + kEnableSystrace;
   if (enable_argument_filter_)
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 717c261..91d6f1f 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -7,10 +7,8 @@
 
 #include <stdint.h>
 
-#include <memory>
 #include <set>
 #include <string>
-#include <unordered_set>
 #include <vector>
 
 #include "base/base_export.h"
@@ -53,9 +51,8 @@
 
     // Specifies the triggers in the memory dump config.
     struct Trigger {
-      uint32_t min_time_between_dumps_ms;
+      uint32_t periodic_interval_ms;
       MemoryDumpLevelOfDetail level_of_detail;
-      MemoryDumpType trigger_type;
     };
 
     // Specifies the configuration options for the heap profiler.
@@ -74,8 +71,6 @@
     // Reset the values in the config.
     void Clear();
 
-    void Merge(const MemoryDumpConfig& config);
-
     // Set of memory dump modes allowed for the tracing session. The explicitly
     // triggered dumps will be successful only if the dump mode is allowed in
     // the config.
@@ -85,39 +80,6 @@
     HeapProfiler heap_profiler_options;
   };
 
-  class BASE_EXPORT EventFilterConfig {
-   public:
-    EventFilterConfig(const std::string& predicate_name);
-    EventFilterConfig(const EventFilterConfig& tc);
-
-    ~EventFilterConfig();
-
-    EventFilterConfig& operator=(const EventFilterConfig& rhs);
-
-    void AddIncludedCategory(const std::string& category);
-    void AddExcludedCategory(const std::string& category);
-    void SetArgs(std::unique_ptr<base::DictionaryValue> args);
-    bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
-
-    bool IsCategoryGroupEnabled(const char* category_group_name) const;
-
-    const std::string& predicate_name() const { return predicate_name_; }
-    base::DictionaryValue* filter_args() const { return args_.get(); }
-    const StringList& included_categories() const {
-      return included_categories_;
-    }
-    const StringList& excluded_categories() const {
-      return excluded_categories_;
-    }
-
-   private:
-    std::string predicate_name_;
-    StringList included_categories_;
-    StringList excluded_categories_;
-    std::unique_ptr<base::DictionaryValue> args_;
-  };
-  typedef std::vector<EventFilterConfig> EventFilters;
-
   TraceConfig();
 
   // Create TraceConfig object from category filter and trace options strings.
@@ -131,22 +93,22 @@
   //
   // |trace_options_string| is a comma-delimited list of trace options.
   // Possible options are: "record-until-full", "record-continuously",
-  // "record-as-much-as-possible", "trace-to-console", "enable-systrace" and
-  // "enable-argument-filter".
+  // "record-as-much-as-possible", "trace-to-console", "enable-sampling",
+  // "enable-systrace" and "enable-argument-filter".
   // The first 4 options are trace recoding modes and hence
   // mutually exclusive. If more than one trace recording modes appear in the
   // options_string, the last one takes precedence. If none of the trace
   // recording mode is specified, recording mode is RECORD_UNTIL_FULL.
   //
   // The trace option will first be reset to the default option
-  // (record_mode set to RECORD_UNTIL_FULL, enable_systrace and
-  // enable_argument_filter set to false) before options parsed from
+  // (record_mode set to RECORD_UNTIL_FULL, enable_sampling, enable_systrace,
+  // and enable_argument_filter set to false) before options parsed from
   // |trace_options_string| are applied on it. If |trace_options_string| is
   // invalid, the final state of trace options is undefined.
   //
   // Example: TraceConfig("test_MyTest*", "record-until-full");
   // Example: TraceConfig("test_MyTest*,test_OtherStuff",
-  //                      "record-continuously");
+  //                      "record-continuously, enable-sampling");
   // Example: TraceConfig("-excluded_category1,-excluded_category2",
   //                      "record-until-full, trace-to-console");
   //          would set ECHO_TO_CONSOLE as the recording mode.
@@ -176,6 +138,7 @@
   // Example:
   //   {
   //     "record_mode": "record-continuously",
+  //     "enable_sampling": true,
   //     "enable_systrace": true,
   //     "enable_argument_filter": true,
   //     "included_categories": ["included",
@@ -211,10 +174,12 @@
   const StringList& GetSyntheticDelayValues() const;
 
   TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
+  bool IsSamplingEnabled() const { return enable_sampling_; }
   bool IsSystraceEnabled() const { return enable_systrace_; }
   bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
 
   void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
+  void EnableSampling() { enable_sampling_ = true; }
   void EnableSystrace() { enable_systrace_ = true; }
   void EnableArgumentFilter() { enable_argument_filter_ = true; }
 
@@ -231,7 +196,7 @@
   // Returns true if at least one category in the list is enabled by this
   // trace config. This is used to determine if the category filters are
   // enabled in the TRACE_* macros.
-  bool IsCategoryGroupEnabled(const char* category_group_name) const;
+  bool IsCategoryGroupEnabled(const char* category_group) const;
 
   // Merges config with the current TraceConfig
   void Merge(const TraceConfig& config);
@@ -245,11 +210,6 @@
     return memory_dump_config_;
   }
 
-  const EventFilters& event_filters() const { return event_filters_; }
-  void SetEventFilters(const EventFilters& filter_configs) {
-    event_filters_ = filter_configs;
-  }
-
  private:
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
@@ -290,7 +250,6 @@
       const DictionaryValue& memory_dump_config);
   void SetDefaultMemoryDumpConfig();
 
-  void SetEventFiltersFromConfigList(const base::ListValue& event_filters);
   std::unique_ptr<DictionaryValue> ToDict() const;
 
   std::string ToTraceOptionsString() const;
@@ -312,6 +271,7 @@
   bool HasIncludedPatterns() const;
 
   TraceRecordMode record_mode_;
+  bool enable_sampling_ : 1;
   bool enable_systrace_ : 1;
   bool enable_argument_filter_ : 1;
 
@@ -321,7 +281,6 @@
   StringList disabled_categories_;
   StringList excluded_categories_;
   StringList synthetic_delays_;
-  EventFilters event_filters_;
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/trace_config_memory_test_util.h b/base/trace_event/trace_config_memory_test_util.h
index 744e8a8..6b47f8d 100644
--- a/base/trace_event/trace_config_memory_test_util.h
+++ b/base/trace_event/trace_config_memory_test_util.h
@@ -13,144 +13,87 @@
 
 class TraceConfigMemoryTestUtil {
  public:
-  static std::string GetTraceConfig_LegacyPeriodicTriggers(int light_period,
-                                                           int heavy_period) {
-    return StringPrintf(
-        "{"
-        "\"enable_argument_filter\":false,"
-        "\"enable_systrace\":false,"
-        "\"included_categories\":["
-        "\"%s\""
-        "],"
-        "\"memory_dump_config\":{"
-        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
-        "\"heap_profiler_options\":{"
-        "\"breakdown_threshold_bytes\":2048"
-        "},"
-        "\"triggers\":["
-        "{"
-        "\"mode\":\"light\","
-        "\"periodic_interval_ms\":%d"
-        "},"
-        "{"
-        "\"mode\":\"detailed\","
-        "\"periodic_interval_ms\":%d"
-        "}"
-        "]"
-        "},"
-        "\"record_mode\":\"record-until-full\""
-        "}",
-        MemoryDumpManager::kTraceCategory, light_period, heavy_period);
-    ;
-  }
-
   static std::string GetTraceConfig_PeriodicTriggers(int light_period,
                                                      int heavy_period) {
     return StringPrintf(
         "{"
-        "\"enable_argument_filter\":false,"
-        "\"enable_systrace\":false,"
-        "\"included_categories\":["
-        "\"%s\""
-        "],"
-        "\"memory_dump_config\":{"
-        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
-        "\"heap_profiler_options\":{"
-        "\"breakdown_threshold_bytes\":2048"
-        "},"
-        "\"triggers\":["
-        "{"
-        "\"min_time_between_dumps_ms\":%d,"
-        "\"mode\":\"light\","
-        "\"type\":\"periodic_interval\""
-        "},"
-        "{"
-        "\"min_time_between_dumps_ms\":%d,"
-        "\"mode\":\"detailed\","
-        "\"type\":\"periodic_interval\""
-        "}"
-        "]"
-        "},"
-        "\"record_mode\":\"record-until-full\""
-        "}",
-        MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"memory_dump_config\":{"
+             "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+             "\"heap_profiler_options\":{"
+               "\"breakdown_threshold_bytes\":2048"
+             "},"
+             "\"triggers\":["
+              "{"
+                "\"mode\":\"light\","
+                "\"periodic_interval_ms\":%d"
+              "},"
+              "{"
+                "\"mode\":\"detailed\","
+                "\"periodic_interval_ms\":%d"
+              "}"
+            "]"
+          "},"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory, light_period, heavy_period);
   }
 
   static std::string GetTraceConfig_EmptyTriggers() {
     return StringPrintf(
         "{"
-        "\"enable_argument_filter\":false,"
-        "\"enable_systrace\":false,"
-        "\"included_categories\":["
-        "\"%s\""
-        "],"
-        "\"memory_dump_config\":{"
-        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
-        "\"triggers\":["
-        "]"
-        "},"
-        "\"record_mode\":\"record-until-full\""
-        "}",
-        MemoryDumpManager::kTraceCategory);
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"memory_dump_config\":{"
+            "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+            "\"triggers\":["
+            "]"
+          "},"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory);
   }
 
   static std::string GetTraceConfig_NoTriggers() {
     return StringPrintf(
         "{"
-        "\"enable_argument_filter\":false,"
-        "\"enable_systrace\":false,"
-        "\"included_categories\":["
-        "\"%s\""
-        "],"
-        "\"record_mode\":\"record-until-full\""
-        "}",
-        MemoryDumpManager::kTraceCategory);
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory);
   }
 
   static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
     return StringPrintf(
         "{"
-        "\"enable_argument_filter\":false,"
-        "\"enable_systrace\":false,"
-        "\"included_categories\":["
-        "\"%s\""
-        "],"
-        "\"memory_dump_config\":{"
-        "\"allowed_dump_modes\":[\"background\"],"
-        "\"triggers\":["
-        "{"
-        "\"min_time_between_dumps_ms\":%d,"
-        "\"mode\":\"background\","
-        "\"type\":\"periodic_interval\""
-        "}"
-        "]"
-        "},"
-        "\"record_mode\":\"record-until-full\""
-        "}",
-        MemoryDumpManager::kTraceCategory, period_ms);
-  }
-
-  static std::string GetTraceConfig_PeakDetectionTrigger(int heavy_period) {
-    return StringPrintf(
-        "{"
-        "\"enable_argument_filter\":false,"
-        "\"enable_systrace\":false,"
-        "\"included_categories\":["
-        "\"%s\""
-        "],"
-        "\"memory_dump_config\":{"
-        "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
-        "\"triggers\":["
-        "{"
-        "\"min_time_between_dumps_ms\":%d,"
-        "\"mode\":\"detailed\","
-        "\"type\":\"peak_memory_usage\""
-        "}"
-        "]"
-        "},"
-        "\"record_mode\":\"record-until-full\""
-        "}",
-        MemoryDumpManager::kTraceCategory, heavy_period);
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"memory_dump_config\":{"
+             "\"allowed_dump_modes\":[\"background\"],"
+             "\"triggers\":["
+              "{"
+                "\"mode\":\"background\","
+                "\"periodic_interval_ms\":%d"
+              "}"
+            "]"
+          "},"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory, period_ms);
   }
 };
 
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index 74aa7bd..4b46b2f 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -5,7 +5,6 @@
 #include <stddef.h>
 
 #include "base/json/json_reader.h"
-#include "base/json/json_writer.h"
 #include "base/macros.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/trace_config.h"
@@ -20,52 +19,38 @@
 const char kDefaultTraceConfigString[] =
   "{"
     "\"enable_argument_filter\":false,"
+    "\"enable_sampling\":false,"
     "\"enable_systrace\":false,"
     "\"record_mode\":\"record-until-full\""
   "}";
 
 const char kCustomTraceConfigString[] =
-    "{"
+  "{"
     "\"enable_argument_filter\":true,"
+    "\"enable_sampling\":true,"
     "\"enable_systrace\":true,"
-    "\"event_filters\":["
-    "{"
-    "\"excluded_categories\":[\"unfiltered_cat\"],"
-    "\"filter_args\":{\"event_name_whitelist\":[\"a snake\",\"a dog\"]},"
-    "\"filter_predicate\":\"event_whitelist_predicate\","
-    "\"included_categories\":[\"*\"]"
-    "}"
-    "],"
     "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
-    "\"included_categories\":["
-    "\"included\","
-    "\"inc_pattern*\","
-    "\"disabled-by-default-cc\","
-    "\"disabled-by-default-memory-infra\"],"
+    "\"included_categories\":[\"included\","
+                            "\"inc_pattern*\","
+                            "\"disabled-by-default-cc\","
+                            "\"disabled-by-default-memory-infra\"],"
     "\"memory_dump_config\":{"
-    "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
-    "\"heap_profiler_options\":{"
-    "\"breakdown_threshold_bytes\":10240"
-    "},"
-    "\"triggers\":["
-    "{"
-    "\"min_time_between_dumps_ms\":50,"
-    "\"mode\":\"light\","
-    "\"type\":\"periodic_interval\""
-    "},"
-    "{"
-    "\"min_time_between_dumps_ms\":1000,"
-    "\"mode\":\"detailed\","
-    "\"type\":\"peak_memory_usage\""
-    "}"
-    "]"
+      "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+      "\"heap_profiler_options\":{"
+        "\"breakdown_threshold_bytes\":10240"
+      "},"
+      "\"triggers\":["
+        "{\"mode\":\"light\",\"periodic_interval_ms\":50},"
+        "{\"mode\":\"detailed\",\"periodic_interval_ms\":1000}"
+      "]"
     "},"
     "\"record_mode\":\"record-continuously\","
     "\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
-    "}";
+  "}";
 
 void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
 
@@ -87,31 +72,44 @@
   // From trace options strings
   TraceConfig config("", "record-until-full");
   EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
 
   config = TraceConfig("", "record-continuously");
   EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
 
   config = TraceConfig("", "trace-to-console");
   EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
 
   config = TraceConfig("", "record-as-much-as-possible");
   EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("record-as-much-as-possible",
                config.ToTraceOptionsString().c_str());
 
+  config = TraceConfig("", "record-until-full, enable-sampling");
+  EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSamplingEnabled());
+  EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsArgumentFilterEnabled());
+  EXPECT_STREQ("record-until-full,enable-sampling",
+               config.ToTraceOptionsString().c_str());
+
   config = TraceConfig("", "enable-systrace, record-continuously");
   EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_TRUE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("record-continuously,enable-systrace",
@@ -119,6 +117,7 @@
 
   config = TraceConfig("", "enable-argument-filter,record-as-much-as-possible");
   EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_TRUE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("record-as-much-as-possible,enable-argument-filter",
@@ -126,17 +125,19 @@
 
   config = TraceConfig(
     "",
-    "enable-systrace,trace-to-console,enable-argument-filter");
+    "enable-systrace,trace-to-console,enable-sampling,enable-argument-filter");
   EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSamplingEnabled());
   EXPECT_TRUE(config.IsSystraceEnabled());
   EXPECT_TRUE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ(
-    "trace-to-console,enable-systrace,enable-argument-filter",
+    "trace-to-console,enable-sampling,enable-systrace,enable-argument-filter",
     config.ToTraceOptionsString().c_str());
 
   config = TraceConfig(
     "", "record-continuously, record-until-full, trace-to-console");
   EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
@@ -144,24 +145,28 @@
   // From TraceRecordMode
   config = TraceConfig("", RECORD_UNTIL_FULL);
   EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
 
   config = TraceConfig("", RECORD_CONTINUOUSLY);
   EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
 
   config = TraceConfig("", ECHO_TO_CONSOLE);
   EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
 
   config = TraceConfig("", RECORD_AS_MUCH_AS_POSSIBLE);
   EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("record-as-much-as-possible",
@@ -193,30 +198,33 @@
   // From both trace options and category filter strings
   config = TraceConfig("", "");
   EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
   EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
 
   config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*",
-                       "enable-systrace, trace-to-console");
+                       "enable-systrace, trace-to-console, enable-sampling");
   EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSamplingEnabled());
   EXPECT_TRUE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
                config.ToCategoryFilterString().c_str());
-  EXPECT_STREQ("trace-to-console,enable-systrace",
+  EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
                config.ToTraceOptionsString().c_str());
 
   // From both trace options and category filter strings with spaces.
   config = TraceConfig(" included , -excluded, inc_pattern*, ,-exc_pattern*   ",
-                       "enable-systrace, ,trace-to-console  ");
+                       "enable-systrace, ,trace-to-console, enable-sampling  ");
   EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
+  EXPECT_TRUE(config.IsSamplingEnabled());
   EXPECT_TRUE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
                config.ToCategoryFilterString().c_str());
-  EXPECT_STREQ("trace-to-console,enable-systrace",
+  EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
                config.ToTraceOptionsString().c_str());
 
   // From category filter string and TraceRecordMode
@@ -224,6 +232,7 @@
                        RECORD_CONTINUOUSLY);
   EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
   EXPECT_FALSE(config.IsSystraceEnabled());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
                config.ToCategoryFilterString().c_str());
@@ -233,6 +242,7 @@
 TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
   TraceConfig config("", "foo-bar-baz");
   EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_FALSE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
@@ -240,6 +250,7 @@
 
   config = TraceConfig("arbitrary-category", "foo-bar-baz, enable-systrace");
   EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
+  EXPECT_FALSE(config.IsSamplingEnabled());
   EXPECT_TRUE(config.IsSystraceEnabled());
   EXPECT_FALSE(config.IsArgumentFilterEnabled());
   EXPECT_STREQ("arbitrary-category", config.ToCategoryFilterString().c_str());
@@ -319,7 +330,6 @@
   EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
   EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
 
-  EXPECT_TRUE(tc.event_filters().empty());
   // Enabling only the disabled-by-default-* category means the default ones
   // are also enabled.
   tc = TraceConfig("disabled-by-default-foo", "");
@@ -336,6 +346,7 @@
   TraceConfig tc(dict);
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -349,6 +360,7 @@
   TraceConfig default_tc(*default_dict);
   EXPECT_STREQ(kDefaultTraceConfigString, default_tc.ToString().c_str());
   EXPECT_EQ(RECORD_UNTIL_FULL, default_tc.GetTraceRecordMode());
+  EXPECT_FALSE(default_tc.IsSamplingEnabled());
   EXPECT_FALSE(default_tc.IsSystraceEnabled());
   EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
@@ -362,6 +374,7 @@
   TraceConfig custom_tc(*custom_dict);
   EXPECT_STREQ(kCustomTraceConfigString, custom_tc.ToString().c_str());
   EXPECT_EQ(RECORD_CONTINUOUSLY, custom_tc.GetTraceRecordMode());
+  EXPECT_TRUE(custom_tc.IsSamplingEnabled());
   EXPECT_TRUE(custom_tc.IsSystraceEnabled());
   EXPECT_TRUE(custom_tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("included,inc_pattern*,"
@@ -374,28 +387,22 @@
 TEST(TraceConfigTest, TraceConfigFromValidString) {
   // Using some non-empty config string.
   const char config_string[] =
-      "{"
+    "{"
       "\"enable_argument_filter\":true,"
+      "\"enable_sampling\":true,"
       "\"enable_systrace\":true,"
-      "\"event_filters\":["
-      "{"
-      "\"excluded_categories\":[\"unfiltered_cat\"],"
-      "\"filter_args\":{\"event_name_whitelist\":[\"a snake\",\"a dog\"]},"
-      "\"filter_predicate\":\"event_whitelist_predicate\","
-      "\"included_categories\":[\"*\"]"
-      "}"
-      "],"
       "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
       "\"included_categories\":[\"included\","
-      "\"inc_pattern*\","
-      "\"disabled-by-default-cc\"],"
+                               "\"inc_pattern*\","
+                               "\"disabled-by-default-cc\"],"
       "\"record_mode\":\"record-continuously\","
       "\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
-      "}";
+    "}";
   TraceConfig tc(config_string);
 
   EXPECT_STREQ(config_string, tc.ToString().c_str());
   EXPECT_EQ(RECORD_CONTINUOUSLY, tc.GetTraceRecordMode());
+  EXPECT_TRUE(tc.IsSamplingEnabled());
   EXPECT_TRUE(tc.IsSystraceEnabled());
   EXPECT_TRUE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("included,inc_pattern*,disabled-by-default-cc,-excluded,"
@@ -427,26 +434,6 @@
   EXPECT_STREQ("test.Delay1;16", tc.GetSyntheticDelayValues()[0].c_str());
   EXPECT_STREQ("test.Delay2;32", tc.GetSyntheticDelayValues()[1].c_str());
 
-  EXPECT_EQ(tc.event_filters().size(), 1u);
-  const TraceConfig::EventFilterConfig& event_filter = tc.event_filters()[0];
-  EXPECT_STREQ("event_whitelist_predicate",
-               event_filter.predicate_name().c_str());
-  EXPECT_EQ(1u, event_filter.included_categories().size());
-  EXPECT_STREQ("*", event_filter.included_categories()[0].c_str());
-  EXPECT_EQ(1u, event_filter.excluded_categories().size());
-  EXPECT_STREQ("unfiltered_cat", event_filter.excluded_categories()[0].c_str());
-  EXPECT_TRUE(event_filter.filter_args());
-
-  std::string json_out;
-  base::JSONWriter::Write(*event_filter.filter_args(), &json_out);
-  EXPECT_STREQ(json_out.c_str(),
-               "{\"event_name_whitelist\":[\"a snake\",\"a dog\"]}");
-  std::unordered_set<std::string> filter_values;
-  EXPECT_TRUE(event_filter.GetArgAsSet("event_name_whitelist", &filter_values));
-  EXPECT_EQ(2u, filter_values.size());
-  EXPECT_EQ(1u, filter_values.count("a snake"));
-  EXPECT_EQ(1u, filter_values.count("a dog"));
-
   const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
   TraceConfig tc2(config_string_2);
   EXPECT_TRUE(tc2.IsCategoryEnabled("non-disabled-by-default-pattern"));
@@ -459,6 +446,7 @@
   EXPECT_STREQ(tc.ToString().c_str(),
                "{"
                  "\"enable_argument_filter\":false,"
+                 "\"enable_sampling\":false,"
                  "\"enable_systrace\":false,"
                  "\"record_mode\":\"record-until-full\""
                "}");
@@ -470,6 +458,7 @@
   TraceConfig tc("");
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -478,6 +467,7 @@
   tc = TraceConfig("This is an invalid config string.");
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -486,6 +476,7 @@
   tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -494,6 +485,7 @@
   tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -503,6 +495,7 @@
   // initialize TraceConfig with best effort.
   tc = TraceConfig("{}");
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -510,6 +503,7 @@
 
   tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -517,6 +511,7 @@
 
   const char invalid_config_string[] =
     "{"
+      "\"enable_sampling\":\"true\","
       "\"enable_systrace\":1,"
       "\"excluded_categories\":[\"excluded\"],"
       "\"included_categories\":\"not a list\","
@@ -527,6 +522,7 @@
     "}";
   tc = TraceConfig(invalid_config_string);
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("-excluded,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
@@ -551,6 +547,7 @@
   tc.Merge(tc2);
   EXPECT_STREQ("{"
                  "\"enable_argument_filter\":false,"
+                 "\"enable_sampling\":false,"
                  "\"enable_systrace\":false,"
                  "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
                  "\"record_mode\":\"record-until-full\""
@@ -617,11 +614,15 @@
 TEST(TraceConfigTest, SetTraceOptionValues) {
   TraceConfig tc;
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
 
   tc.SetTraceRecordMode(RECORD_AS_MUCH_AS_POSSIBLE);
   EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, tc.GetTraceRecordMode());
 
+  tc.EnableSampling();
+  EXPECT_TRUE(tc.IsSamplingEnabled());
+
   tc.EnableSystrace();
   EXPECT_TRUE(tc.IsSystraceEnabled());
 }
@@ -631,47 +632,30 @@
       TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
   TraceConfig tc1(tc_str1);
   EXPECT_EQ(tc_str1, tc1.ToString());
-  TraceConfig tc2(
-      TraceConfigMemoryTestUtil::GetTraceConfig_LegacyPeriodicTriggers(200,
-                                                                       2000));
-  EXPECT_EQ(tc_str1, tc2.ToString());
-
   EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
   ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
 
-  EXPECT_EQ(200u,
-            tc1.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+  EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
             tc1.memory_dump_config_.triggers[0].level_of_detail);
 
-  EXPECT_EQ(2000u,
-            tc1.memory_dump_config_.triggers[1].min_time_between_dumps_ms);
+  EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
             tc1.memory_dump_config_.triggers[1].level_of_detail);
   EXPECT_EQ(
       2048u,
       tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
 
-  std::string tc_str3 =
+  std::string tc_str2 =
       TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
           1 /* period_ms */);
-  TraceConfig tc3(tc_str3);
-  EXPECT_EQ(tc_str3, tc3.ToString());
-  EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
-  ASSERT_EQ(1u, tc3.memory_dump_config_.triggers.size());
-  EXPECT_EQ(1u, tc3.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+  TraceConfig tc2(tc_str2);
+  EXPECT_EQ(tc_str2, tc2.ToString());
+  EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+  ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
+  EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
-            tc3.memory_dump_config_.triggers[0].level_of_detail);
-
-  std::string tc_str4 =
-      TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
-          1 /*heavy_period */);
-  TraceConfig tc4(tc_str4);
-  EXPECT_EQ(tc_str4, tc4.ToString());
-  ASSERT_EQ(1u, tc4.memory_dump_config_.triggers.size());
-  EXPECT_EQ(1u, tc4.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
-  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
-            tc4.memory_dump_config_.triggers[0].level_of_detail);
+            tc2.memory_dump_config_.triggers[0].level_of_detail);
 }
 
 TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
new file mode 100644
index 0000000..f915780
--- /dev/null
+++ b/base/trace_event/trace_event.gypi
@@ -0,0 +1,107 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'variables': {
+    'trace_event_sources' : [
+      'trace_event/blame_context.cc',
+      'trace_event/blame_context.h',
+      'trace_event/common/trace_event_common.h',
+      'trace_event/heap_profiler.h',
+      'trace_event/heap_profiler_allocation_context.cc',
+      'trace_event/heap_profiler_allocation_context.h',
+      'trace_event/heap_profiler_allocation_context_tracker.cc',
+      'trace_event/heap_profiler_allocation_context_tracker.h',
+      'trace_event/heap_profiler_allocation_register.cc',
+      'trace_event/heap_profiler_allocation_register_posix.cc',
+      'trace_event/heap_profiler_allocation_register_win.cc',
+      'trace_event/heap_profiler_allocation_register.h',
+      'trace_event/heap_profiler_heap_dump_writer.cc',
+      'trace_event/heap_profiler_heap_dump_writer.h',
+      'trace_event/heap_profiler_stack_frame_deduplicator.cc',
+      'trace_event/heap_profiler_stack_frame_deduplicator.h',
+      'trace_event/heap_profiler_type_name_deduplicator.cc',
+      'trace_event/heap_profiler_type_name_deduplicator.h',
+      'trace_event/java_heap_dump_provider_android.cc',
+      'trace_event/java_heap_dump_provider_android.h',
+      'trace_event/memory_allocator_dump.cc',
+      'trace_event/memory_allocator_dump.h',
+      'trace_event/memory_allocator_dump_guid.cc',
+      'trace_event/memory_allocator_dump_guid.h',
+      'trace_event/memory_dump_manager.cc',
+      'trace_event/memory_dump_manager.h',
+      'trace_event/memory_dump_provider.h',
+      'trace_event/memory_dump_request_args.cc',
+      'trace_event/memory_dump_request_args.h',
+      'trace_event/memory_dump_session_state.cc',
+      'trace_event/memory_dump_session_state.h',
+      'trace_event/memory_infra_background_whitelist.cc',
+      'trace_event/memory_infra_background_whitelist.h',
+      'trace_event/process_memory_dump.cc',
+      'trace_event/process_memory_dump.h',
+      'trace_event/process_memory_maps.cc',
+      'trace_event/process_memory_maps.h',
+      'trace_event/process_memory_totals.cc',
+      'trace_event/process_memory_totals.h',
+      'trace_event/trace_buffer.cc',
+      'trace_event/trace_buffer.h',
+      'trace_event/trace_config.cc',
+      'trace_event/trace_config.h',
+      'trace_event/trace_event.h',
+      'trace_event/trace_event_android.cc',
+      'trace_event/trace_event_argument.cc',
+      'trace_event/trace_event_argument.h',
+      'trace_event/trace_event_etw_export_win.cc',
+      'trace_event/trace_event_etw_export_win.h',
+      'trace_event/trace_event_impl.cc',
+      'trace_event/trace_event_impl.h',
+      'trace_event/trace_event_memory_overhead.cc',
+      'trace_event/trace_event_memory_overhead.h',
+      'trace_event/trace_event_synthetic_delay.cc',
+      'trace_event/trace_event_synthetic_delay.h',
+      'trace_event/trace_event_system_stats_monitor.cc',
+      'trace_event/trace_event_system_stats_monitor.h',
+      'trace_event/trace_log.cc',
+      'trace_event/trace_log.h',
+      'trace_event/trace_log_constants.cc',
+      'trace_event/trace_sampling_thread.cc',
+      'trace_event/trace_sampling_thread.h',
+      'trace_event/tracing_agent.cc',
+      'trace_event/tracing_agent.h',
+      'trace_event/winheap_dump_provider_win.cc',
+      'trace_event/winheap_dump_provider_win.h',
+    ],
+    'trace_event_test_sources' : [
+      'trace_event/blame_context_unittest.cc',
+      'trace_event/heap_profiler_allocation_context_tracker_unittest.cc',
+      'trace_event/heap_profiler_allocation_register_unittest.cc',
+      'trace_event/heap_profiler_heap_dump_writer_unittest.cc',
+      'trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc',
+      'trace_event/heap_profiler_type_name_deduplicator_unittest.cc',
+      'trace_event/java_heap_dump_provider_android_unittest.cc',
+      'trace_event/memory_allocator_dump_unittest.cc',
+      'trace_event/memory_dump_manager_unittest.cc',
+      'trace_event/process_memory_dump_unittest.cc',
+      'trace_event/trace_config_memory_test_util.h',
+      'trace_event/trace_config_unittest.cc',
+      'trace_event/trace_event_argument_unittest.cc',
+      'trace_event/trace_event_synthetic_delay_unittest.cc',
+      'trace_event/trace_event_system_stats_monitor_unittest.cc',
+      'trace_event/trace_event_unittest.cc',
+      'trace_event/winheap_dump_provider_win_unittest.cc',
+    ],
+    'conditions': [
+      ['OS == "linux" or OS=="android" or OS=="mac" or OS=="ios"', {
+        'trace_event_sources': [
+          'trace_event/malloc_dump_provider.cc',
+          'trace_event/malloc_dump_provider.h',
+        ],
+      }],
+      ['OS == "android"', {
+        'trace_event_test_sources' : [
+          'trace_event/trace_event_android_unittest.cc',
+        ],
+      }],
+    ],
+  },
+}
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
index 51e6927..a075898 100644
--- a/base/trace_event/trace_event.h
+++ b/base/trace_event/trace_event.h
@@ -19,7 +19,6 @@
 #include "base/time/time.h"
 #include "base/trace_event/common/trace_event_common.h"
 #include "base/trace_event/heap_profiler.h"
-#include "base/trace_event/trace_category.h"
 #include "base/trace_event/trace_event_system_stats_monitor.h"
 #include "base/trace_event/trace_log.h"
 #include "build/build_config.h"
@@ -29,52 +28,55 @@
 #define TRACE_STR_COPY(str) \
     trace_event_internal::TraceStringWithCopy(str)
 
-// DEPRECATED: do not use: Consider using TRACE_ID_{GLOBAL, LOCAL} macros,
-// instead. By default, uint64_t ID argument values are not mangled with the
-// Process ID in TRACE_EVENT_ASYNC macros. Use this macro to force Process ID
-// mangling.
+// By default, uint64_t ID argument values are not mangled with the Process ID
+// in TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
 #define TRACE_ID_MANGLE(id) \
     trace_event_internal::TraceID::ForceMangle(id)
 
-// DEPRECATED: do not use: Consider using TRACE_ID_{GLOBAL, LOCAL} macros,
-// instead. By default, pointers are mangled with the Process ID in
-// TRACE_EVENT_ASYNC macros. Use this macro to prevent Process ID mangling.
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
 #define TRACE_ID_DONT_MANGLE(id) \
     trace_event_internal::TraceID::DontMangle(id)
 
 // By default, trace IDs are eventually converted to a single 64-bit number. Use
-// this macro to add a scope string. For example,
-//
-// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
-//     "network", "ResourceLoad",
-//     TRACE_ID_WITH_SCOPE("BlinkResourceID", resourceID));
-//
-// Also, it is possible to prepend the ID with another number, like the process
-// ID. This is useful in creatin IDs that are unique among all processes. To do
-// that, pass two numbers after the scope string instead of one. For example,
-//
-// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
-//     "network", "ResourceLoad",
-//     TRACE_ID_WITH_SCOPE("BlinkResourceID", pid, resourceID));
-#define TRACE_ID_WITH_SCOPE(scope, ...) \
-  trace_event_internal::TraceID::WithScope(scope, ##__VA_ARGS__)
+// this macro to add a scope string.
+#define TRACE_ID_WITH_SCOPE(scope, id) \
+    trace_event_internal::TraceID::WithScope(scope, id)
 
-#define TRACE_ID_GLOBAL(id) trace_event_internal::TraceID::GlobalId(id)
-#define TRACE_ID_LOCAL(id) trace_event_internal::TraceID::LocalId(id)
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
+    bucket_number, category, name)                 \
+        trace_event_internal::                     \
+        TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+    trace_event_internal::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// {  // The sampling state is set within this scope.
+//    TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
+//    ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(                   \
+    bucket_number, category, name)                                      \
+    trace_event_internal::TraceEventSamplingStateScope<bucket_number>   \
+        traceEventSamplingScope(category "\0" name);
 
 #define TRACE_EVENT_API_CURRENT_THREAD_ID \
   static_cast<int>(base::PlatformThread::CurrentId())
 
 #define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
   UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) &           \
-           (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING |    \
-            base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT))
-
-#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()                  \
-  UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) &         \
-           (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING |  \
-            base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT | \
-            base::trace_event::TraceCategory::ENABLED_FOR_FILTERING))
+           (base::trace_event::TraceLog::ENABLED_FOR_RECORDING |         \
+            base::trace_event::TraceLog::ENABLED_FOR_EVENT_CALLBACK |    \
+            base::trace_event::TraceLog::ENABLED_FOR_ETW_EXPORT))
 
 ////////////////////////////////////////////////////////////////////////////////
 // Implementation specific tracing API definitions.
@@ -202,6 +204,13 @@
 // Defines visibility for classes in trace_event.h
 #define TRACE_EVENT_API_CLASS_EXPORT BASE_EXPORT
 
+// The thread buckets for the sampling profiler.
+TRACE_EVENT_API_CLASS_EXPORT extern \
+    TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket)                           \
+    g_trace_state[thread_bucket]
+
 ////////////////////////////////////////////////////////////////////////////////
 
 // Implementation detail: trace event macros create temporary variables
@@ -240,69 +249,69 @@
 
 // Implementation detail: internal macro to create static category and add
 // event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...)  \
-  do {                                                                     \
-    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                \
-    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                   \
-      trace_event_internal::AddTraceEvent(                                 \
-          phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,   \
-          trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
-          flags, trace_event_internal::kNoId, ##__VA_ARGS__);              \
-    }                                                                      \
-  } while (0)
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+    do { \
+      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+        trace_event_internal::AddTraceEvent( \
+            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+            flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+      } \
+    } while (0)
 
 // Implementation detail: internal macro to create static category and add begin
 // event if the category is enabled. Also adds the end event when the scope
 // ends.
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...)           \
-  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                    \
-  trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer);       \
-  if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                       \
-    base::trace_event::TraceEventHandle h =                                  \
-        trace_event_internal::AddTraceEvent(                                 \
-            TRACE_EVENT_PHASE_COMPLETE,                                      \
-            INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,          \
-            trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
-            TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId,              \
-            ##__VA_ARGS__);                                                  \
-    INTERNAL_TRACE_EVENT_UID(tracer).Initialize(                             \
-        INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h);          \
-  }
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+    trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+      base::trace_event::TraceEventHandle h = \
+          trace_event_internal::AddTraceEvent( \
+              TRACE_EVENT_PHASE_COMPLETE, \
+              INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+              trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+              TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
+              ##__VA_ARGS__); \
+      INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+    }
 
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name,      \
-                                                  bind_id, flow_flags, ...)  \
-  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                    \
-  trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer);       \
-  if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                       \
-    trace_event_internal::TraceID trace_event_bind_id((bind_id));            \
-    unsigned int trace_event_flags =                                         \
-        flow_flags | trace_event_bind_id.id_flags();                         \
-    base::trace_event::TraceEventHandle h =                                  \
-        trace_event_internal::AddTraceEvent(                                 \
-            TRACE_EVENT_PHASE_COMPLETE,                                      \
-            INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,          \
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW( \
+    category_group, name, bind_id, flow_flags, ...) \
+  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+  trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+  if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+    unsigned int trace_event_flags = flow_flags; \
+    trace_event_internal::TraceID trace_event_bind_id(bind_id, \
+                                                      &trace_event_flags); \
+    base::trace_event::TraceEventHandle h = \
+        trace_event_internal::AddTraceEvent( \
+            TRACE_EVENT_PHASE_COMPLETE, \
+            INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
             trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
             trace_event_flags, trace_event_bind_id.raw_id(), ##__VA_ARGS__); \
-    INTERNAL_TRACE_EVENT_UID(tracer).Initialize(                             \
-        INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h);          \
+    INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+        INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
   }
 
 // Implementation detail: internal macro to create static category and add
 // event if the category is enabled.
 #define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
-                                         flags, ...)                      \
-  do {                                                                    \
-    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);               \
-    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                  \
-      trace_event_internal::TraceID trace_event_trace_id((id));           \
-      unsigned int trace_event_flags =                                    \
-          flags | trace_event_trace_id.id_flags();                        \
-      trace_event_internal::AddTraceEvent(                                \
-          phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,  \
-          trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),    \
-          trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
-    }                                                                     \
-  } while (0)
+                                         flags, ...) \
+    do { \
+      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+        unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+        trace_event_internal::TraceID trace_event_trace_id( \
+            id, &trace_event_flags); \
+        trace_event_internal::AddTraceEvent( \
+            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+            name, trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+            trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+      } \
+    } while (0)
 
 // Implementation detail: internal macro to create static category and add
 // event if the category is enabled.
@@ -310,11 +319,12 @@
                                                 timestamp, flags, ...)       \
   do {                                                                       \
     INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                  \
-    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                     \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) {  \
       trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(           \
           phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,     \
           trace_event_internal::kGlobalScope, trace_event_internal::kNoId,   \
-          TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp,                      \
+          TRACE_EVENT_API_CURRENT_THREAD_ID,                                 \
+          base::TimeTicks::FromInternalValue(timestamp),                     \
           flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,                       \
           trace_event_internal::kNoId, ##__VA_ARGS__);                       \
     }                                                                        \
@@ -322,50 +332,33 @@
 
 // Implementation detail: internal macro to create static category and add
 // event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(              \
-    phase, category_group, name, id, thread_id, timestamp, flags, ...)   \
-  do {                                                                   \
-    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);              \
-    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                 \
-      trace_event_internal::TraceID trace_event_trace_id((id));          \
-      unsigned int trace_event_flags =                                   \
-          flags | trace_event_trace_id.id_flags();                       \
-      trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(       \
-          phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
-          trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),   \
-          thread_id, timestamp,                                          \
-          trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,       \
-          trace_event_internal::kNoId, ##__VA_ARGS__);                   \
-    }                                                                    \
-  } while (0)
-
-// The linked ID will not be mangled.
-#define INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id1, id2) \
-  do {                                                                    \
-    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);               \
-    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {                  \
-      trace_event_internal::TraceID source_id((id1));                     \
-      unsigned int source_flags = source_id.id_flags();                   \
-      trace_event_internal::TraceID target_id((id2));                     \
-      trace_event_internal::AddTraceEvent(                                \
-          TRACE_EVENT_PHASE_LINK_IDS,                                     \
-          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,         \
-          source_id.scope(), source_id.raw_id(), source_flags,            \
-          trace_event_internal::kNoId, "linked_id",                       \
-          target_id.AsConvertableToTraceFormat());                        \
-    }                                                                     \
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+    phase, category_group, name, id, thread_id, timestamp, flags, ...)        \
+  do {                                                                        \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                   \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) {   \
+      unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID;       \
+      trace_event_internal::TraceID trace_event_trace_id(id,                  \
+                                                         &trace_event_flags); \
+      trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(            \
+          phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,      \
+          trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),        \
+          thread_id, base::TimeTicks::FromInternalValue(timestamp),           \
+          trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,            \
+          trace_event_internal::kNoId, ##__VA_ARGS__);                        \
+    }                                                                         \
   } while (0)
 
 // Implementation detail: internal macro to create static category and add
 // metadata event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
-  do {                                                               \
-    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);          \
-    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) {             \
-      TRACE_EVENT_API_ADD_METADATA_EVENT(                            \
-          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,    \
-          ##__VA_ARGS__);                                            \
-    }                                                                \
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...)        \
+  do {                                                                      \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                 \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+      TRACE_EVENT_API_ADD_METADATA_EVENT(                                   \
+          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,           \
+          ##__VA_ARGS__);                                                   \
+    }                                                                       \
   } while (0)
 
 // Implementation detail: internal macro to enter and leave a
@@ -388,7 +381,7 @@
     void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {};     \
   };                                                                       \
   INTERNAL_TRACE_EVENT_UID(ScopedContext)                                  \
-  INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
+  INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
 
 // Implementation detail: internal macro to trace a task execution with the
 // location where it was posted from.
@@ -410,64 +403,19 @@
 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
 // are by default mangled with the Process ID so that they are unlikely to
 // collide when the same pointer is used on different processes.
-class BASE_EXPORT TraceID {
+class TraceID {
  public:
-  // Can be combined with WithScope.
-  class LocalId {
-   public:
-    explicit LocalId(unsigned long long raw_id) : raw_id_(raw_id) {}
-    unsigned long long raw_id() const { return raw_id_; }
-   private:
-    unsigned long long raw_id_;
-  };
-
-  // Can be combined with WithScope.
-  class GlobalId {
-   public:
-    explicit GlobalId(unsigned long long raw_id) : raw_id_(raw_id) {}
-    unsigned long long raw_id() const { return raw_id_; }
-   private:
-    unsigned long long raw_id_;
-  };
-
   class WithScope {
    public:
     WithScope(const char* scope, unsigned long long raw_id)
         : scope_(scope), raw_id_(raw_id) {}
-    WithScope(const char* scope, LocalId local_id)
-        : scope_(scope), raw_id_(local_id.raw_id()) {
-      id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
-    }
-    WithScope(const char* scope, GlobalId global_id)
-        : scope_(scope), raw_id_(global_id.raw_id()) {
-      id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
-    }
-    WithScope(const char* scope,
-              unsigned long long prefix,
-              unsigned long long raw_id)
-        : scope_(scope), has_prefix_(true), prefix_(prefix), raw_id_(raw_id) {}
-    WithScope(const char* scope, unsigned long long prefix, GlobalId global_id)
-        : scope_(scope),
-          has_prefix_(true),
-          prefix_(prefix),
-          raw_id_(global_id.raw_id()) {
-      id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
-    }
     unsigned long long raw_id() const { return raw_id_; }
     const char* scope() const { return scope_; }
-    bool has_prefix() const { return has_prefix_; }
-    unsigned long long prefix() const { return prefix_; }
-    unsigned int id_flags() const { return id_flags_; }
-
    private:
     const char* scope_ = nullptr;
-    bool has_prefix_ = false;
-    unsigned long long prefix_;
     unsigned long long raw_id_;
-    unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
   };
 
-  // DEPRECATED: consider using LocalId or GlobalId, instead.
   class DontMangle {
    public:
     explicit DontMangle(const void* raw_id)
@@ -488,12 +436,15 @@
         : raw_id_(static_cast<unsigned long long>(raw_id)) {}
     explicit DontMangle(signed char raw_id)
         : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(WithScope scoped_id)
+        : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+    const char* scope() const { return scope_; }
     unsigned long long raw_id() const { return raw_id_; }
    private:
+    const char* scope_ = nullptr;
     unsigned long long raw_id_;
   };
 
-  // DEPRECATED: consider using LocalId or GlobalId, instead.
   class ForceMangle {
    public:
     explicit ForceMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
@@ -515,58 +466,50 @@
    private:
     unsigned long long raw_id_;
   };
-
-  TraceID(const void* raw_id) : raw_id_(static_cast<unsigned long long>(
-                                        reinterpret_cast<uintptr_t>(raw_id))) {
-    id_flags_ = TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_MANGLE_ID;
+  TraceID(const void* raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(
+                reinterpret_cast<uintptr_t>(raw_id))) {
+    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(ForceMangle raw_id) : raw_id_(raw_id.raw_id()) {
-    id_flags_ = TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_MANGLE_ID;
+  TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
+    *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(DontMangle raw_id) : raw_id_(raw_id.raw_id()) {}
-  TraceID(unsigned long long raw_id) : raw_id_(raw_id) {}
-  TraceID(unsigned long raw_id) : raw_id_(raw_id) {}
-  TraceID(unsigned int raw_id) : raw_id_(raw_id) {}
-  TraceID(unsigned short raw_id) : raw_id_(raw_id) {}
-  TraceID(unsigned char raw_id) : raw_id_(raw_id) {}
-  TraceID(long long raw_id)
-      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
-  TraceID(long raw_id)
-      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
-  TraceID(int raw_id)
-      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
-  TraceID(short raw_id)
-      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
-  TraceID(signed char raw_id)
-      : raw_id_(static_cast<unsigned long long>(raw_id)) {}
-  TraceID(LocalId raw_id) : raw_id_(raw_id.raw_id()) {
-    id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
+  TraceID(DontMangle maybe_scoped_id, unsigned int* /*flags*/)
+      : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
+  TraceID(unsigned long long raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
   }
-  TraceID(GlobalId raw_id) : raw_id_(raw_id.raw_id()) {
-    id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+  TraceID(unsigned long raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
   }
-  TraceID(WithScope scoped_id)
-      : scope_(scoped_id.scope()),
-        has_prefix_(scoped_id.has_prefix()),
-        prefix_(scoped_id.prefix()),
-        raw_id_(scoped_id.raw_id()),
-        id_flags_(scoped_id.id_flags()) {}
+  TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned short raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(long long raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(long raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(int raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(short raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(signed char raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(WithScope scoped_id, unsigned int* /*flags*/)
+      : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
 
   unsigned long long raw_id() const { return raw_id_; }
   const char* scope() const { return scope_; }
-  bool has_prefix() const { return has_prefix_; }
-  unsigned long long prefix() const { return prefix_; }
-  unsigned int id_flags() const { return id_flags_; }
-
-  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
-  AsConvertableToTraceFormat() const;
 
  private:
   const char* scope_ = nullptr;
-  bool has_prefix_ = false;
-  unsigned long long prefix_;
   unsigned long long raw_id_;
-  unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
 };
 
 // Simple union to store various types as unsigned long long.
@@ -1030,10 +973,9 @@
   ScopedTracer() : p_data_(NULL) {}
 
   ~ScopedTracer() {
-    if (p_data_ && *data_.category_group_enabled) {
+    if (p_data_ && *data_.category_group_enabled)
       TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
           data_.category_group_enabled, data_.name, data_.event_handle);
-    }
   }
 
   void Initialize(const unsigned char* category_group_enabled,
@@ -1081,6 +1023,37 @@
     trace_event_internal::ScopedTraceBinaryEfficient \
         INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
 
+// TraceEventSamplingStateScope records the current sampling state
+// and sets a new sampling state. When the scope exits, it restores
+// the sampling state it had recorded.
+template<size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+  TraceEventSamplingStateScope(const char* category_and_name) {
+    previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+    TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+  }
+
+  ~TraceEventSamplingStateScope() {
+    TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+  }
+
+  static inline const char* Current() {
+    return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
+      g_trace_state[BucketNumber]));
+  }
+
+  static inline void Set(const char* category_and_name) {
+    TRACE_EVENT_API_ATOMIC_STORE(
+      g_trace_state[BucketNumber],
+      reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+        const_cast<char*>(category_and_name)));
+  }
+
+ private:
+  const char* previous_state_;
+};
+
 }  // namespace trace_event_internal
 
 namespace base {
diff --git a/base/trace_event/trace_event_argument.cc b/base/trace_event/trace_event_argument.cc
index db702b6..336d964 100644
--- a/base/trace_event/trace_event_argument.cc
+++ b/base/trace_event/trace_event_argument.cc
@@ -244,36 +244,36 @@
                                              const base::Value& value) {
   DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
   switch (value.GetType()) {
-    case base::Value::Type::NONE:
-    case base::Value::Type::BINARY:
+    case base::Value::TYPE_NULL:
+    case base::Value::TYPE_BINARY:
       NOTREACHED();
       break;
 
-    case base::Value::Type::BOOLEAN: {
+    case base::Value::TYPE_BOOLEAN: {
       bool bool_value;
       value.GetAsBoolean(&bool_value);
       SetBooleanWithCopiedName(name, bool_value);
     } break;
 
-    case base::Value::Type::INTEGER: {
+    case base::Value::TYPE_INTEGER: {
       int int_value;
       value.GetAsInteger(&int_value);
       SetIntegerWithCopiedName(name, int_value);
     } break;
 
-    case base::Value::Type::DOUBLE: {
+    case base::Value::TYPE_DOUBLE: {
       double double_value;
       value.GetAsDouble(&double_value);
       SetDoubleWithCopiedName(name, double_value);
     } break;
 
-    case base::Value::Type::STRING: {
-      const Value* string_value;
+    case base::Value::TYPE_STRING: {
+      const StringValue* string_value;
       value.GetAsString(&string_value);
       SetStringWithCopiedName(name, string_value->GetString());
     } break;
 
-    case base::Value::Type::DICTIONARY: {
+    case base::Value::TYPE_DICTIONARY: {
       const DictionaryValue* dict_value;
       value.GetAsDictionary(&dict_value);
       BeginDictionaryWithCopiedName(name);
@@ -284,7 +284,7 @@
       EndDictionary();
     } break;
 
-    case base::Value::Type::LIST: {
+    case base::Value::TYPE_LIST: {
       const ListValue* list_value;
       value.GetAsList(&list_value);
       BeginArrayWithCopiedName(name);
@@ -298,36 +298,36 @@
 void TracedValue::AppendBaseValue(const base::Value& value) {
   DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
   switch (value.GetType()) {
-    case base::Value::Type::NONE:
-    case base::Value::Type::BINARY:
+    case base::Value::TYPE_NULL:
+    case base::Value::TYPE_BINARY:
       NOTREACHED();
       break;
 
-    case base::Value::Type::BOOLEAN: {
+    case base::Value::TYPE_BOOLEAN: {
       bool bool_value;
       value.GetAsBoolean(&bool_value);
       AppendBoolean(bool_value);
     } break;
 
-    case base::Value::Type::INTEGER: {
+    case base::Value::TYPE_INTEGER: {
       int int_value;
       value.GetAsInteger(&int_value);
       AppendInteger(int_value);
     } break;
 
-    case base::Value::Type::DOUBLE: {
+    case base::Value::TYPE_DOUBLE: {
       double double_value;
       value.GetAsDouble(&double_value);
       AppendDouble(double_value);
     } break;
 
-    case base::Value::Type::STRING: {
-      const Value* string_value;
+    case base::Value::TYPE_STRING: {
+      const StringValue* string_value;
       value.GetAsString(&string_value);
       AppendString(string_value->GetString());
     } break;
 
-    case base::Value::Type::DICTIONARY: {
+    case base::Value::TYPE_DICTIONARY: {
       const DictionaryValue* dict_value;
       value.GetAsDictionary(&dict_value);
       BeginDictionary();
@@ -338,7 +338,7 @@
       EndDictionary();
     } break;
 
-    case base::Value::Type::LIST: {
+    case base::Value::TYPE_LIST: {
       const ListValue* list_value;
       value.GetAsList(&list_value);
       BeginArray();
diff --git a/base/trace_event/trace_event_argument_unittest.cc b/base/trace_event/trace_event_argument_unittest.cc
index aef8441..61395f4 100644
--- a/base/trace_event/trace_event_argument_unittest.cc
+++ b/base/trace_event/trace_event_argument_unittest.cc
@@ -97,9 +97,9 @@
 }
 
 TEST(TraceEventArgumentTest, PassBaseValue) {
-  Value int_value(42);
-  Value bool_value(true);
-  Value double_value(42.0f);
+  FundamentalValue int_value(42);
+  FundamentalValue bool_value(true);
+  FundamentalValue double_value(42.0f);
 
   auto dict_value = WrapUnique(new DictionaryValue);
   dict_value->SetBoolean("bool", true);
@@ -131,10 +131,10 @@
 }
 
 TEST(TraceEventArgumentTest, PassTracedValue) {
-  auto dict_value = MakeUnique<TracedValue>();
+  auto dict_value = WrapUnique(new TracedValue());
   dict_value->SetInteger("a", 1);
 
-  auto nested_dict_value = MakeUnique<TracedValue>();
+  auto nested_dict_value = WrapUnique(new TracedValue());
   nested_dict_value->SetInteger("b", 2);
   nested_dict_value->BeginArray("c");
   nested_dict_value->AppendString("foo");
diff --git a/base/trace_event/trace_event_filter.cc b/base/trace_event/trace_event_filter.cc
deleted file mode 100644
index d50c5fe..0000000
--- a/base/trace_event/trace_event_filter.cc
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/compiler_specific.h"
-#include "base/trace_event/trace_event_filter.h"
-
-namespace base {
-namespace trace_event {
-
-TraceEventFilter::TraceEventFilter() {}
-TraceEventFilter::~TraceEventFilter() {}
-
-void TraceEventFilter::EndEvent(const char* category_name,
-                                const char* event_name) const {
-  ALLOW_UNUSED_PARAM(category_name);
-  ALLOW_UNUSED_PARAM(event_name);
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/trace_event_filter.h b/base/trace_event/trace_event_filter.h
deleted file mode 100644
index 48c6711..0000000
--- a/base/trace_event/trace_event_filter.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
-#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
-
-#include <memory>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceEvent;
-
-// TraceEventFilter is like iptables for TRACE_EVENT macros. Filters can be
-// enabled on a per-category basis, hence a single filter instance can serve
-// more than a TraceCategory. There are two use cases for filters:
-// 1. Snooping TRACE_EVENT macros without adding them to the TraceLog. This is
-//    possible by setting the ENABLED_FOR_FILTERING flag on a category w/o
-//    ENABLED_FOR_RECORDING (see TraceConfig for user-facing configuration).
-// 2. Filtering TRACE_EVENT macros before they are added to the TraceLog. This
-//    requires both the ENABLED_FOR_FILTERING and ENABLED_FOR_RECORDING flags
-//    on the category.
-// More importantly, filters must be thread-safe. The FilterTraceEvent and
-// EndEvent methods can be called concurrently as trace macros are hit on
-// different threads.
-class BASE_EXPORT TraceEventFilter {
- public:
-  TraceEventFilter();
-  virtual ~TraceEventFilter();
-
-  // If the category is ENABLED_FOR_RECORDING, the event is added iff all the
-  // filters enabled for the category return true. false causes the event to be
-  // discarded.
-  virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0;
-
-  // Notifies the end of a duration event when the RAII macro goes out of scope.
-  virtual void EndEvent(const char* category_name,
-                        const char* event_name) const;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(TraceEventFilter);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
diff --git a/base/trace_event/trace_event_filter_test_utils.cc b/base/trace_event/trace_event_filter_test_utils.cc
deleted file mode 100644
index 06548b0..0000000
--- a/base/trace_event/trace_event_filter_test_utils.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/trace_event_filter_test_utils.h"
-
-#include "base/logging.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-TestEventFilter::HitsCounter* g_hits_counter;
-}  // namespace;
-
-// static
-const char TestEventFilter::kName[] = "testing_predicate";
-bool TestEventFilter::filter_return_value_;
-
-// static
-std::unique_ptr<TraceEventFilter> TestEventFilter::Factory(
-    const std::string& predicate_name) {
-  std::unique_ptr<TraceEventFilter> res;
-  if (predicate_name == kName)
-    res.reset(new TestEventFilter());
-  return res;
-}
-
-TestEventFilter::TestEventFilter() {}
-TestEventFilter::~TestEventFilter() {}
-
-bool TestEventFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
-  if (g_hits_counter)
-    g_hits_counter->filter_trace_event_hit_count++;
-  return filter_return_value_;
-}
-
-void TestEventFilter::EndEvent(const char* category_name,
-                               const char* name) const {
-  if (g_hits_counter)
-    g_hits_counter->end_event_hit_count++;
-}
-
-TestEventFilter::HitsCounter::HitsCounter() {
-  Reset();
-  DCHECK(!g_hits_counter);
-  g_hits_counter = this;
-}
-
-TestEventFilter::HitsCounter::~HitsCounter() {
-  DCHECK(g_hits_counter);
-  g_hits_counter = nullptr;
-}
-
-void TestEventFilter::HitsCounter::Reset() {
-  filter_trace_event_hit_count = 0;
-  end_event_hit_count = 0;
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/trace_event_filter_test_utils.h b/base/trace_event/trace_event_filter_test_utils.h
deleted file mode 100644
index 419068b..0000000
--- a/base/trace_event/trace_event_filter_test_utils.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
-#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
-
-#include <memory>
-#include <string>
-
-#include "base/macros.h"
-#include "base/trace_event/trace_event_filter.h"
-
-namespace base {
-namespace trace_event {
-
-class TestEventFilter : public TraceEventFilter {
- public:
-  struct HitsCounter {
-    HitsCounter();
-    ~HitsCounter();
-    void Reset();
-    size_t filter_trace_event_hit_count;
-    size_t end_event_hit_count;
-  };
-
-  static const char kName[];
-
-  // Factory method for TraceLog::SetFilterFactoryForTesting().
-  static std::unique_ptr<TraceEventFilter> Factory(
-      const std::string& predicate_name);
-
-  TestEventFilter();
-  ~TestEventFilter() override;
-
-  // TraceEventFilter implementation.
-  bool FilterTraceEvent(const TraceEvent& trace_event) const override;
-  void EndEvent(const char* category_name, const char* name) const override;
-
-  static void set_filter_return_value(bool value) {
-    filter_return_value_ = value;
-  }
-
- private:
-  static bool filter_return_value_;
-
-  DISALLOW_COPY_AND_ASSIGN(TestEventFilter);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index cb23eb4..f469f2f 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -8,7 +8,6 @@
 
 #include "base/format_macros.h"
 #include "base/json/string_escape.h"
-#include "base/memory/ptr_util.h"
 #include "base/process/process_handle.h"
 #include "base/stl_util.h"
 #include "base/strings/string_number_conversions.h"
@@ -16,7 +15,6 @@
 #include "base/strings/stringprintf.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_argument.h"
 #include "base/trace_event/trace_log.h"
 
 namespace base {
@@ -360,33 +358,10 @@
 
   // If id_ is set, print it out as a hex string so we don't loose any
   // bits (it might be a 64-bit pointer).
-  unsigned int id_flags_ = flags_ & (TRACE_EVENT_FLAG_HAS_ID |
-                                     TRACE_EVENT_FLAG_HAS_LOCAL_ID |
-                                     TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
-  if (id_flags_) {
+  if (flags_ & TRACE_EVENT_FLAG_HAS_ID) {
     if (scope_ != trace_event_internal::kGlobalScope)
       StringAppendF(out, ",\"scope\":\"%s\"", scope_);
-
-    switch (id_flags_) {
-      case TRACE_EVENT_FLAG_HAS_ID:
-        StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"",
-                      static_cast<uint64_t>(id_));
-        break;
-
-      case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
-        StringAppendF(out, ",\"id2\":{\"local\":\"0x%" PRIx64 "\"}",
-                      static_cast<uint64_t>(id_));
-        break;
-
-      case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
-        StringAppendF(out, ",\"id2\":{\"global\":\"0x%" PRIx64 "\"}",
-                      static_cast<uint64_t>(id_));
-        break;
-
-      default:
-        NOTREACHED() << "More than one of the ID flags are set";
-        break;
-    }
+    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64_t>(id_));
   }
 
   if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
@@ -449,42 +424,3 @@
 
 }  // namespace trace_event
 }  // namespace base
-
-namespace trace_event_internal {
-
-std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
-TraceID::AsConvertableToTraceFormat() const {
-  auto value = base::MakeUnique<base::trace_event::TracedValue>();
-
-  if (scope_ != kGlobalScope)
-    value->SetString("scope", scope_);
-
-  const char* id_field_name = "id";
-  if (id_flags_ == TRACE_EVENT_FLAG_HAS_GLOBAL_ID) {
-    id_field_name = "global";
-    value->BeginDictionary("id2");
-  } else if (id_flags_ == TRACE_EVENT_FLAG_HAS_LOCAL_ID) {
-    id_field_name = "local";
-    value->BeginDictionary("id2");
-  } else if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID) {
-    NOTREACHED() << "Unrecognized ID flag";
-  }
-
-  if (has_prefix_) {
-    value->SetString(id_field_name,
-                     base::StringPrintf("0x%" PRIx64 "/0x%" PRIx64,
-                                        static_cast<uint64_t>(prefix_),
-                                        static_cast<uint64_t>(raw_id_)));
-  } else {
-    value->SetString(
-        id_field_name,
-        base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
-  }
-
-  if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID)
-    value->EndDictionary();
-
-  return std::move(value);
-}
-
-}  // namespace trace_event_internal
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
index 5eef702..4382217 100644
--- a/base/trace_event/trace_event_impl.h
+++ b/base/trace_event/trace_event_impl.h
@@ -23,11 +23,16 @@
 #include "base/strings/string_util.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
 #include "base/threading/thread_local.h"
 #include "base/trace_event/trace_event_memory_overhead.h"
 #include "build/build_config.h"
 
 namespace base {
+
+class WaitableEvent;
+class MessageLoop;
+
 namespace trace_event {
 
 typedef base::Callback<bool(const char* arg_name)> ArgumentNameFilterPredicate;
diff --git a/base/trace_event/trace_event_memory_overhead.cc b/base/trace_event/trace_event_memory_overhead.cc
index 8d56e1d..23579cb 100644
--- a/base/trace_event/trace_event_memory_overhead.cc
+++ b/base/trace_event/trace_event_memory_overhead.cc
@@ -69,27 +69,27 @@
 
 void TraceEventMemoryOverhead::AddValue(const Value& value) {
   switch (value.GetType()) {
-    case Value::Type::NONE:
-    case Value::Type::BOOLEAN:
-    case Value::Type::INTEGER:
-    case Value::Type::DOUBLE:
+    case Value::TYPE_NULL:
+    case Value::TYPE_BOOLEAN:
+    case Value::TYPE_INTEGER:
+    case Value::TYPE_DOUBLE:
       Add("FundamentalValue", sizeof(Value));
       break;
 
-    case Value::Type::STRING: {
-      const Value* string_value = nullptr;
+    case Value::TYPE_STRING: {
+      const StringValue* string_value = nullptr;
       value.GetAsString(&string_value);
-      Add("StringValue", sizeof(Value));
+      Add("StringValue", sizeof(StringValue));
       AddString(string_value->GetString());
     } break;
 
-    case Value::Type::BINARY: {
+    case Value::TYPE_BINARY: {
       const BinaryValue* binary_value = nullptr;
       value.GetAsBinary(&binary_value);
       Add("BinaryValue", sizeof(BinaryValue) + binary_value->GetSize());
     } break;
 
-    case Value::Type::DICTIONARY: {
+    case Value::TYPE_DICTIONARY: {
       const DictionaryValue* dictionary_value = nullptr;
       value.GetAsDictionary(&dictionary_value);
       Add("DictionaryValue", sizeof(DictionaryValue));
@@ -100,7 +100,7 @@
       }
     } break;
 
-    case Value::Type::LIST: {
+    case Value::TYPE_LIST: {
       const ListValue* list_value = nullptr;
       value.GetAsList(&list_value);
       Add("ListValue", sizeof(ListValue));
diff --git a/base/trace_event/trace_event_synthetic_delay.h b/base/trace_event/trace_event_synthetic_delay.h
index e86f9ee..59e2842 100644
--- a/base/trace_event/trace_event_synthetic_delay.h
+++ b/base/trace_event/trace_event_synthetic_delay.h
@@ -62,6 +62,9 @@
     trace_event_internal::GetOrCreateDelay(name, &impl_ptr)->End();   \
   } while (false)
 
+template <typename Type>
+struct DefaultSingletonTraits;
+
 namespace base {
 namespace trace_event {
 
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index 82a552a..ff8ec2d 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -18,7 +18,6 @@
 #include "base/json/json_writer.h"
 #include "base/location.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted_memory.h"
 #include "base/memory/singleton.h"
 #include "base/process/process_handle.h"
@@ -30,12 +29,7 @@
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread.h"
 #include "base/time/time.h"
-#include "base/trace_event/event_name_filter.h"
-#include "base/trace_event/heap_profiler_event_filter.h"
 #include "base/trace_event/trace_buffer.h"
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_filter.h"
-#include "base/trace_event/trace_event_filter_test_utils.h"
 #include "base/trace_event/trace_event_synthetic_delay.h"
 #include "base/values.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -73,6 +67,9 @@
       WaitableEvent* flush_complete_event,
       const scoped_refptr<base::RefCountedString>& events_str,
       bool has_more_events);
+  void OnWatchEventMatched() {
+    ++event_watch_notification_;
+  }
   DictionaryValue* FindMatchingTraceEntry(const JsonKeyValue* key_values);
   DictionaryValue* FindNamePhase(const char* name, const char* phase);
   DictionaryValue* FindNamePhaseKeyValue(const char* name,
@@ -94,6 +91,7 @@
   }
 
   void BeginSpecificTrace(const std::string& filter) {
+    event_watch_notification_ = 0;
     TraceLog::GetInstance()->SetEnabled(TraceConfig(filter, ""),
                                         TraceLog::RECORDING_MODE);
   }
@@ -137,8 +135,7 @@
   }
 
   void EndTraceAndFlushAsync(WaitableEvent* flush_complete_event) {
-    TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE |
-                                         TraceLog::FILTERING_MODE);
+    TraceLog::GetInstance()->SetDisabled();
     TraceLog::GetInstance()->Flush(
         base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
                    base::Unretained(static_cast<TraceEventTestFixture*>(this)),
@@ -154,6 +151,7 @@
     ASSERT_TRUE(tracelog);
     ASSERT_FALSE(tracelog->IsEnabled());
     trace_buffer_.SetOutputCallback(json_output_.GetCallback());
+    event_watch_notification_ = 0;
     num_flush_callbacks_ = 0;
   }
   void TearDown() override {
@@ -170,6 +168,7 @@
   ListValue trace_parsed_;
   TraceResultBuffer trace_buffer_;
   TraceResultBuffer::SimpleOutput json_output_;
+  int event_watch_notification_;
   size_t num_flush_callbacks_;
 
  private:
@@ -264,7 +263,7 @@
   for (size_t i = 0; i < trace_parsed_count; i++) {
     Value* value = NULL;
     trace_parsed_.Get(i, &value);
-    if (!value || value->GetType() != Value::Type::DICTIONARY)
+    if (!value || value->GetType() != Value::TYPE_DICTIONARY)
       continue;
     DictionaryValue* dict = static_cast<DictionaryValue*>(value);
 
@@ -282,7 +281,7 @@
   for (size_t i = 0; i < old_trace_parsed_size; i++) {
     Value* value = nullptr;
     old_trace_parsed->Get(i, &value);
-    if (!value || value->GetType() != Value::Type::DICTIONARY) {
+    if (!value || value->GetType() != Value::TYPE_DICTIONARY) {
       trace_parsed_.Append(value->CreateDeepCopy());
       continue;
     }
@@ -371,7 +370,7 @@
          match_after_this_item = NULL;
       continue;
     }
-    if (!value || value->GetType() != Value::Type::DICTIONARY)
+    if (!value || value->GetType() != Value::TYPE_DICTIONARY)
       continue;
     const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
 
@@ -389,7 +388,7 @@
   for (size_t i = 0; i < trace_parsed_count; i++) {
     const Value* value = NULL;
     trace_parsed.Get(i, &value);
-    if (!value || value->GetType() != Value::Type::DICTIONARY)
+    if (!value || value->GetType() != Value::TYPE_DICTIONARY)
       continue;
     const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
 
@@ -461,10 +460,9 @@
                    "b", 1415);
 
     TRACE_COUNTER_WITH_TIMESTAMP1("all", "TRACE_COUNTER_WITH_TIMESTAMP1 call",
-                                  TimeTicks::FromInternalValue(42), 31415);
+                                  42, 31415);
     TRACE_COUNTER_WITH_TIMESTAMP2("all", "TRACE_COUNTER_WITH_TIMESTAMP2 call",
-                                  TimeTicks::FromInternalValue(42),
-                                  "a", 30000, "b", 1415);
+                                  42, "a", 30000, "b", 1415);
 
     TRACE_COUNTER_ID1("all", "TRACE_COUNTER_ID1 call", 0x319009, 31415);
     TRACE_COUNTER_ID2("all", "TRACE_COUNTER_ID2 call", 0x319009,
@@ -472,14 +470,14 @@
 
     TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
         "TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
-        kAsyncId, kThreadId, TimeTicks::FromInternalValue(12345));
+        kAsyncId, kThreadId, 12345);
     TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0("all",
         "TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call",
-        kAsyncId, kThreadId, TimeTicks::FromInternalValue(23456));
+        kAsyncId, kThreadId, 23456);
 
     TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
         "TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
-        kAsyncId2, kThreadId, TimeTicks::FromInternalValue(34567));
+        kAsyncId2, kThreadId, 34567);
     TRACE_EVENT_ASYNC_STEP_PAST0("all", "TRACE_EVENT_ASYNC_STEP_PAST0 call",
                                  kAsyncId2, "step_end1");
     TRACE_EVENT_ASYNC_STEP_PAST1("all", "TRACE_EVENT_ASYNC_STEP_PAST1 call",
@@ -487,7 +485,7 @@
 
     TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0("all",
         "TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call",
-        kAsyncId2, kThreadId, TimeTicks::FromInternalValue(45678));
+        kAsyncId2, kThreadId, 45678);
 
     TRACE_EVENT_OBJECT_CREATED_WITH_ID("all", "tracked object 1", 0x42);
     TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
@@ -519,24 +517,6 @@
                                context_id);
     TRACE_EVENT_SCOPED_CONTEXT("all", "TRACE_EVENT_SCOPED_CONTEXT call",
                                context_id);
-
-    TRACE_LINK_IDS("all", "TRACE_LINK_IDS simple call", 0x1000, 0x2000);
-    TRACE_LINK_IDS("all", "TRACE_LINK_IDS scoped call",
-                   TRACE_ID_WITH_SCOPE("scope 1", 0x1000),
-                   TRACE_ID_WITH_SCOPE("scope 2", 0x2000));
-    TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a local ID", 0x1000,
-                   TRACE_ID_LOCAL(0x2000));
-    TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a global ID", 0x1000,
-                   TRACE_ID_GLOBAL(0x2000));
-    TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a composite ID", 0x1000,
-                   TRACE_ID_WITH_SCOPE("scope 1", 0x2000, 0x3000));
-
-    TRACE_EVENT_ASYNC_BEGIN0("all", "async default process scope", 0x1000);
-    TRACE_EVENT_ASYNC_BEGIN0("all", "async local id", TRACE_ID_LOCAL(0x2000));
-    TRACE_EVENT_ASYNC_BEGIN0("all", "async global id", TRACE_ID_GLOBAL(0x3000));
-    TRACE_EVENT_ASYNC_BEGIN0("all", "async global id with scope string",
-                             TRACE_ID_WITH_SCOPE("scope string",
-                                                 TRACE_ID_GLOBAL(0x4000)));
   }  // Scope close causes TRACE_EVENT0 etc to send their END events.
 
   if (task_complete_event)
@@ -977,144 +957,6 @@
     EXPECT_TRUE((item && item->GetString("id", &id)));
     EXPECT_EQ("0x20151021", id);
   }
-
-  EXPECT_FIND_("TRACE_LINK_IDS simple call");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("=", ph);
-
-    EXPECT_FALSE((item && item->HasKey("scope")));
-    std::string id1;
-    EXPECT_TRUE((item && item->GetString("id", &id1)));
-    EXPECT_EQ("0x1000", id1);
-
-    EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
-    std::string id2;
-    EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
-    EXPECT_EQ("0x2000", id2);
-  }
-
-  EXPECT_FIND_("TRACE_LINK_IDS scoped call");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("=", ph);
-
-    std::string scope1;
-    EXPECT_TRUE((item && item->GetString("scope", &scope1)));
-    EXPECT_EQ("scope 1", scope1);
-    std::string id1;
-    EXPECT_TRUE((item && item->GetString("id", &id1)));
-    EXPECT_EQ("0x1000", id1);
-
-    std::string scope2;
-    EXPECT_TRUE((item && item->GetString("args.linked_id.scope", &scope2)));
-    EXPECT_EQ("scope 2", scope2);
-    std::string id2;
-    EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
-    EXPECT_EQ("0x2000", id2);
-  }
-
-  EXPECT_FIND_("TRACE_LINK_IDS to a local ID");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("=", ph);
-
-    EXPECT_FALSE((item && item->HasKey("scope")));
-    std::string id1;
-    EXPECT_TRUE((item && item->GetString("id", &id1)));
-    EXPECT_EQ("0x1000", id1);
-
-    EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
-    std::string id2;
-    EXPECT_TRUE((item && item->GetString("args.linked_id.id2.local", &id2)));
-    EXPECT_EQ("0x2000", id2);
-  }
-
-  EXPECT_FIND_("TRACE_LINK_IDS to a global ID");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("=", ph);
-
-    EXPECT_FALSE((item && item->HasKey("scope")));
-    std::string id1;
-    EXPECT_TRUE((item && item->GetString("id", &id1)));
-    EXPECT_EQ("0x1000", id1);
-
-    EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
-    std::string id2;
-    EXPECT_TRUE((item && item->GetString("args.linked_id.id2.global", &id2)));
-    EXPECT_EQ("0x2000", id2);
-  }
-
-  EXPECT_FIND_("TRACE_LINK_IDS to a composite ID");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("=", ph);
-
-    EXPECT_FALSE(item->HasKey("scope"));
-    std::string id1;
-    EXPECT_TRUE(item->GetString("id", &id1));
-    EXPECT_EQ("0x1000", id1);
-
-    std::string scope;
-    EXPECT_TRUE(item->GetString("args.linked_id.scope", &scope));
-    EXPECT_EQ("scope 1", scope);
-    std::string id2;
-    EXPECT_TRUE(item->GetString("args.linked_id.id", &id2));
-    EXPECT_EQ(id2, "0x2000/0x3000");
-  }
-
-  EXPECT_FIND_("async default process scope");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("S", ph);
-
-    std::string id;
-    EXPECT_TRUE((item && item->GetString("id", &id)));
-    EXPECT_EQ("0x1000", id);
-  }
-
-  EXPECT_FIND_("async local id");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("S", ph);
-
-    std::string id;
-    EXPECT_TRUE((item && item->GetString("id2.local", &id)));
-    EXPECT_EQ("0x2000", id);
-  }
-
-  EXPECT_FIND_("async global id");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("S", ph);
-
-    std::string id;
-    EXPECT_TRUE((item && item->GetString("id2.global", &id)));
-    EXPECT_EQ("0x3000", id);
-  }
-
-  EXPECT_FIND_("async global id with scope string");
-  {
-    std::string ph;
-    EXPECT_TRUE((item && item->GetString("ph", &ph)));
-    EXPECT_EQ("S", ph);
-
-    std::string id;
-    EXPECT_TRUE((item && item->GetString("id2.global", &id)));
-    EXPECT_EQ("0x4000", id);
-    std::string scope;
-    EXPECT_TRUE((item && item->GetString("scope", &scope)));
-    EXPECT_EQ("scope string", scope);
-  }
 }
 
 void TraceManyInstantEvents(int thread_id, int num_events,
@@ -1139,7 +981,7 @@
   for (size_t i = 0; i < trace_parsed_count; i++) {
     const Value* value = NULL;
     trace_parsed.Get(i, &value);
-    if (!value || value->GetType() != Value::Type::DICTIONARY)
+    if (!value || value->GetType() != Value::TYPE_DICTIONARY)
       continue;
     const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
     std::string name;
@@ -1588,6 +1430,59 @@
 }
 
 
+// Test EVENT_WATCH_NOTIFICATION
+TEST_F(TraceEventTestFixture, EventWatchNotification) {
+  // Basic one occurrence.
+  BeginTrace();
+  TraceLog::WatchEventCallback callback =
+      base::Bind(&TraceEventTestFixture::OnWatchEventMatched,
+                 base::Unretained(this));
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 1);
+
+  // Auto-reset after end trace.
+  BeginTrace();
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  EndTraceAndFlush();
+  BeginTrace();
+  TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 0);
+
+  // Multiple occurrence.
+  BeginTrace();
+  int num_occurrences = 5;
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  for (int i = 0; i < num_occurrences; ++i)
+    TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, num_occurrences);
+
+  // Wrong category.
+  BeginTrace();
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  TRACE_EVENT_INSTANT0("wrong_cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 0);
+
+  // Wrong name.
+  BeginTrace();
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  TRACE_EVENT_INSTANT0("cat", "wrong_event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 0);
+
+  // Canceled.
+  BeginTrace();
+  TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+  TraceLog::GetInstance()->CancelWatchEvent();
+  TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+  EndTraceAndFlush();
+  EXPECT_EQ(event_watch_notification_, 0);
+}
+
 // Test ASYNC_BEGIN/END events
 TEST_F(TraceEventTestFixture, AsyncBeginEndEvents) {
   BeginTrace();
@@ -2158,6 +2053,55 @@
   trace_log->SetDisabled();
 }
 
+TEST_F(TraceEventTestFixture, TraceSampling) {
+  TraceLog::GetInstance()->SetEnabled(
+    TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
+    TraceLog::RECORDING_MODE);
+
+  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Stuff");
+  TraceLog::GetInstance()->WaitSamplingEventForTesting();
+  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Things");
+  TraceLog::GetInstance()->WaitSamplingEventForTesting();
+
+  EndTraceAndFlush();
+
+  // Make sure we hit at least once.
+  EXPECT_TRUE(FindNamePhase("Stuff", "P"));
+  EXPECT_TRUE(FindNamePhase("Things", "P"));
+}
+
+TEST_F(TraceEventTestFixture, TraceSamplingScope) {
+  TraceLog::GetInstance()->SetEnabled(
+    TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
+    TraceLog::RECORDING_MODE);
+
+  TRACE_EVENT_SCOPED_SAMPLING_STATE("AAA", "name");
+  TraceLog::GetInstance()->WaitSamplingEventForTesting();
+  {
+    EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+    TRACE_EVENT_SCOPED_SAMPLING_STATE("BBB", "name");
+    TraceLog::GetInstance()->WaitSamplingEventForTesting();
+    EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "BBB");
+  }
+  TraceLog::GetInstance()->WaitSamplingEventForTesting();
+  {
+    EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+    TRACE_EVENT_SCOPED_SAMPLING_STATE("CCC", "name");
+    TraceLog::GetInstance()->WaitSamplingEventForTesting();
+    EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "CCC");
+  }
+  TraceLog::GetInstance()->WaitSamplingEventForTesting();
+  {
+    EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+    TRACE_EVENT_SET_SAMPLING_STATE("DDD", "name");
+    TraceLog::GetInstance()->WaitSamplingEventForTesting();
+    EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
+  }
+  TraceLog::GetInstance()->WaitSamplingEventForTesting();
+  EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
+
+  EndTraceAndFlush();
+}
 
 class MyData : public ConvertableToTraceFormat {
  public:
@@ -2346,7 +2290,7 @@
   dict->GetDictionary("args", &args_dict);
   ASSERT_TRUE(args_dict);
   EXPECT_TRUE(args_dict->Get("float_one", &value));
-  EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
+  EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
   EXPECT_TRUE(value->GetAsDouble(&double_value));
   EXPECT_EQ(1, double_value);
 
@@ -2356,7 +2300,7 @@
   dict->GetDictionary("args", &args_dict);
   ASSERT_TRUE(args_dict);
   EXPECT_TRUE(args_dict->Get("float_half", &value));
-  EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
+  EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
   EXPECT_TRUE(value->GetAsDouble(&double_value));
   EXPECT_EQ(0.5, double_value);
 
@@ -2366,7 +2310,7 @@
   dict->GetDictionary("args", &args_dict);
   ASSERT_TRUE(args_dict);
   EXPECT_TRUE(args_dict->Get("float_neghalf", &value));
-  EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
+  EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
   EXPECT_TRUE(value->GetAsDouble(&double_value));
   EXPECT_EQ(-0.5, double_value);
 
@@ -2536,6 +2480,233 @@
   EXPECT_EQ(args_string, "__stripped__");
 }
 
+class TraceEventCallbackTest : public TraceEventTestFixture {
+ public:
+  void SetUp() override {
+    TraceEventTestFixture::SetUp();
+    ASSERT_EQ(NULL, s_instance);
+    s_instance = this;
+  }
+  void TearDown() override {
+    TraceLog::GetInstance()->SetDisabled();
+    ASSERT_TRUE(s_instance);
+    s_instance = NULL;
+    TraceEventTestFixture::TearDown();
+  }
+
+ protected:
+  // For TraceEventCallbackAndRecordingX tests.
+  void VerifyCallbackAndRecordedEvents(size_t expected_callback_count,
+                                       size_t expected_recorded_count) {
+    // Callback events.
+    EXPECT_EQ(expected_callback_count, collected_events_names_.size());
+    for (size_t i = 0; i < collected_events_names_.size(); ++i) {
+      EXPECT_EQ("callback", collected_events_categories_[i]);
+      EXPECT_EQ("yes", collected_events_names_[i]);
+    }
+
+    // Recorded events.
+    EXPECT_EQ(expected_recorded_count, trace_parsed_.GetSize());
+    EXPECT_TRUE(FindTraceEntry(trace_parsed_, "recording"));
+    EXPECT_FALSE(FindTraceEntry(trace_parsed_, "callback"));
+    EXPECT_TRUE(FindTraceEntry(trace_parsed_, "yes"));
+    EXPECT_FALSE(FindTraceEntry(trace_parsed_, "no"));
+  }
+
+  void VerifyCollectedEvent(size_t i,
+                            unsigned phase,
+                            const std::string& category,
+                            const std::string& name) {
+    EXPECT_EQ(phase, collected_events_phases_[i]);
+    EXPECT_EQ(category, collected_events_categories_[i]);
+    EXPECT_EQ(name, collected_events_names_[i]);
+  }
+
+  std::vector<std::string> collected_events_categories_;
+  std::vector<std::string> collected_events_names_;
+  std::vector<unsigned char> collected_events_phases_;
+  std::vector<TimeTicks> collected_events_timestamps_;
+
+  static TraceEventCallbackTest* s_instance;
+  static void Callback(TimeTicks timestamp,
+                       char phase,
+                       const unsigned char* category_group_enabled,
+                       const char* name,
+                       const char* scope,
+                       unsigned long long id,
+                       int num_args,
+                       const char* const arg_names[],
+                       const unsigned char arg_types[],
+                       const unsigned long long arg_values[],
+                       unsigned int flags) {
+    s_instance->collected_events_phases_.push_back(phase);
+    s_instance->collected_events_categories_.push_back(
+        TraceLog::GetCategoryGroupName(category_group_enabled));
+    s_instance->collected_events_names_.push_back(name);
+    s_instance->collected_events_timestamps_.push_back(timestamp);
+  }
+};
+
+TraceEventCallbackTest* TraceEventCallbackTest::s_instance;
+
+TEST_F(TraceEventCallbackTest, TraceEventCallback) {
+  TRACE_EVENT_INSTANT0("all", "before enable", TRACE_EVENT_SCOPE_THREAD);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ""), Callback);
+  TRACE_EVENT_INSTANT0("all", "event1", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("all", "event2", TRACE_EVENT_SCOPE_GLOBAL);
+  {
+    TRACE_EVENT0("all", "duration");
+    TRACE_EVENT_INSTANT0("all", "event3", TRACE_EVENT_SCOPE_GLOBAL);
+  }
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("all", "after callback removed",
+                       TRACE_EVENT_SCOPE_GLOBAL);
+  ASSERT_EQ(5u, collected_events_names_.size());
+  EXPECT_EQ("event1", collected_events_names_[0]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[0]);
+  EXPECT_EQ("event2", collected_events_names_[1]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[1]);
+  EXPECT_EQ("duration", collected_events_names_[2]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_BEGIN, collected_events_phases_[2]);
+  EXPECT_EQ("event3", collected_events_names_[3]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[3]);
+  EXPECT_EQ("duration", collected_events_names_[4]);
+  EXPECT_EQ(TRACE_EVENT_PHASE_END, collected_events_phases_[4]);
+  for (size_t i = 1; i < collected_events_timestamps_.size(); i++) {
+    EXPECT_LE(collected_events_timestamps_[i - 1],
+              collected_events_timestamps_[i]);
+  }
+}
+
+TEST_F(TraceEventCallbackTest, TraceEventCallbackWhileFull) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  do {
+    TRACE_EVENT_INSTANT0("all", "badger badger", TRACE_EVENT_SCOPE_GLOBAL);
+  } while (!TraceLog::GetInstance()->BufferIsFull());
+  TraceLog::GetInstance()->SetEventCallbackEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ""), Callback);
+  TRACE_EVENT_INSTANT0("all", "a snake", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  ASSERT_EQ(1u, collected_events_names_.size());
+  EXPECT_EQ("a snake", collected_events_names_[0]);
+}
+
+// 1: Enable callback, enable recording, disable callback, disable recording.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording1) {
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
+                                                   Callback);
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  EndTraceAndFlush();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+  DropTracedMetadataRecords();
+  VerifyCallbackAndRecordedEvents(2, 2);
+}
+
+// 2: Enable callback, enable recording, disable recording, disable callback.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording2) {
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
+                                                   Callback);
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  EndTraceAndFlush();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+  DropTracedMetadataRecords();
+  VerifyCallbackAndRecordedEvents(3, 1);
+}
+
+// 3: Enable recording, enable callback, disable callback, disable recording.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording3) {
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
+                                                   Callback);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  EndTraceAndFlush();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+  DropTracedMetadataRecords();
+  VerifyCallbackAndRecordedEvents(1, 3);
+}
+
+// 4: Enable recording, enable callback, disable recording, disable callback.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording4) {
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
+                                                   Callback);
+  TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  EndTraceAndFlush();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+  TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+  TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+  DropTracedMetadataRecords();
+  VerifyCallbackAndRecordedEvents(2, 2);
+}
+
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecordingDuration) {
+  TraceLog::GetInstance()->SetEventCallbackEnabled(
+      TraceConfig(kRecordAllCategoryFilter, ""), Callback);
+  {
+    TRACE_EVENT0("callback", "duration1");
+    TraceLog::GetInstance()->SetEnabled(
+        TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
+    TRACE_EVENT0("callback", "duration2");
+    EndTraceAndFlush();
+    TRACE_EVENT0("callback", "duration3");
+  }
+  TraceLog::GetInstance()->SetEventCallbackDisabled();
+
+  ASSERT_EQ(6u, collected_events_names_.size());
+  VerifyCollectedEvent(0, TRACE_EVENT_PHASE_BEGIN, "callback", "duration1");
+  VerifyCollectedEvent(1, TRACE_EVENT_PHASE_BEGIN, "callback", "duration2");
+  VerifyCollectedEvent(2, TRACE_EVENT_PHASE_BEGIN, "callback", "duration3");
+  VerifyCollectedEvent(3, TRACE_EVENT_PHASE_END, "callback", "duration3");
+  VerifyCollectedEvent(4, TRACE_EVENT_PHASE_END, "callback", "duration2");
+  VerifyCollectedEvent(5, TRACE_EVENT_PHASE_END, "callback", "duration1");
+}
+
 TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
   TraceLog* trace_log = TraceLog::GetInstance();
   trace_log->SetEnabled(
@@ -2544,9 +2715,9 @@
       TraceBuffer::CreateTraceBufferVectorOfSize(100));
   do {
     TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
-        "all", "with_timestamp", 0, 0, TimeTicks::Now());
+        "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
     TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
-        "all", "with_timestamp", 0, 0, TimeTicks::Now());
+        "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
   } while (!trace_log->BufferIsFull());
 
   EndTraceAndFlush();
@@ -2757,9 +2928,29 @@
             trace_log->GetInternalOptionsFromTraceConfig(
                 TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE)));
 
-  EXPECT_EQ(TraceLog::kInternalEchoToConsole,
-            trace_log->GetInternalOptionsFromTraceConfig(
-                TraceConfig("*", "trace-to-console,enable-systrace")));
+  EXPECT_EQ(
+      TraceLog::kInternalRecordUntilFull | TraceLog::kInternalEnableSampling,
+      trace_log->GetInternalOptionsFromTraceConfig(
+          TraceConfig(kRecordAllCategoryFilter,
+                      "record-until-full,enable-sampling")));
+
+  EXPECT_EQ(
+      TraceLog::kInternalRecordContinuously | TraceLog::kInternalEnableSampling,
+      trace_log->GetInternalOptionsFromTraceConfig(
+          TraceConfig(kRecordAllCategoryFilter,
+                      "record-continuously,enable-sampling")));
+
+  EXPECT_EQ(
+      TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
+      trace_log->GetInternalOptionsFromTraceConfig(
+          TraceConfig(kRecordAllCategoryFilter,
+                      "trace-to-console,enable-sampling")));
+
+  EXPECT_EQ(
+      TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
+      trace_log->GetInternalOptionsFromTraceConfig(
+          TraceConfig("*",
+                      "trace-to-console,enable-sampling,enable-systrace")));
 }
 
 void SetBlockingFlagAndBlockUntilStopped(WaitableEvent* task_start_event,
@@ -2918,9 +3109,9 @@
     TRACE_EVENT0("all", "duration2");
   }
   TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
-      "all", "with_timestamp", 0, 0, TimeTicks::Now());
+      "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
   TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
-      "all", "with_timestamp", 0, 0, TimeTicks::Now());
+      "all", "with_timestamp", 0, 0, TimeTicks::Now().ToInternalValue());
 
   EndTraceAndFlush();
   DropTracedMetadataRecords();
@@ -2982,213 +3173,6 @@
   EXPECT_EQ(filter, config.ToCategoryFilterString());
 }
 
-TEST_F(TraceEventTestFixture, TraceFilteringMode) {
-  const char config_json[] =
-      "{"
-      "  \"event_filters\": ["
-      "     {"
-      "       \"filter_predicate\": \"testing_predicate\", "
-      "       \"included_categories\": [\"*\"]"
-      "     }"
-      "  ]"
-      "}";
-
-  // Run RECORDING_MODE within FILTERING_MODE:
-  TestEventFilter::HitsCounter filter_hits_counter;
-  TestEventFilter::set_filter_return_value(true);
-  TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
-
-  // Only filtering mode is enabled with test filters.
-  TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
-                                      TraceLog::FILTERING_MODE);
-  EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
-  {
-    void* ptr = this;
-    TRACE_EVENT0("c0", "name0");
-    TRACE_EVENT_ASYNC_BEGIN0("c1", "name1", ptr);
-    TRACE_EVENT_INSTANT0("c0", "name0", TRACE_EVENT_SCOPE_THREAD);
-    TRACE_EVENT_ASYNC_END0("c1", "name1", ptr);
-  }
-
-  // Recording mode is enabled when filtering mode is turned on.
-  TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
-                                      TraceLog::RECORDING_MODE);
-  EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
-            TraceLog::GetInstance()->enabled_modes());
-  {
-    TRACE_EVENT0("c2", "name2");
-  }
-  // Only recording mode is disabled and filtering mode will continue to run.
-  TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
-  EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
-
-  {
-    TRACE_EVENT0("c0", "name0");
-  }
-  // Filtering mode is disabled and no tracing mode should be enabled.
-  TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
-  EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
-
-  EndTraceAndFlush();
-  EXPECT_FALSE(FindMatchingValue("cat", "c0"));
-  EXPECT_FALSE(FindMatchingValue("cat", "c1"));
-  EXPECT_FALSE(FindMatchingValue("name", "name0"));
-  EXPECT_FALSE(FindMatchingValue("name", "name1"));
-  EXPECT_TRUE(FindMatchingValue("cat", "c2"));
-  EXPECT_TRUE(FindMatchingValue("name", "name2"));
-  EXPECT_EQ(6u, filter_hits_counter.filter_trace_event_hit_count);
-  EXPECT_EQ(3u, filter_hits_counter.end_event_hit_count);
-  Clear();
-  filter_hits_counter.Reset();
-
-  // Run FILTERING_MODE within RECORDING_MODE:
-  // Only recording mode is enabled and all events must be recorded.
-  TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
-                                      TraceLog::RECORDING_MODE);
-  EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
-  {
-    TRACE_EVENT0("c0", "name0");
-  }
-
-  // Filtering mode is also enabled and all events must be filtered-out.
-  TestEventFilter::set_filter_return_value(false);
-  TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
-                                      TraceLog::FILTERING_MODE);
-  EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
-            TraceLog::GetInstance()->enabled_modes());
-  {
-    TRACE_EVENT0("c1", "name1");
-  }
-  // Only filtering mode is disabled and recording mode should continue to run
-  // with all events being recorded.
-  TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
-  EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
-
-  {
-    TRACE_EVENT0("c2", "name2");
-  }
-  // Recording mode is disabled and no tracing mode should be enabled.
-  TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
-  EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
-
-  EndTraceAndFlush();
-  EXPECT_TRUE(FindMatchingValue("cat", "c0"));
-  EXPECT_TRUE(FindMatchingValue("cat", "c2"));
-  EXPECT_TRUE(FindMatchingValue("name", "name0"));
-  EXPECT_TRUE(FindMatchingValue("name", "name2"));
-  EXPECT_FALSE(FindMatchingValue("cat", "c1"));
-  EXPECT_FALSE(FindMatchingValue("name", "name1"));
-  EXPECT_EQ(1u, filter_hits_counter.filter_trace_event_hit_count);
-  EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
-  Clear();
-}
-
-TEST_F(TraceEventTestFixture, EventFiltering) {
-  const char config_json[] =
-      "{"
-      "  \"included_categories\": ["
-      "    \"filtered_cat\","
-      "    \"unfiltered_cat\"],"
-      "  \"event_filters\": ["
-      "     {"
-      "       \"filter_predicate\": \"testing_predicate\", "
-      "       \"included_categories\": [\"filtered_cat\"]"
-      "     }"
-      "    "
-      "  ]"
-      "}";
-
-  TestEventFilter::HitsCounter filter_hits_counter;
-  TestEventFilter::set_filter_return_value(true);
-  TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
-
-  TraceConfig trace_config(config_json);
-  TraceLog::GetInstance()->SetEnabled(
-      trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
-  ASSERT_TRUE(TraceLog::GetInstance()->IsEnabled());
-
-  TRACE_EVENT0("filtered_cat", "a snake");
-  TRACE_EVENT0("filtered_cat", "a mushroom");
-  TRACE_EVENT0("unfiltered_cat", "a horse");
-
-  // This is scoped so we can test the end event being filtered.
-  { TRACE_EVENT0("filtered_cat", "another cat whoa"); }
-
-  EndTraceAndFlush();
-
-  EXPECT_EQ(3u, filter_hits_counter.filter_trace_event_hit_count);
-  EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
-}
-
-TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
-  std::string config_json = StringPrintf(
-      "{"
-      "  \"included_categories\": ["
-      "    \"filtered_cat\","
-      "    \"unfiltered_cat\"],"
-      "  \"event_filters\": ["
-      "     {"
-      "       \"filter_predicate\": \"%s\", "
-      "       \"included_categories\": [\"*\"], "
-      "       \"excluded_categories\": [\"unfiltered_cat\"], "
-      "       \"filter_args\": {"
-      "           \"event_name_whitelist\": [\"a snake\", \"a dog\"]"
-      "         }"
-      "     }"
-      "    "
-      "  ]"
-      "}",
-      EventNameFilter::kName);
-
-  TraceConfig trace_config(config_json);
-  TraceLog::GetInstance()->SetEnabled(
-      trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
-  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
-
-  TRACE_EVENT0("filtered_cat", "a snake");
-  TRACE_EVENT0("filtered_cat", "a mushroom");
-  TRACE_EVENT0("unfiltered_cat", "a cat");
-
-  EndTraceAndFlush();
-
-  EXPECT_TRUE(FindMatchingValue("name", "a snake"));
-  EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
-  EXPECT_TRUE(FindMatchingValue("name", "a cat"));
-}
-
-TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
-  std::string config_json = StringPrintf(
-      "{"
-      "  \"included_categories\": ["
-      "    \"filtered_cat\","
-      "    \"unfiltered_cat\"],"
-      "  \"excluded_categories\": [\"excluded_cat\"],"
-      "  \"event_filters\": ["
-      "     {"
-      "       \"filter_predicate\": \"%s\", "
-      "       \"included_categories\": [\"*\"]"
-      "     }"
-      "  ]"
-      "}",
-      HeapProfilerEventFilter::kName);
-
-  TraceConfig trace_config(config_json);
-  TraceLog::GetInstance()->SetEnabled(
-      trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
-  EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
-
-  TRACE_EVENT0("filtered_cat", "a snake");
-  TRACE_EVENT0("excluded_cat", "a mushroom");
-  TRACE_EVENT0("unfiltered_cat", "a cat");
-
-  EndTraceAndFlush();
-
-  // The predicate should not change behavior of the trace events.
-  EXPECT_TRUE(FindMatchingValue("name", "a snake"));
-  EXPECT_FALSE(FindMatchingValue("name", "a mushroom"));
-  EXPECT_TRUE(FindMatchingValue("name", "a cat"));
-}
-
 TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
   BeginSpecificTrace("-*");
   TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 10b090a..12cebc6 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -13,43 +13,41 @@
 #include "base/bind.h"
 #include "base/command_line.h"
 #include "base/debug/leak_annotations.h"
+#include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/macros.h"
-#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted_memory.h"
 #include "base/memory/singleton.h"
-#include "base/message_loop/message_loop.h"
 #include "base/process/process_metrics.h"
 #include "base/stl_util.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_tokenizer.h"
 #include "base/strings/stringprintf.h"
 #include "base/sys_info.h"
-// post_task.h pulls in a lot of code not needed on Arc++.
-#if 0
-#include "base/task_scheduler/post_task.h"
-#endif
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_id_name_manager.h"
 #include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/worker_pool.h"
 #include "base/time/time.h"
-#include "base/trace_event/category_registry.h"
-#include "base/trace_event/event_name_filter.h"
 #include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
-#include "base/trace_event/heap_profiler_event_filter.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_buffer.h"
 #include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_synthetic_delay.h"
+#include "base/trace_event/trace_sampling_thread.h"
 #include "build/build_config.h"
 
 #if defined(OS_WIN)
 #include "base/trace_event/trace_event_etw_export_win.h"
 #endif
 
+// The thread buckets for the sampling profiler.
+BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
 namespace base {
 namespace internal {
 
@@ -88,13 +86,35 @@
 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
 const int kThreadFlushTimeoutMs = 3000;
 
-#define MAX_TRACE_EVENT_FILTERS 32
+#define MAX_CATEGORY_GROUPS 200
 
-// List of TraceEventFilter objects from the most recent tracing session.
-std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
-  static auto* filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
-  return *filters;
-}
+// Parallel arrays g_category_groups and g_category_group_enabled are separate
+// so that a pointer to a member of g_category_group_enabled can be easily
+// converted to an index into g_category_groups. This allows macros to deal
+// only with char enabled pointers from g_category_group_enabled, and we can
+// convert internally to determine the category name from the char enabled
+// pointer.
+const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
+    "toplevel",
+    "tracing already shutdown",
+    "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
+    "__metadata"};
+
+// The enabled flag is char instead of bool so that the API can be used from C.
+unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
+// Indexes here have to match the g_category_groups array indexes above.
+const int g_category_already_shutdown = 1;
+const int g_category_categories_exhausted = 2;
+const int g_category_metadata = 3;
+const int g_num_builtin_categories = 4;
+// Skip default categories.
+base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
+
+// The name of the current thread. This is used to decide if the current
+// thread name has changed. We combine all the seen thread names into the
+// output name for the thread.
+LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
+    LAZY_INSTANCE_INITIALIZER;
 
 ThreadTicks ThreadNow() {
   return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
@@ -118,7 +138,7 @@
       TimeTicks(),
       ThreadTicks(),
       TRACE_EVENT_PHASE_METADATA,
-      CategoryRegistry::kCategoryMetadata->state_ptr(),
+      &g_category_group_enabled[g_category_metadata],
       metadata_name,
       trace_event_internal::kGlobalScope,  // scope
       trace_event_internal::kNoId,  // id
@@ -154,24 +174,11 @@
   DCHECK(chunk_seq);
   DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
   DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
-  DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
   handle->chunk_seq = chunk_seq;
   handle->chunk_index = static_cast<uint16_t>(chunk_index);
   handle->event_index = static_cast<uint16_t>(event_index);
 }
 
-template <typename Function>
-void ForEachCategoryFilter(const unsigned char* category_group_enabled,
-                           Function filter_fn) {
-  const TraceCategory* category =
-      CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
-  uint32_t filter_bitmap = category->enabled_filters();
-  for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
-    if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
-      filter_fn(GetCategoryGroupFilters()[index].get());
-  }
-}
-
 }  // namespace
 
 // A helper class that allows the lock to be acquired in the middle of the scope
@@ -345,20 +352,33 @@
 }
 
 TraceLog::TraceLog()
-    : enabled_modes_(0),
+    : mode_(DISABLED),
       num_traces_recorded_(0),
+      event_callback_(0),
       dispatching_to_observer_list_(false),
       process_sort_index_(0),
       process_id_hash_(0),
       process_id_(0),
+      watch_category_(0),
       trace_options_(kInternalRecordUntilFull),
+      sampling_thread_handle_(0),
       trace_config_(TraceConfig()),
+      event_callback_trace_config_(TraceConfig()),
       thread_shared_chunk_index_(0),
       generation_(0),
-      use_worker_thread_(false),
-      filter_factory_for_testing_(nullptr) {
-  CategoryRegistry::Initialize();
-
+      use_worker_thread_(false) {
+  // Trace is enabled or disabled on one thread while other threads are
+  // accessing the enabled flag. We don't care whether edge-case events are
+  // traced or not, so we allow races on the enabled flag to keep the trace
+  // macros fast.
+  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
+  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
+  //                            sizeof(g_category_group_enabled),
+  //                           "trace_event category enabled");
+  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
+    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
+                         "trace_event category enabled");
+  }
 #if defined(OS_NACL)  // NaCl shouldn't expose the process id.
   SetProcessID(0);
 #else
@@ -394,9 +414,7 @@
   }
 }
 
-bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
-                            ProcessMemoryDump* pmd) {
-  ALLOW_UNUSED_PARAM(args);
+bool TraceLog::OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump* pmd) {
   // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
   // (crbug.com/499731).
   TraceEventMemoryOverhead overhead;
@@ -418,111 +436,61 @@
     const char* category_group) {
   TraceLog* tracelog = GetInstance();
   if (!tracelog) {
-    DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
-    return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
+    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
+    return &g_category_group_enabled[g_category_already_shutdown];
   }
-  TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
-  if (!category) {
-    // Slow path: in the case of a new category we have to repeat the check
-    // holding the lock, as multiple threads might have reached this point
-    // at the same time.
-    auto category_initializer = [](TraceCategory* category) {
-      TraceLog::GetInstance()->UpdateCategoryState(category);
-    };
-    AutoLock lock(tracelog->lock_);
-    CategoryRegistry::GetOrCreateCategoryLocked(
-        category_group, category_initializer, &category);
-  }
-  DCHECK(category->state_ptr());
-  return category->state_ptr();
+  return tracelog->GetCategoryGroupEnabledInternal(category_group);
 }
 
 const char* TraceLog::GetCategoryGroupName(
     const unsigned char* category_group_enabled) {
-  return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
-      ->name();
+  // Calculate the index of the category group by finding
+  // category_group_enabled in g_category_group_enabled array.
+  uintptr_t category_begin =
+      reinterpret_cast<uintptr_t>(g_category_group_enabled);
+  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
+  DCHECK(category_ptr >= category_begin &&
+         category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
+                                                    MAX_CATEGORY_GROUPS))
+      << "out of bounds category pointer";
+  uintptr_t category_index =
+      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
+  return g_category_groups[category_index];
 }
 
-void TraceLog::UpdateCategoryState(TraceCategory* category) {
-  lock_.AssertAcquired();
-  DCHECK(category->is_valid());
-  unsigned char state_flags = 0;
-  if (enabled_modes_ & RECORDING_MODE &&
-      trace_config_.IsCategoryGroupEnabled(category->name())) {
-    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
+void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
+  unsigned char enabled_flag = 0;
+  const char* category_group = g_category_groups[category_index];
+  if (mode_ == RECORDING_MODE &&
+      trace_config_.IsCategoryGroupEnabled(category_group)) {
+    enabled_flag |= ENABLED_FOR_RECORDING;
   }
 
-  // TODO(primiano): this is a temporary workaround for catapult:#2341,
-  // to guarantee that metadata events are always added even if the category
-  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
-  if (enabled_modes_ & RECORDING_MODE &&
-      category == CategoryRegistry::kCategoryMetadata) {
-    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
+  if (event_callback_ &&
+      event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) {
+    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
   }
 
 #if defined(OS_WIN)
   if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
-          category->name())) {
-    state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
+          category_group)) {
+    enabled_flag |= ENABLED_FOR_ETW_EXPORT;
   }
 #endif
 
-  uint32_t enabled_filters_bitmap = 0;
-  int index = 0;
-  for (const auto& event_filter : enabled_event_filters_) {
-    if (event_filter.IsCategoryGroupEnabled(category->name())) {
-      state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
-      DCHECK(GetCategoryGroupFilters()[index]);
-      enabled_filters_bitmap |= 1 << index;
-    }
-    if (index++ >= MAX_TRACE_EVENT_FILTERS) {
-      NOTREACHED();
-      break;
-    }
-  }
-  category->set_enabled_filters(enabled_filters_bitmap);
-  category->set_state(state_flags);
+  // TODO(primiano): this is a temporary workaround for catapult:#2341,
+  // to guarantee that metadata events are always added even if the category
+  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+  if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
+    enabled_flag |= ENABLED_FOR_RECORDING;
+
+  g_category_group_enabled[category_index] = enabled_flag;
 }
 
-void TraceLog::UpdateCategoryRegistry() {
-  lock_.AssertAcquired();
-  CreateFiltersForTraceConfig();
-  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
-    UpdateCategoryState(&category);
-  }
-}
-
-void TraceLog::CreateFiltersForTraceConfig() {
-  if (!(enabled_modes_ & FILTERING_MODE))
-    return;
-
-  // Filters were already added and tracing could be enabled. Filters list
-  // cannot be changed when trace events are using them.
-  if (GetCategoryGroupFilters().size())
-    return;
-
-  for (auto& filter_config : enabled_event_filters_) {
-    if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
-      NOTREACHED()
-          << "Too many trace event filters installed in the current session";
-      break;
-    }
-
-    std::unique_ptr<TraceEventFilter> new_filter;
-    const std::string& predicate_name = filter_config.predicate_name();
-    if (predicate_name == EventNameFilter::kName) {
-      auto whitelist = MakeUnique<std::unordered_set<std::string>>();
-      CHECK(filter_config.GetArgAsSet("event_name_whitelist", &*whitelist));
-      new_filter = MakeUnique<EventNameFilter>(std::move(whitelist));
-    } else if (predicate_name == HeapProfilerEventFilter::kName) {
-      new_filter = MakeUnique<HeapProfilerEventFilter>();
-    } else {
-      if (filter_factory_for_testing_)
-        new_filter = filter_factory_for_testing_(predicate_name);
-      CHECK(new_filter) << "Unknown trace filter " << predicate_name;
-    }
-    GetCategoryGroupFilters().push_back(std::move(new_filter));
-  }
+void TraceLog::UpdateCategoryGroupEnabledFlags() {
+  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+  for (size_t i = 0; i < category_index; i++)
+    UpdateCategoryGroupEnabledFlag(i);
 }
 
 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
@@ -554,16 +522,67 @@
   }
 }
 
-void TraceLog::GetKnownCategoryGroups(
-    std::vector<std::string>* category_groups) {
-  for (const auto& category : CategoryRegistry::GetAllCategories()) {
-    if (!CategoryRegistry::IsBuiltinCategory(&category))
-      category_groups->push_back(category.name());
+const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
+    const char* category_group) {
+  DCHECK(!strchr(category_group, '"'))
+      << "Category groups may not contain double quote";
+  // The g_category_groups is append only, avoid using a lock for the fast path.
+  size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
+
+  // Search for pre-existing category group.
+  for (size_t i = 0; i < current_category_index; ++i) {
+    if (strcmp(g_category_groups[i], category_group) == 0) {
+      return &g_category_group_enabled[i];
+    }
   }
+
+  unsigned char* category_group_enabled = NULL;
+  // This is the slow path: the lock is not held in the case above, so more
+  // than one thread could have reached here trying to add the same category.
+  // Only hold to lock when actually appending a new category, and
+  // check the categories groups again.
+  AutoLock lock(lock_);
+  size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+  for (size_t i = 0; i < category_index; ++i) {
+    if (strcmp(g_category_groups[i], category_group) == 0) {
+      return &g_category_group_enabled[i];
+    }
+  }
+
+  // Create a new category group.
+  DCHECK(category_index < MAX_CATEGORY_GROUPS)
+      << "must increase MAX_CATEGORY_GROUPS";
+  if (category_index < MAX_CATEGORY_GROUPS) {
+    // Don't hold on to the category_group pointer, so that we can create
+    // category groups with strings not known at compile time (this is
+    // required by SetWatchEvent).
+    const char* new_group = strdup(category_group);
+    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
+    g_category_groups[category_index] = new_group;
+    DCHECK(!g_category_group_enabled[category_index]);
+    // Note that if both included and excluded patterns in the
+    // TraceConfig are empty, we exclude nothing,
+    // thereby enabling this category group.
+    UpdateCategoryGroupEnabledFlag(category_index);
+    category_group_enabled = &g_category_group_enabled[category_index];
+    // Update the max index now.
+    base::subtle::Release_Store(&g_category_index, category_index + 1);
+  } else {
+    category_group_enabled =
+        &g_category_group_enabled[g_category_categories_exhausted];
+  }
+  return category_group_enabled;
 }
 
-void TraceLog::SetEnabled(const TraceConfig& trace_config,
-                          uint8_t modes_to_enable) {
+void TraceLog::GetKnownCategoryGroups(
+    std::vector<std::string>* category_groups) {
+  AutoLock lock(lock_);
+  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+  for (size_t i = g_num_builtin_categories; i < category_index; i++)
+    category_groups->push_back(g_category_groups[i]);
+}
+
+void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
   std::vector<EnabledStateObserver*> observer_list;
   std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
   {
@@ -577,58 +596,28 @@
 
     InternalTraceOptions old_options = trace_options();
 
+    if (IsEnabled()) {
+      if (new_options != old_options) {
+        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
+                    << "set of options.";
+      }
+
+      if (mode != mode_) {
+        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
+      }
+
+      trace_config_.Merge(trace_config);
+      UpdateCategoryGroupEnabledFlags();
+      return;
+    }
+
     if (dispatching_to_observer_list_) {
-      // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
       DLOG(ERROR)
           << "Cannot manipulate TraceLog::Enabled state from an observer.";
       return;
     }
 
-    // Clear all filters from previous tracing session. These filters are not
-    // cleared at the end of tracing because some threads which hit trace event
-    // when disabling, could try to use the filters.
-    if (!enabled_modes_)
-      GetCategoryGroupFilters().clear();
-
-    // Update trace config for recording.
-    const bool already_recording = enabled_modes_ & RECORDING_MODE;
-    if (modes_to_enable & RECORDING_MODE) {
-      if (already_recording) {
-        // TODO(ssid): Stop suporting enabling of RECODING_MODE when already
-        // enabled crbug.com/625170.
-        DCHECK_EQ(new_options, old_options) << "Attempting to re-enable "
-                                               "tracing with a different set "
-                                               "of options.";
-        trace_config_.Merge(trace_config);
-      } else {
-        trace_config_ = trace_config;
-      }
-    }
-
-    // Update event filters.
-    if (modes_to_enable & FILTERING_MODE) {
-      DCHECK(!trace_config.event_filters().empty())
-          << "Attempting to enable filtering without any filters";
-      DCHECK(enabled_event_filters_.empty()) << "Attempting to re-enable "
-                                                "filtering when filters are "
-                                                "already enabled.";
-
-      // Use the given event filters only if filtering was not enabled.
-      if (enabled_event_filters_.empty())
-        enabled_event_filters_ = trace_config.event_filters();
-    }
-    // Keep the |trace_config_| updated with only enabled filters in case anyone
-    // tries to read it using |GetCurrentTraceConfig| (even if filters are
-    // empty).
-    trace_config_.SetEventFilters(enabled_event_filters_);
-
-    enabled_modes_ |= modes_to_enable;
-    UpdateCategoryRegistry();
-
-    // Do not notify observers or create trace buffer if only enabled for
-    // filtering or if recording was already enabled.
-    if (!(modes_to_enable & RECORDING_MODE) || already_recording)
-      return;
+    mode_ = mode;
 
     if (new_options != old_options) {
       subtle::NoBarrier_Store(&trace_options_, new_options);
@@ -637,16 +626,34 @@
 
     num_traces_recorded_++;
 
-    UpdateCategoryRegistry();
+    trace_config_ = TraceConfig(trace_config);
+    UpdateCategoryGroupEnabledFlags();
     UpdateSyntheticDelaysFromTraceConfig();
 
+    if (new_options & kInternalEnableSampling) {
+      sampling_thread_.reset(new TraceSamplingThread);
+      sampling_thread_->RegisterSampleBucket(
+          &g_trace_state[0], "bucket0",
+          Bind(&TraceSamplingThread::DefaultSamplingCallback));
+      sampling_thread_->RegisterSampleBucket(
+          &g_trace_state[1], "bucket1",
+          Bind(&TraceSamplingThread::DefaultSamplingCallback));
+      sampling_thread_->RegisterSampleBucket(
+          &g_trace_state[2], "bucket2",
+          Bind(&TraceSamplingThread::DefaultSamplingCallback));
+      if (!PlatformThread::Create(0, sampling_thread_.get(),
+                                  &sampling_thread_handle_)) {
+        DCHECK(false) << "failed to create thread";
+      }
+    }
+
     dispatching_to_observer_list_ = true;
     observer_list = enabled_state_observer_list_;
     observer_map = async_observers_;
   }
   // Notify observers outside the lock in case they trigger trace events.
-  for (EnabledStateObserver* observer : observer_list)
-    observer->OnTraceLogEnabled();
+  for (size_t i = 0; i < observer_list.size(); ++i)
+    observer_list[i]->OnTraceLogEnabled();
   for (const auto& it : observer_map) {
     it.second.task_runner->PostTask(
         FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled,
@@ -669,9 +676,10 @@
 
 TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
     const TraceConfig& config) {
-  InternalTraceOptions ret = config.IsArgumentFilterEnabled()
-                                 ? kInternalEnableArgumentFilter
-                                 : kInternalNone;
+  InternalTraceOptions ret =
+      config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
+  if (config.IsArgumentFilterEnabled())
+    ret |= kInternalEnableArgumentFilter;
   switch (config.GetTraceRecordMode()) {
     case RECORD_UNTIL_FULL:
       return ret | kInternalRecordUntilFull;
@@ -693,44 +701,37 @@
 
 void TraceLog::SetDisabled() {
   AutoLock lock(lock_);
-  SetDisabledWhileLocked(RECORDING_MODE);
+  SetDisabledWhileLocked();
 }
 
-void TraceLog::SetDisabled(uint8_t modes_to_disable) {
-  AutoLock lock(lock_);
-  SetDisabledWhileLocked(modes_to_disable);
-}
-
-void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
+void TraceLog::SetDisabledWhileLocked() {
   lock_.AssertAcquired();
 
-  if (!(enabled_modes_ & modes_to_disable))
+  if (!IsEnabled())
     return;
 
   if (dispatching_to_observer_list_) {
-    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
     DLOG(ERROR)
         << "Cannot manipulate TraceLog::Enabled state from an observer.";
     return;
   }
 
-  bool is_recording_mode_disabled =
-      (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
-  enabled_modes_ &= ~modes_to_disable;
+  mode_ = DISABLED;
 
-  if (modes_to_disable & FILTERING_MODE)
-    enabled_event_filters_.clear();
+  if (sampling_thread_.get()) {
+    // Stop the sampling thread.
+    sampling_thread_->Stop();
+    lock_.Release();
+    PlatformThread::Join(sampling_thread_handle_);
+    lock_.Acquire();
+    sampling_thread_handle_ = PlatformThreadHandle();
+    sampling_thread_.reset();
+  }
 
-  if (modes_to_disable & RECORDING_MODE)
-    trace_config_.Clear();
-
-  UpdateCategoryRegistry();
-
-  // Add metadata events and notify observers only if recording mode was
-  // disabled now.
-  if (!is_recording_mode_disabled)
-    return;
-
+  trace_config_.Clear();
+  subtle::NoBarrier_Store(&watch_category_, 0);
+  watch_event_name_ = "";
+  UpdateCategoryGroupEnabledFlags();
   AddMetadataEventsWhileLocked();
 
   // Remove metadata events so they will not get added to a subsequent trace.
@@ -746,8 +747,8 @@
     // Dispatch to observers outside the lock in case the observer triggers a
     // trace event.
     AutoUnlock unlock(lock_);
-    for (EnabledStateObserver* observer : observer_list)
-      observer->OnTraceLogDisabled();
+    for (size_t i = 0; i < observer_list.size(); ++i)
+      observer_list[i]->OnTraceLogDisabled();
     for (const auto& it : observer_map) {
       it.second.task_runner->PostTask(
           FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled,
@@ -830,10 +831,25 @@
     if (buffer_limit_reached_timestamp_.is_null()) {
       buffer_limit_reached_timestamp_ = OffsetNow();
     }
-    SetDisabledWhileLocked(RECORDING_MODE);
+    SetDisabledWhileLocked();
   }
 }
 
+void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
+                                       EventCallback cb) {
+  AutoLock lock(lock_);
+  subtle::NoBarrier_Store(&event_callback_,
+                          reinterpret_cast<subtle::AtomicWord>(cb));
+  event_callback_trace_config_ = trace_config;
+  UpdateCategoryGroupEnabledFlags();
+}
+
+void TraceLog::SetEventCallbackDisabled() {
+  AutoLock lock(lock_);
+  subtle::NoBarrier_Store(&event_callback_, 0);
+  UpdateCategoryGroupEnabledFlags();
+}
+
 // Flush() works as the following:
 // 1. Flush() is called in thread A whose task runner is saved in
 //    flush_task_runner_;
@@ -870,7 +886,7 @@
     return;
   }
 
-  int gen = generation();
+  int generation = this->generation();
   // Copy of thread_message_loops_ to be used without locking.
   std::vector<scoped_refptr<SingleThreadTaskRunner>>
       thread_message_loop_task_runners;
@@ -888,24 +904,29 @@
                                   std::move(thread_shared_chunk_));
     }
 
-    for (MessageLoop* loop : thread_message_loops_)
-      thread_message_loop_task_runners.push_back(loop->task_runner());
+    if (thread_message_loops_.size()) {
+      for (hash_set<MessageLoop*>::const_iterator it =
+               thread_message_loops_.begin();
+           it != thread_message_loops_.end(); ++it) {
+        thread_message_loop_task_runners.push_back((*it)->task_runner());
+      }
+    }
   }
 
-  if (!thread_message_loop_task_runners.empty()) {
-    for (auto& task_runner : thread_message_loop_task_runners) {
-      task_runner->PostTask(
+  if (thread_message_loop_task_runners.size()) {
+    for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
+      thread_message_loop_task_runners[i]->PostTask(
           FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
-                          gen, discard_events));
+                          generation, discard_events));
     }
     flush_task_runner_->PostDelayedTask(
-        FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), gen,
+        FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
                         discard_events),
         TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
     return;
   }
 
-  FinishFlush(gen, discard_events);
+  FinishFlush(generation, discard_events);
 }
 
 // Usually it runs on a different thread.
@@ -969,21 +990,13 @@
     return;
   }
 
-  if (use_worker_thread_) {
-#if 0
-    base::PostTaskWithTraits(
-        FROM_HERE, base::TaskTraits()
-                       .MayBlock()
-                       .WithPriority(base::TaskPriority::BACKGROUND)
-                       .WithShutdownBehavior(
-                           base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
-        Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
-             Passed(&previous_logged_events), flush_output_callback,
-             argument_filter_predicate));
+  if (use_worker_thread_ &&
+      WorkerPool::PostTask(
+          FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
+                          Passed(&previous_logged_events),
+                          flush_output_callback, argument_filter_predicate),
+          true)) {
     return;
-#else
-    NOTREACHED();
-#endif
   }
 
   ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
@@ -1006,7 +1019,7 @@
 
   AutoLock lock(lock_);
   if (!CheckGeneration(generation) || !flush_task_runner_ ||
-      !thread_message_loops_.empty())
+      thread_message_loops_.size())
     return;
 
   flush_task_runner_->PostTask(
@@ -1210,13 +1223,10 @@
   TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
   ThreadTicks thread_now = ThreadNow();
 
-  ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
-  if (*category_group_enabled & RECORDING_MODE) {
-    // |thread_local_event_buffer_| can be null if the current thread doesn't
-    // have a message loop or the message loop is blocked.
-    InitializeThreadLocalEventBufferIfSupported();
-    thread_local_event_buffer = thread_local_event_buffer_.Get();
-  }
+  // |thread_local_event_buffer_| can be null if the current thread doesn't have
+  // a message loop or the message loop is blocked.
+  InitializeThreadLocalEventBufferIfSupported();
+  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
 
   // Check and update the current thread name only if the event is for the
   // current thread to avoid locks in most cases.
@@ -1227,9 +1237,9 @@
     // call (if any), but don't bother if the new name is empty. Note this will
     // not detect a thread name change within the same char* buffer address: we
     // favor common case performance over corner case correctness.
-    static auto* current_thread_name = new ThreadLocalPointer<const char>();
-    if (new_name != current_thread_name->Get() && new_name && *new_name) {
-      current_thread_name->Set(new_name);
+    if (new_name != g_current_thread_name.Get().Get() && new_name &&
+        *new_name) {
+      g_current_thread_name.Get().Set(new_name);
 
       AutoLock thread_info_lock(thread_info_lock_);
 
@@ -1247,7 +1257,7 @@
         bool found = std::find(existing_names.begin(), existing_names.end(),
                                new_name) != existing_names.end();
         if (!found) {
-          if (!existing_names.empty())
+          if (existing_names.size())
             existing_name->second.push_back(',');
           existing_name->second.append(new_name);
         }
@@ -1258,37 +1268,14 @@
 #if defined(OS_WIN)
   // This is done sooner rather than later, to avoid creating the event and
   // acquiring the lock, which is not needed for ETW as it's already threadsafe.
-  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
+  if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
     TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                   num_args, arg_names, arg_types, arg_values,
                                   convertable_values);
 #endif  // OS_WIN
 
   std::string console_message;
-  std::unique_ptr<TraceEvent> filtered_trace_event;
-  bool disabled_by_filters = false;
-  if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
-    std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
-    new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
-                                phase, category_group_enabled, name, scope, id,
-                                bind_id, num_args, arg_names, arg_types,
-                                arg_values, convertable_values, flags);
-
-    disabled_by_filters = true;
-    ForEachCategoryFilter(
-        category_group_enabled, [&new_trace_event, &disabled_by_filters](
-                                    TraceEventFilter* trace_event_filter) {
-          if (trace_event_filter->FilterTraceEvent(*new_trace_event))
-            disabled_by_filters = false;
-        });
-    if (!disabled_by_filters)
-      filtered_trace_event = std::move(new_trace_event);
-  }
-
-  // If enabled for recording, the event should be added only if one of the
-  // filters indicates or category is not enabled for filtering.
-  if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
-      !disabled_by_filters) {
+  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
     OptionalAutoLock lock(&lock_);
 
     TraceEvent* trace_event = NULL;
@@ -1300,14 +1287,21 @@
     }
 
     if (trace_event) {
-      if (filtered_trace_event) {
-        trace_event->MoveFrom(std::move(filtered_trace_event));
-      } else {
-        trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
-                                phase, category_group_enabled, name, scope, id,
-                                bind_id, num_args, arg_names, arg_types,
-                                arg_values, convertable_values, flags);
-      }
+      trace_event->Initialize(thread_id,
+                              offset_event_timestamp,
+                              thread_now,
+                              phase,
+                              category_group_enabled,
+                              name,
+                              scope,
+                              id,
+                              bind_id,
+                              num_args,
+                              arg_names,
+                              arg_types,
+                              arg_values,
+                              convertable_values,
+                              flags);
 
 #if defined(OS_ANDROID)
       trace_event->SendToATrace();
@@ -1321,9 +1315,53 @@
     }
   }
 
-  if (!console_message.empty())
+  if (console_message.size())
     LOG(ERROR) << console_message;
 
+  if (reinterpret_cast<const unsigned char*>(
+          subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
+    bool event_name_matches;
+    WatchEventCallback watch_event_callback_copy;
+    {
+      AutoLock lock(lock_);
+      event_name_matches = watch_event_name_ == name;
+      watch_event_callback_copy = watch_event_callback_;
+    }
+    if (event_name_matches) {
+      if (!watch_event_callback_copy.is_null())
+        watch_event_callback_copy.Run();
+    }
+  }
+
+  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
+    EventCallback event_callback = reinterpret_cast<EventCallback>(
+        subtle::NoBarrier_Load(&event_callback_));
+    if (event_callback) {
+      event_callback(
+          offset_event_timestamp,
+          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+          category_group_enabled, name, scope, id, num_args, arg_names,
+          arg_types, arg_values, flags);
+    }
+  }
+
+  // TODO(primiano): Add support for events with copied name crbug.com/581078
+  if (!(flags & TRACE_EVENT_FLAG_COPY)) {
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
+      if (phase == TRACE_EVENT_PHASE_BEGIN ||
+          phase == TRACE_EVENT_PHASE_COMPLETE) {
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->PushPseudoStackFrame(name);
+      } else if (phase == TRACE_EVENT_PHASE_END) {
+        // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
+        // is in |TraceLog::UpdateTraceEventDuration|.
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->PopPseudoStackFrame(name);
+      }
+    }
+  }
+
   return handle;
 }
 
@@ -1381,9 +1419,9 @@
                             thread_colors_[thread_name]);
 
   size_t depth = 0;
-  auto it = thread_event_start_times_.find(thread_id);
-  if (it != thread_event_start_times_.end())
-    depth = it->second.size();
+  if (thread_event_start_times_.find(thread_id) !=
+      thread_event_start_times_.end())
+    depth = thread_event_start_times_[thread_id].size();
 
   for (size_t i = 0; i < depth; ++i)
     log << "| ";
@@ -1401,18 +1439,6 @@
   return log.str();
 }
 
-void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
-                                const char* name,
-                                TraceEventHandle handle) {
-  ALLOW_UNUSED_PARAM(handle);
-  const char* category_name = GetCategoryGroupName(category_group_enabled);
-  ForEachCategoryFilter(
-      category_group_enabled,
-      [name, category_name](TraceEventFilter* trace_event_filter) {
-        trace_event_filter->EndEvent(category_name, name);
-      });
-}
-
 void TraceLog::UpdateTraceEventDuration(
     const unsigned char* category_group_enabled,
     const char* name,
@@ -1434,29 +1460,17 @@
 
 #if defined(OS_WIN)
   // Generate an ETW event that marks the end of a complete event.
-  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
+  if (category_group_enabled_local & ENABLED_FOR_ETW_EXPORT)
     TraceEventETWExport::AddCompleteEndEvent(name);
 #endif  // OS_WIN
 
   std::string console_message;
-  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
+  if (category_group_enabled_local & ENABLED_FOR_RECORDING) {
     OptionalAutoLock lock(&lock_);
 
     TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
     if (trace_event) {
       DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
-      // TEMP(oysteine) to debug crbug.com/638744
-      if (trace_event->duration().ToInternalValue() != -1) {
-        DVLOG(1) << "TraceHandle: chunk_seq " << handle.chunk_seq
-                 << ", chunk_index " << handle.chunk_index << ", event_index "
-                 << handle.event_index;
-
-        std::string serialized_event;
-        trace_event->AppendAsJSON(&serialized_event, ArgumentFilterPredicate());
-        DVLOG(1) << "TraceEvent: " << serialized_event;
-        lock_.AssertAcquired();
-      }
-
       trace_event->UpdateDuration(now, thread_now);
 #if defined(OS_ANDROID)
       trace_event->SendToATrace();
@@ -1467,13 +1481,47 @@
       console_message =
           EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
     }
+
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
+      // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopPseudoStackFrame(name);
+    }
   }
 
-  if (!console_message.empty())
+  if (console_message.size())
     LOG(ERROR) << console_message;
 
-  if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING)
-    EndFilteredEvent(category_group_enabled, name, handle);
+  if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) {
+    EventCallback event_callback = reinterpret_cast<EventCallback>(
+        subtle::NoBarrier_Load(&event_callback_));
+    if (event_callback) {
+      event_callback(
+        now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
+        trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
+        nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
+    }
+  }
+}
+
+void TraceLog::SetWatchEvent(const std::string& category_name,
+                             const std::string& event_name,
+                             const WatchEventCallback& callback) {
+  const unsigned char* category =
+      GetCategoryGroupEnabled(category_name.c_str());
+  AutoLock lock(lock_);
+  subtle::NoBarrier_Store(&watch_category_,
+                          reinterpret_cast<subtle::AtomicWord>(category));
+  watch_event_name_ = event_name;
+  watch_event_callback_ = callback;
+}
+
+void TraceLog::CancelWatchEvent() {
+  AutoLock lock(lock_);
+  subtle::NoBarrier_Store(&watch_category_, 0);
+  watch_event_name_ = "";
+  watch_event_callback_.Reset();
 }
 
 uint64_t TraceLog::MangleEventId(uint64_t id) {
@@ -1503,37 +1551,42 @@
                             "sort_index", process_sort_index_);
   }
 
-  if (!process_name_.empty()) {
+  if (process_name_.size()) {
     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                             current_thread_id, "process_name", "name",
                             process_name_);
   }
 
-  if (!process_labels_.empty()) {
+  if (process_labels_.size() > 0) {
     std::vector<std::string> labels;
-    for (const auto& it : process_labels_)
-      labels.push_back(it.second);
+    for (base::hash_map<int, std::string>::iterator it =
+             process_labels_.begin();
+         it != process_labels_.end(); it++) {
+      labels.push_back(it->second);
+    }
     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                             current_thread_id, "process_labels", "labels",
                             base::JoinString(labels, ","));
   }
 
   // Thread sort indices.
-  for (const auto& it : thread_sort_indices_) {
-    if (it.second == 0)
+  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
+       it != thread_sort_indices_.end(); it++) {
+    if (it->second == 0)
       continue;
     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
-                            it.first, "thread_sort_index", "sort_index",
-                            it.second);
+                            it->first, "thread_sort_index", "sort_index",
+                            it->second);
   }
 
   // Thread names.
   AutoLock thread_info_lock(thread_info_lock_);
-  for (const auto& it : thread_names_) {
-    if (it.second.empty())
+  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
+       it != thread_names_.end(); it++) {
+    if (it->second.empty())
       continue;
     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
-                            it.first, "thread_name", "name", it.second);
+                            it->first, "thread_name", "name", it->second);
   }
 
   // If buffer is full, add a metadata record to report this.
@@ -1545,9 +1598,14 @@
   }
 }
 
+void TraceLog::WaitSamplingEventForTesting() {
+  if (!sampling_thread_)
+    return;
+  sampling_thread_->WaitSamplingEventForTesting();
+}
+
 void TraceLog::DeleteForTesting() {
   internal::DeleteTraceLogForTesting::Delete();
-  CategoryRegistry::ResetForTesting();
 }
 
 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
@@ -1559,10 +1617,6 @@
   if (!handle.chunk_seq)
     return NULL;
 
-  DCHECK(handle.chunk_seq);
-  DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
-  DCHECK(handle.event_index < TraceBufferChunk::kTraceBufferChunkSize);
-
   if (thread_local_event_buffer_.Get()) {
     TraceEvent* trace_event =
         thread_local_event_buffer_.Get()->GetEventByHandle(handle);
@@ -1589,10 +1643,10 @@
   process_id_ = process_id;
   // Create a FNV hash from the process ID for XORing.
   // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
-  const unsigned long long kOffsetBasis = 14695981039346656037ull;
-  const unsigned long long kFnvPrime = 1099511628211ull;
-  const unsigned long long pid = static_cast<unsigned long long>(process_id_);
-  process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
+  unsigned long long offset_basis = 14695981039346656037ull;
+  unsigned long long fnv_prime = 1099511628211ull;
+  unsigned long long pid = static_cast<unsigned long long>(process_id_);
+  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
 }
 
 void TraceLog::SetProcessSortIndex(int sort_index) {
@@ -1600,7 +1654,7 @@
   process_sort_index_ = sort_index;
 }
 
-void TraceLog::SetProcessName(const char* process_name) {
+void TraceLog::SetProcessName(const std::string& process_name) {
   AutoLock lock(lock_);
   process_name_ = process_name;
 }
@@ -1616,7 +1670,12 @@
 
 void TraceLog::RemoveProcessLabel(int label_id) {
   AutoLock lock(lock_);
-  process_labels_.erase(label_id);
+  base::hash_map<int, std::string>::iterator it =
+      process_labels_.find(label_id);
+  if (it == process_labels_.end())
+    return;
+
+  process_labels_.erase(it);
 }
 
 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
@@ -1634,39 +1693,42 @@
 
 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
   thread_blocks_message_loop_.Set(true);
-  // This will flush the thread local buffer.
-  delete thread_local_event_buffer_.Get();
+  if (thread_local_event_buffer_.Get()) {
+    // This will flush the thread local buffer.
+    delete thread_local_event_buffer_.Get();
+  }
 }
 
 TraceBuffer* TraceLog::CreateTraceBuffer() {
   HEAP_PROFILER_SCOPED_IGNORE;
   InternalTraceOptions options = trace_options();
-  if (options & kInternalRecordContinuously) {
+  if (options & kInternalRecordContinuously)
     return TraceBuffer::CreateTraceBufferRingBuffer(
         kTraceEventRingBufferChunks);
-  }
-  if (options & kInternalEchoToConsole) {
+  else if (options & kInternalEchoToConsole)
     return TraceBuffer::CreateTraceBufferRingBuffer(
         kEchoToConsoleTraceEventBufferChunks);
-  }
-  if (options & kInternalRecordAsMuchAsPossible) {
+  else if (options & kInternalRecordAsMuchAsPossible)
     return TraceBuffer::CreateTraceBufferVectorOfSize(
         kTraceEventVectorBigBufferChunks);
-  }
   return TraceBuffer::CreateTraceBufferVectorOfSize(
       kTraceEventVectorBufferChunks);
 }
 
 #if defined(OS_WIN)
 void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
+  AutoLock lock(lock_);
+  size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
   // Go through each category and set/clear the ETW bit depending on whether the
   // category is enabled.
-  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
+  for (size_t i = 0; i < category_index; i++) {
+    const char* category_group = g_category_groups[i];
+    DCHECK(category_group);
     if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
-            category.name())) {
-      category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
+            category_group)) {
+      g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT;
     } else {
-      category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
+      g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT;
     }
   }
 }
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
index 88b6e58..e4407e8 100644
--- a/base/trace_event/trace_log.h
+++ b/base/trace_event/trace_log.h
@@ -26,17 +26,15 @@
 
 template <typename Type>
 struct DefaultSingletonTraits;
-class MessageLoop;
 class RefCountedString;
 
 namespace trace_event {
 
-struct TraceCategory;
 class TraceBuffer;
 class TraceBufferChunk;
 class TraceEvent;
-class TraceEventFilter;
 class TraceEventMemoryOverhead;
+class TraceSamplingThread;
 
 struct BASE_EXPORT TraceLogStatus {
   TraceLogStatus();
@@ -47,14 +45,22 @@
 
 class BASE_EXPORT TraceLog : public MemoryDumpProvider {
  public:
-  // Argument passed to TraceLog::SetEnabled.
-  enum Mode : uint8_t {
-    // Enables normal tracing (recording trace events in the trace buffer).
-    RECORDING_MODE = 1 << 0,
+  enum Mode {
+    DISABLED = 0,
+    RECORDING_MODE
+  };
 
-    // Trace events are enabled just for filtering but not for recording. Only
-    // event filters config of |trace_config| argument is used.
-    FILTERING_MODE = 1 << 1
+  // The pointer returned from GetCategoryGroupEnabledInternal() points to a
+  // value with zero or more of the following bits. Used in this class only.
+  // The TRACE_EVENT macros should only use the value as a bool.
+  // These values must be in sync with macro values in TraceEvent.h in Blink.
+  enum CategoryGroupEnabledFlags {
+    // Category group enabled for the recording mode.
+    ENABLED_FOR_RECORDING = 1 << 0,
+    // Category group enabled by SetEventCallbackEnabled().
+    ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+    // Category group enabled to export events to ETW.
+    ENABLED_FOR_ETW_EXPORT = 1 << 3
   };
 
   static TraceLog* GetInstance();
@@ -70,30 +76,16 @@
   // if the current thread supports that (has a message loop).
   void InitializeThreadLocalEventBufferIfSupported();
 
-  // See TraceConfig comments for details on how to control which categories
-  // will be traced. SetDisabled must be called distinctly for each mode that is
-  // enabled. If tracing has already been enabled for recording, category filter
-  // (enabled and disabled categories) will be merged into the current category
-  // filter. Enabling RECORDING_MODE does not enable filters. Trace event
-  // filters will be used only if FILTERING_MODE is set on |modes_to_enable|.
-  // Conversely to RECORDING_MODE, FILTERING_MODE doesn't support upgrading,
-  // i.e. filters can only be enabled if not previously enabled.
-  void SetEnabled(const TraceConfig& trace_config, uint8_t modes_to_enable);
+  // Enables normal tracing (recording trace events in the trace buffer).
+  // See TraceConfig comments for details on how to control what categories
+  // will be traced. If tracing has already been enabled, |category_filter| will
+  // be merged into the current category filter.
+  void SetEnabled(const TraceConfig& trace_config, Mode mode);
 
-  // TODO(ssid): Remove the default SetEnabled and IsEnabled. They should take
-  // Mode as argument.
-
-  // Disables tracing for all categories for the specified |modes_to_disable|
-  // only. Only RECORDING_MODE is taken as default |modes_to_disable|.
+  // Disables normal tracing for all categories.
   void SetDisabled();
-  void SetDisabled(uint8_t modes_to_disable);
 
-  // Returns true if TraceLog is enabled on recording mode.
-  // Note: Returns false even if FILTERING_MODE is enabled.
-  bool IsEnabled() { return enabled_modes_ & RECORDING_MODE; }
-
-  // Returns a bitmap of enabled modes from TraceLog::Mode.
-  uint8_t enabled_modes() { return enabled_modes_; }
+  bool IsEnabled() { return mode_ != DISABLED; }
 
   // The number of times we have begun recording traces. If tracing is off,
   // returns -1. If tracing is on, then it returns the number of times we have
@@ -156,6 +148,31 @@
   // objects.
   void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
 
+  // Not using base::Callback because of its limited by 7 parameters.
+  // Also, using primitive type allows directly passing callback from WebCore.
+  // WARNING: It is possible for the previously set callback to be called
+  // after a call to SetEventCallbackEnabled() that replaces or a call to
+  // SetEventCallbackDisabled() that disables the callback.
+  // This callback may be invoked on any thread.
+  // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
+  // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
+  // interface simple.
+  typedef void (*EventCallback)(TimeTicks timestamp,
+                                char phase,
+                                const unsigned char* category_group_enabled,
+                                const char* name,
+                                const char* scope,
+                                unsigned long long id,
+                                int num_args,
+                                const char* const arg_names[],
+                                const unsigned char arg_types[],
+                                const unsigned long long arg_values[],
+                                unsigned int flags);
+
+  // Enable tracing for EventCallback.
+  void SetEventCallbackEnabled(const TraceConfig& trace_config,
+                               EventCallback cb);
+  void SetEventCallbackDisabled();
   void SetArgumentFilterPredicate(
       const ArgumentFilterPredicate& argument_filter_predicate);
 
@@ -269,9 +286,14 @@
                                 const char* name,
                                 TraceEventHandle handle);
 
-  void EndFilteredEvent(const unsigned char* category_group_enabled,
-                        const char* name,
-                        TraceEventHandle handle);
+  // For every matching event, the callback will be called.
+  typedef base::Callback<void()> WatchEventCallback;
+  void SetWatchEvent(const std::string& category_name,
+                     const std::string& event_name,
+                     const WatchEventCallback& callback);
+  // Cancel the watch event. If tracing is enabled, this may race with the
+  // watch event notification firing.
+  void CancelWatchEvent();
 
   int process_id() const { return process_id_; }
 
@@ -279,12 +301,7 @@
 
   // Exposed for unittesting:
 
-  // Testing factory for TraceEventFilter.
-  typedef std::unique_ptr<TraceEventFilter> (*FilterFactoryForTesting)(
-      const std::string& /* predicate_name */);
-  void SetFilterFactoryForTesting(FilterFactoryForTesting factory) {
-    filter_factory_for_testing_ = factory;
-  }
+  void WaitSamplingEventForTesting();
 
   // Allows deleting our singleton instance.
   static void DeleteForTesting();
@@ -299,9 +316,8 @@
   // on their sort index, ascending, then by their name, and then tid.
   void SetProcessSortIndex(int sort_index);
 
-  // Sets the name of the process. |process_name| should be a string literal
-  // since it is a whitelisted argument for background field trials.
-  void SetProcessName(const char* process_name);
+  // Sets the name of the process.
+  void SetProcessName(const std::string& process_name);
 
   // Processes can have labels in addition to their names. Use labels, for
   // instance, to list out the web page titles that a process is handling.
@@ -355,14 +371,12 @@
                     ProcessMemoryDump* pmd) override;
 
   // Enable/disable each category group based on the current mode_,
-  // category_filter_ and event_filters_enabled_.
-  // Enable the category group in the recording mode if category_filter_ matches
-  // the category group, is not null. Enable category for filtering if any
-  // filter in event_filters_enabled_ enables it.
-  void UpdateCategoryRegistry();
-  void UpdateCategoryState(TraceCategory* category);
-
-  void CreateFiltersForTraceConfig();
+  // category_filter_, event_callback_ and event_callback_category_filter_.
+  // Enable the category group in the enabled mode if category_filter_ matches
+  // the category group, or event_callback_ is not null and
+  // event_callback_category_filter_ matches the category group.
+  void UpdateCategoryGroupEnabledFlags();
+  void UpdateCategoryGroupEnabledFlag(size_t category_index);
 
   // Configure synthetic delays based on the values set in the current
   // trace config.
@@ -377,6 +391,7 @@
 
   TraceLog();
   ~TraceLog() override;
+  const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
   void AddMetadataEventsWhileLocked();
 
   InternalTraceOptions trace_options() const {
@@ -394,7 +409,7 @@
   TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
                                                      bool check_buffer_is_full);
   void CheckIfBufferIsFullWhileLocked();
-  void SetDisabledWhileLocked(uint8_t modes);
+  void SetDisabledWhileLocked();
 
   TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
                                        OptionalAutoLock* lock);
@@ -433,6 +448,7 @@
   static const InternalTraceOptions kInternalRecordUntilFull;
   static const InternalTraceOptions kInternalRecordContinuously;
   static const InternalTraceOptions kInternalEchoToConsole;
+  static const InternalTraceOptions kInternalEnableSampling;
   static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
   static const InternalTraceOptions kInternalEnableArgumentFilter;
 
@@ -442,10 +458,11 @@
   // This lock protects accesses to thread_names_, thread_event_start_times_
   // and thread_colors_.
   Lock thread_info_lock_;
-  uint8_t enabled_modes_;  // See TraceLog::Mode.
+  Mode mode_;
   int num_traces_recorded_;
   std::unique_ptr<TraceBuffer> logged_events_;
   std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
+  subtle::AtomicWord /* EventCallback */ event_callback_;
   bool dispatching_to_observer_list_;
   std::vector<EnabledStateObserver*> enabled_state_observer_list_;
   std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
@@ -470,10 +487,19 @@
 
   TimeDelta time_offset_;
 
+  // Allow tests to wake up when certain events occur.
+  WatchEventCallback watch_event_callback_;
+  subtle::AtomicWord /* const unsigned char* */ watch_category_;
+  std::string watch_event_name_;
+
   subtle::AtomicWord /* Options */ trace_options_;
 
+  // Sampling thread handles.
+  std::unique_ptr<TraceSamplingThread> sampling_thread_;
+  PlatformThreadHandle sampling_thread_handle_;
+
   TraceConfig trace_config_;
-  TraceConfig::EventFilters enabled_event_filters_;
+  TraceConfig event_callback_trace_config_;
 
   ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
   ThreadLocalBoolean thread_blocks_message_loop_;
@@ -496,8 +522,6 @@
   subtle::AtomicWord generation_;
   bool use_worker_thread_;
 
-  FilterFactoryForTesting filter_factory_for_testing_;
-
   DISALLOW_COPY_AND_ASSIGN(TraceLog);
 };
 
diff --git a/base/trace_event/trace_log_constants.cc b/base/trace_event/trace_log_constants.cc
index 65dca2e..cd2ff0d 100644
--- a/base/trace_event/trace_log_constants.cc
+++ b/base/trace_event/trace_log_constants.cc
@@ -14,7 +14,8 @@
     TraceLog::kInternalRecordUntilFull = 1 << 0;
 const TraceLog::InternalTraceOptions
     TraceLog::kInternalRecordContinuously = 1 << 1;
-// 1 << 2 is reserved for the DEPRECATED kInternalEnableSampling. DO NOT USE.
+const TraceLog::InternalTraceOptions
+    TraceLog::kInternalEnableSampling = 1 << 2;
 const TraceLog::InternalTraceOptions
     TraceLog::kInternalEchoToConsole = 1 << 3;
 const TraceLog::InternalTraceOptions
diff --git a/base/trace_event/trace_sampling_thread.cc b/base/trace_event/trace_sampling_thread.cc
new file mode 100644
index 0000000..5a0d2f8
--- /dev/null
+++ b/base/trace_event/trace_sampling_thread.cc
@@ -0,0 +1,107 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
+#include "base/trace_event/trace_sampling_thread.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceBucketData {
+ public:
+  TraceBucketData(base::subtle::AtomicWord* bucket,
+                  const char* name,
+                  TraceSampleCallback callback);
+  ~TraceBucketData();
+
+  TRACE_EVENT_API_ATOMIC_WORD* bucket;
+  const char* bucket_name;
+  TraceSampleCallback callback;
+};
+
+TraceSamplingThread::TraceSamplingThread()
+    : thread_running_(false),
+      waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                  WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+TraceSamplingThread::~TraceSamplingThread() {}
+
+void TraceSamplingThread::ThreadMain() {
+  PlatformThread::SetName("Sampling Thread");
+  thread_running_ = true;
+  const int kSamplingFrequencyMicroseconds = 1000;
+  while (!cancellation_flag_.IsSet()) {
+    PlatformThread::Sleep(
+        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
+    GetSamples();
+    waitable_event_for_testing_.Signal();
+  }
+}
+
+// static
+void TraceSamplingThread::DefaultSamplingCallback(
+    TraceBucketData* bucket_data) {
+  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
+      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
+  if (!category_and_name)
+    return;
+  const char* const combined =
+      reinterpret_cast<const char* const>(category_and_name);
+  const char* category_group;
+  const char* name;
+  ExtractCategoryAndName(combined, &category_group, &name);
+  TRACE_EVENT_API_ADD_TRACE_EVENT(
+      TRACE_EVENT_PHASE_SAMPLE,
+      TraceLog::GetCategoryGroupEnabled(category_group), name,
+      trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
+      NULL, NULL, NULL, NULL, 0);
+}
+
+void TraceSamplingThread::GetSamples() {
+  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
+    TraceBucketData* bucket_data = &sample_buckets_[i];
+    bucket_data->callback.Run(bucket_data);
+  }
+}
+
+void TraceSamplingThread::RegisterSampleBucket(
+    TRACE_EVENT_API_ATOMIC_WORD* bucket,
+    const char* const name,
+    TraceSampleCallback callback) {
+  // Access to sample_buckets_ doesn't cause races with the sampling thread
+  // that uses the sample_buckets_, because it is guaranteed that
+  // RegisterSampleBucket is called before the sampling thread is created.
+  DCHECK(!thread_running_);
+  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
+}
+
+// static
+void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
+                                                 const char** category,
+                                                 const char** name) {
+  *category = combined;
+  *name = &combined[strlen(combined) + 1];
+}
+
+void TraceSamplingThread::Stop() {
+  cancellation_flag_.Set();
+}
+
+void TraceSamplingThread::WaitSamplingEventForTesting() {
+  waitable_event_for_testing_.Wait();
+}
+
+TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
+                                 const char* name,
+                                 TraceSampleCallback callback)
+    : bucket(bucket), bucket_name(name), callback(callback) {}
+
+TraceBucketData::~TraceBucketData() {}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/trace_sampling_thread.h b/base/trace_event/trace_sampling_thread.h
new file mode 100644
index 0000000..f976a80
--- /dev/null
+++ b/base/trace_event/trace_sampling_thread.h
@@ -0,0 +1,54 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
+#define BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
+
+#include "base/synchronization/cancellation_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceBucketData;
+typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
+
+// This object must be created on the IO thread.
+class TraceSamplingThread : public PlatformThread::Delegate {
+ public:
+  TraceSamplingThread();
+  ~TraceSamplingThread() override;
+
+  // Implementation of PlatformThread::Delegate:
+  void ThreadMain() override;
+
+  static void DefaultSamplingCallback(TraceBucketData* bucket_data);
+
+  void Stop();
+  void WaitSamplingEventForTesting();
+
+ private:
+  friend class TraceLog;
+
+  void GetSamples();
+  // Not thread-safe. Once the ThreadMain has been called, this can no longer
+  // be called.
+  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
+                            const char* const name,
+                            TraceSampleCallback callback);
+  // Splits a combined "category\0name" into the two component parts.
+  static void ExtractCategoryAndName(const char* combined,
+                                     const char** category,
+                                     const char** name);
+  std::vector<TraceBucketData> sample_buckets_;
+  bool thread_running_;
+  CancellationFlag cancellation_flag_;
+  WaitableEvent waitable_event_for_testing_;
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 131af14..487fd19 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -4,7 +4,6 @@
 
 #include "base/tracked_objects.h"
 
-#include <ctype.h>
 #include <limits.h>
 #include <stdlib.h>
 
@@ -14,10 +13,8 @@
 #include "base/compiler_specific.h"
 #include "base/debug/leak_annotations.h"
 #include "base/logging.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/numerics/safe_math.h"
 #include "base/process/process_handle.h"
+#include "base/strings/stringprintf.h"
 #include "base/third_party/valgrind/memcheck.h"
 #include "base/threading/worker_pool.h"
 #include "base/tracking_info.h"
@@ -32,9 +29,6 @@
 namespace tracked_objects {
 
 namespace {
-
-constexpr char kWorkerThreadSanitizedName[] = "WorkerThread-*";
-
 // When ThreadData is first initialized, should we start in an ACTIVE state to
 // record all of the startup-time tasks, or should we start up DEACTIVATED, so
 // that we only record after parsing the command line flag --enable-tracking.
@@ -80,22 +74,6 @@
   return current_timing_enabled == ENABLED_TIMING;
 }
 
-// Sanitize a thread name by replacing trailing sequence of digits with "*".
-// Examples:
-// 1. "BrowserBlockingWorker1/23857" => "BrowserBlockingWorker1/*"
-// 2. "Chrome_IOThread" => "Chrome_IOThread"
-std::string SanitizeThreadName(const std::string& thread_name) {
-  size_t i = thread_name.length();
-
-  while (i > 0 && isdigit(thread_name[i - 1]))
-    --i;
-
-  if (i == thread_name.length())
-    return thread_name;
-
-  return thread_name.substr(0, i) + '*';
-}
-
 }  // namespace
 
 //------------------------------------------------------------------------------
@@ -108,15 +86,10 @@
       queue_duration_sum_(0),
       run_duration_max_(0),
       queue_duration_max_(0),
-      alloc_ops_(0),
-      free_ops_(0),
-      allocated_bytes_(0),
-      freed_bytes_(0),
-      alloc_overhead_bytes_(0),
-      max_allocated_bytes_(0),
       run_duration_sample_(0),
       queue_duration_sample_(0),
-      last_phase_snapshot_(nullptr) {}
+      last_phase_snapshot_(nullptr) {
+}
 
 DeathData::DeathData(const DeathData& other)
     : count_(other.count_),
@@ -125,12 +98,6 @@
       queue_duration_sum_(other.queue_duration_sum_),
       run_duration_max_(other.run_duration_max_),
       queue_duration_max_(other.queue_duration_max_),
-      alloc_ops_(other.alloc_ops_),
-      free_ops_(other.free_ops_),
-      allocated_bytes_(other.allocated_bytes_),
-      freed_bytes_(other.freed_bytes_),
-      alloc_overhead_bytes_(other.alloc_overhead_bytes_),
-      max_allocated_bytes_(other.max_allocated_bytes_),
       run_duration_sample_(other.run_duration_sample_),
       queue_duration_sample_(other.queue_duration_sample_),
       last_phase_snapshot_(nullptr) {
@@ -158,9 +125,9 @@
 #define CONDITIONAL_ASSIGN(assign_it, target, source) \
   ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
 
-void DeathData::RecordDurations(const int32_t queue_duration,
-                                const int32_t run_duration,
-                                const uint32_t random_number) {
+void DeathData::RecordDeath(const int32_t queue_duration,
+                            const int32_t run_duration,
+                            const uint32_t random_number) {
   // We'll just clamp at INT_MAX, but we should note this in the UI as such.
   if (count_ < INT_MAX)
     base::subtle::NoBarrier_Store(&count_, count_ + 1);
@@ -197,28 +164,12 @@
   }
 }
 
-void DeathData::RecordAllocations(const uint32_t alloc_ops,
-                                  const uint32_t free_ops,
-                                  const uint32_t allocated_bytes,
-                                  const uint32_t freed_bytes,
-                                  const uint32_t alloc_overhead_bytes,
-                                  const uint32_t max_allocated_bytes) {
-  // Use saturating arithmetic.
-  SaturatingMemberAdd(alloc_ops, &alloc_ops_);
-  SaturatingMemberAdd(free_ops, &free_ops_);
-  SaturatingMemberAdd(allocated_bytes, &allocated_bytes_);
-  SaturatingMemberAdd(freed_bytes, &freed_bytes_);
-  SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
-
-  int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
-  if (max > max_allocated_bytes_)
-    base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
-}
-
 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
   // Snapshotting and storing current state.
-  last_phase_snapshot_ =
-      new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
+  last_phase_snapshot_ = new DeathDataPhaseSnapshot(
+      profiling_phase, count(), run_duration_sum(), run_duration_max(),
+      run_duration_sample(), queue_duration_sum(), queue_duration_max(),
+      queue_duration_sample(), last_phase_snapshot_);
 
   // Not touching fields for which a delta can be computed by comparing with a
   // snapshot from the previous phase. Resetting other fields.  Sample values
@@ -250,17 +201,6 @@
   base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
 }
 
-void DeathData::SaturatingMemberAdd(const uint32_t addend,
-                                    base::subtle::Atomic32* sum) {
-  // Bail quick if no work or already saturated.
-  if (addend == 0U || *sum == INT_MAX)
-    return;
-
-  base::CheckedNumeric<int32_t> new_sum = *sum;
-  new_sum += addend;
-  base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX));
-}
-
 //------------------------------------------------------------------------------
 DeathDataSnapshot::DeathDataSnapshot()
     : count(-1),
@@ -269,13 +209,8 @@
       run_duration_sample(-1),
       queue_duration_sum(-1),
       queue_duration_max(-1),
-      queue_duration_sample(-1),
-      alloc_ops(-1),
-      free_ops(-1),
-      allocated_bytes(-1),
-      freed_bytes(-1),
-      alloc_overhead_bytes(-1),
-      max_allocated_bytes(-1) {}
+      queue_duration_sample(-1) {
+}
 
 DeathDataSnapshot::DeathDataSnapshot(int count,
                                      int32_t run_duration_sum,
@@ -283,58 +218,25 @@
                                      int32_t run_duration_sample,
                                      int32_t queue_duration_sum,
                                      int32_t queue_duration_max,
-                                     int32_t queue_duration_sample,
-                                     int32_t alloc_ops,
-                                     int32_t free_ops,
-                                     int32_t allocated_bytes,
-                                     int32_t freed_bytes,
-                                     int32_t alloc_overhead_bytes,
-                                     int32_t max_allocated_bytes)
+                                     int32_t queue_duration_sample)
     : count(count),
       run_duration_sum(run_duration_sum),
       run_duration_max(run_duration_max),
       run_duration_sample(run_duration_sample),
       queue_duration_sum(queue_duration_sum),
       queue_duration_max(queue_duration_max),
-      queue_duration_sample(queue_duration_sample),
-      alloc_ops(alloc_ops),
-      free_ops(free_ops),
-      allocated_bytes(allocated_bytes),
-      freed_bytes(freed_bytes),
-      alloc_overhead_bytes(alloc_overhead_bytes),
-      max_allocated_bytes(max_allocated_bytes) {}
-
-DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
-    : count(death_data.count()),
-      run_duration_sum(death_data.run_duration_sum()),
-      run_duration_max(death_data.run_duration_max()),
-      run_duration_sample(death_data.run_duration_sample()),
-      queue_duration_sum(death_data.queue_duration_sum()),
-      queue_duration_max(death_data.queue_duration_max()),
-      queue_duration_sample(death_data.queue_duration_sample()),
-      alloc_ops(death_data.alloc_ops()),
-      free_ops(death_data.free_ops()),
-      allocated_bytes(death_data.allocated_bytes()),
-      freed_bytes(death_data.freed_bytes()),
-      alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
-      max_allocated_bytes(death_data.max_allocated_bytes()) {}
-
-DeathDataSnapshot::DeathDataSnapshot(const DeathDataSnapshot& death_data) =
-    default;
+      queue_duration_sample(queue_duration_sample) {}
 
 DeathDataSnapshot::~DeathDataSnapshot() {
 }
 
 DeathDataSnapshot DeathDataSnapshot::Delta(
     const DeathDataSnapshot& older) const {
-  return DeathDataSnapshot(
-      count - older.count, run_duration_sum - older.run_duration_sum,
-      run_duration_max, run_duration_sample,
-      queue_duration_sum - older.queue_duration_sum, queue_duration_max,
-      queue_duration_sample, alloc_ops - older.alloc_ops,
-      free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
-      freed_bytes - older.freed_bytes,
-      alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
+  return DeathDataSnapshot(count - older.count,
+                           run_duration_sum - older.run_duration_sum,
+                           run_duration_max, run_duration_sample,
+                           queue_duration_sum - older.queue_duration_sum,
+                           queue_duration_max, queue_duration_sample);
 }
 
 //------------------------------------------------------------------------------
@@ -350,7 +252,8 @@
 
 BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth)
     : location(birth.location()),
-      sanitized_thread_name(birth.birth_thread()->sanitized_thread_name()) {}
+      thread_name(birth.birth_thread()->thread_name()) {
+}
 
 BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
 }
@@ -382,6 +285,9 @@
 base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;
 
 // static
+int ThreadData::worker_thread_data_creation_count_ = 0;
+
+// static
 int ThreadData::cleanup_count_ = 0;
 
 // static
@@ -391,7 +297,7 @@
 ThreadData* ThreadData::all_thread_data_list_head_ = NULL;
 
 // static
-ThreadData* ThreadData::first_retired_thread_data_ = NULL;
+ThreadData* ThreadData::first_retired_worker_ = NULL;
 
 // static
 base::LazyInstance<base::Lock>::Leaky
@@ -400,14 +306,25 @@
 // static
 base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;
 
-ThreadData::ThreadData(const std::string& sanitized_thread_name)
+ThreadData::ThreadData(const std::string& suggested_name)
     : next_(NULL),
-      next_retired_thread_data_(NULL),
-      sanitized_thread_name_(sanitized_thread_name),
+      next_retired_worker_(NULL),
+      worker_thread_number_(0),
       incarnation_count_for_pool_(-1),
       current_stopwatch_(NULL) {
-  DCHECK(sanitized_thread_name_.empty() ||
-         !isdigit(sanitized_thread_name_.back()));
+  DCHECK_GE(suggested_name.size(), 0u);
+  thread_name_ = suggested_name;
+  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
+}
+
+ThreadData::ThreadData(int thread_number)
+    : next_(NULL),
+      next_retired_worker_(NULL),
+      worker_thread_number_(thread_number),
+      incarnation_count_for_pool_(-1),
+      current_stopwatch_(NULL) {
+  CHECK_GT(thread_number, 0);
+  base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
   PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
 }
 
@@ -438,17 +355,15 @@
 ThreadData* ThreadData::next() const { return next_; }
 
 // static
-void ThreadData::InitializeThreadContext(const std::string& thread_name) {
+void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
   if (base::WorkerPool::RunsTasksOnCurrentThread())
     return;
-  DCHECK_NE(thread_name, kWorkerThreadSanitizedName);
   EnsureTlsInitialization();
   ThreadData* current_thread_data =
       reinterpret_cast<ThreadData*>(tls_index_.Get());
   if (current_thread_data)
     return;  // Browser tests instigate this.
-  current_thread_data =
-      GetRetiredOrCreateThreadData(SanitizeThreadName(thread_name));
+  current_thread_data = new ThreadData(suggested_name);
   tls_index_.Set(current_thread_data);
 }
 
@@ -461,8 +376,26 @@
     return registered;
 
   // We must be a worker thread, since we didn't pre-register.
-  ThreadData* worker_thread_data =
-      GetRetiredOrCreateThreadData(kWorkerThreadSanitizedName);
+  ThreadData* worker_thread_data = NULL;
+  int worker_thread_number = 0;
+  {
+    base::AutoLock lock(*list_lock_.Pointer());
+    if (first_retired_worker_) {
+      worker_thread_data = first_retired_worker_;
+      first_retired_worker_ = first_retired_worker_->next_retired_worker_;
+      worker_thread_data->next_retired_worker_ = NULL;
+    } else {
+      worker_thread_number = ++worker_thread_data_creation_count_;
+    }
+  }
+
+  // If we can't find a previously used instance, then we have to create one.
+  if (!worker_thread_data) {
+    DCHECK_GT(worker_thread_number, 0);
+    worker_thread_data = new ThreadData(worker_thread_number);
+  }
+  DCHECK_GT(worker_thread_data->worker_thread_number_, 0);
+
   tls_index_.Set(worker_thread_data);
   return worker_thread_data;
 }
@@ -476,23 +409,21 @@
 }
 
 void ThreadData::OnThreadTerminationCleanup() {
-  // We must NOT do any allocations during this callback. There is a chance that
-  // the allocator is no longer active on this thread.
-
   // The list_lock_ was created when we registered the callback, so it won't be
   // allocated here despite the lazy reference.
   base::AutoLock lock(*list_lock_.Pointer());
   if (incarnation_counter_ != incarnation_count_for_pool_)
     return;  // ThreadData was constructed in an earlier unit test.
   ++cleanup_count_;
-
-  // Add this ThreadData to a retired list so that it can be reused by a thread
-  // with the same name sanitized name in the future.
-  // |next_retired_thread_data_| is expected to be nullptr for a ThreadData
-  // associated with an active thread.
-  DCHECK(!next_retired_thread_data_);
-  next_retired_thread_data_ = first_retired_thread_data_;
-  first_retired_thread_data_ = this;
+  // Only worker threads need to be retired and reused.
+  if (!worker_thread_number_) {
+    return;
+  }
+  // We must NOT do any allocations during this callback.
+  // Using the simple linked lists avoids all allocations.
+  DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
+  this->next_retired_worker_ = first_retired_worker_;
+  first_retired_worker_ = this;
 }
 
 // static
@@ -524,8 +455,7 @@
     if (birth_count.second > 0) {
       current_phase_tasks->push_back(
           TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
-                       DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
-                                         0, 0, 0, 0, 0, 0),
+                       DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0),
                        "Still_Alive"));
     }
   }
@@ -584,21 +514,7 @@
     base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
     death_data = &death_map_[&births];
   }  // Release lock ASAP.
-  death_data->RecordDurations(queue_duration, run_duration, random_number_);
-
-#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
-  if (stopwatch.heap_tracking_enabled()) {
-    base::debug::ThreadHeapUsage heap_usage = stopwatch.heap_usage().usage();
-    // Saturate the 64 bit counts on conversion to 32 bit storage.
-    death_data->RecordAllocations(
-        base::saturated_cast<int32_t>(heap_usage.alloc_ops),
-        base::saturated_cast<int32_t>(heap_usage.free_ops),
-        base::saturated_cast<int32_t>(heap_usage.alloc_bytes),
-        base::saturated_cast<int32_t>(heap_usage.free_bytes),
-        base::saturated_cast<int32_t>(heap_usage.alloc_overhead_bytes),
-        base::saturated_cast<int32_t>(heap_usage.max_allocated_bytes));
-  }
-#endif
+  death_data->RecordDeath(queue_duration, run_duration, random_number_);
 }
 
 // static
@@ -719,7 +635,7 @@
       if (death_data.count > 0) {
         (*phased_snapshots)[phase->profiling_phase].tasks.push_back(
             TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data,
-                         sanitized_thread_name()));
+                         thread_name()));
       }
     }
   }
@@ -737,7 +653,13 @@
   for (const auto& death : death_map_) {
     deaths->push_back(std::make_pair(
         death.first,
-        DeathDataPhaseSnapshot(profiling_phase, death.second,
+        DeathDataPhaseSnapshot(profiling_phase, death.second.count(),
+                               death.second.run_duration_sum(),
+                               death.second.run_duration_max(),
+                               death.second.run_duration_sample(),
+                               death.second.queue_duration_sum(),
+                               death.second.queue_duration_max(),
+                               death.second.queue_duration_sample(),
                                death.second.last_phase_snapshot())));
   }
 }
@@ -794,7 +716,6 @@
 
   if (status > DEACTIVATED)
     status = PROFILING_ACTIVE;
-
   base::subtle::Release_Store(&status_, status);
 }
 
@@ -823,9 +744,10 @@
 }
 
 // static
-void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
-  ALLOW_UNUSED_PARAM(major_threads_shutdown_count);
+void ThreadData::EnsureCleanupWasCalled(int /*major_threads_shutdown_count*/) {
   base::AutoLock lock(*list_lock_.Pointer());
+  if (worker_thread_data_creation_count_ == 0)
+    return;  // We haven't really run much, and couldn't have leaked.
 
   // TODO(jar): until this is working on XP, don't run the real test.
 #if 0
@@ -850,14 +772,16 @@
     all_thread_data_list_head_ = NULL;
     ++incarnation_counter_;
     // To be clean, break apart the retired worker list (though we leak them).
-    while (first_retired_thread_data_) {
-      ThreadData* thread_data = first_retired_thread_data_;
-      first_retired_thread_data_ = thread_data->next_retired_thread_data_;
-      thread_data->next_retired_thread_data_ = nullptr;
+    while (first_retired_worker_) {
+      ThreadData* worker = first_retired_worker_;
+      CHECK_GT(worker->worker_thread_number_, 0);
+      first_retired_worker_ = worker->next_retired_worker_;
+      worker->next_retired_worker_ = NULL;
     }
   }
 
   // Put most global static back in pristine shape.
+  worker_thread_data_creation_count_ = 0;
   cleanup_count_ = 0;
   tls_index_.Set(NULL);
   // Almost UNINITIALIZED.
@@ -889,39 +813,6 @@
   }
 }
 
-// static
-ThreadData* ThreadData::GetRetiredOrCreateThreadData(
-    const std::string& sanitized_thread_name) {
-  SCOPED_UMA_HISTOGRAM_TIMER("TrackedObjects.GetRetiredOrCreateThreadData");
-
-  {
-    base::AutoLock lock(*list_lock_.Pointer());
-    ThreadData** pcursor = &first_retired_thread_data_;
-    ThreadData* cursor = first_retired_thread_data_;
-
-    // Assuming that there aren't more than a few tens of retired ThreadData
-    // instances, this lookup should be quick compared to the thread creation
-    // time. Retired ThreadData instances cannot be stored in a map because
-    // insertions are done from OnThreadTerminationCleanup() where allocations
-    // are not allowed.
-    //
-    // Note: Test processes may have more than a few tens of retired ThreadData
-    // instances.
-    while (cursor) {
-      if (cursor->sanitized_thread_name() == sanitized_thread_name) {
-        DCHECK_EQ(*pcursor, cursor);
-        *pcursor = cursor->next_retired_thread_data_;
-        cursor->next_retired_thread_data_ = nullptr;
-        return cursor;
-      }
-      pcursor = &cursor->next_retired_thread_data_;
-      cursor = cursor->next_retired_thread_data_;
-    }
-  }
-
-  return new ThreadData(sanitized_thread_name);
-}
-
 //------------------------------------------------------------------------------
 TaskStopwatch::TaskStopwatch()
     : wallclock_duration_ms_(0),
@@ -932,10 +823,6 @@
   state_ = CREATED;
   child_ = NULL;
 #endif
-#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
-  heap_tracking_enabled_ =
-      base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled();
-#endif
 }
 
 TaskStopwatch::~TaskStopwatch() {
@@ -952,10 +839,6 @@
 #endif
 
   start_time_ = ThreadData::Now();
-#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
-  if (heap_tracking_enabled_)
-    heap_usage_.Start();
-#endif
 
   current_thread_data_ = ThreadData::Get();
   if (!current_thread_data_)
@@ -979,10 +862,6 @@
   state_ = STOPPED;
   DCHECK(child_ == NULL);
 #endif
-#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
-  if (heap_tracking_enabled_)
-    heap_usage_.Stop(true);
-#endif
 
   if (!start_time_.is_null() && !end_time.is_null()) {
     wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
@@ -1034,9 +913,23 @@
 
 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
     int profiling_phase,
-    const DeathData& death,
+    int count,
+    int32_t run_duration_sum,
+    int32_t run_duration_max,
+    int32_t run_duration_sample,
+    int32_t queue_duration_sum,
+    int32_t queue_duration_max,
+    int32_t queue_duration_sample,
     const DeathDataPhaseSnapshot* prev)
-    : profiling_phase(profiling_phase), death_data(death), prev(prev) {}
+    : profiling_phase(profiling_phase),
+      death_data(count,
+                 run_duration_sum,
+                 run_duration_max,
+                 run_duration_sample,
+                 queue_duration_sum,
+                 queue_duration_max,
+                 queue_duration_sample),
+      prev(prev) {}
 
 //------------------------------------------------------------------------------
 // TaskSnapshot
@@ -1046,10 +939,11 @@
 
 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
                            const DeathDataSnapshot& death_data,
-                           const std::string& death_sanitized_thread_name)
+                           const std::string& death_thread_name)
     : birth(birth),
       death_data(death_data),
-      death_sanitized_thread_name(death_sanitized_thread_name) {}
+      death_thread_name(death_thread_name) {
+}
 
 TaskSnapshot::~TaskSnapshot() {
 }
diff --git a/base/tracked_objects.h b/base/tracked_objects.h
index 36caec3..7ef0317 100644
--- a/base/tracked_objects.h
+++ b/base/tracked_objects.h
@@ -14,12 +14,9 @@
 #include <utility>
 #include <vector>
 
-#include "base/allocator/features.h"
 #include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/containers/hash_tables.h"
-#include "base/debug/debugging_flags.h"
-#include "base/debug/thread_heap_usage_tracker.h"
 #include "base/gtest_prod_util.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
@@ -62,76 +59,71 @@
 // with great efficiency (i.e., copying of strings is never needed, and
 // comparisons for equality can be based on pointer comparisons).
 //
-// Next, a Births instance is constructed or found. A Births instance records
-// (in a base class BirthOnThread) references to the static data provided in a
-// Location instance, as well as a pointer to the ThreadData bound to the thread
-// on which the birth takes place (see discussion on ThreadData below). There is
-// at most one Births instance for each Location / ThreadData pair. The derived
-// Births class contains slots for recording statistics about all instances born
-// at the same location. Statistics currently include only the count of
-// instances constructed.
+// Next, a Births instance is created for use ONLY on the thread where this
+// instance was created.  That Births instance records (in a base class
+// BirthOnThread) references to the static data provided in a Location instance,
+// as well as a pointer specifying the thread on which the birth takes place.
+// Hence there is at most one Births instance for each Location on each thread.
+// The derived Births class contains slots for recording statistics about all
+// instances born at the same location.  Statistics currently include only the
+// count of instances constructed.
 //
 // Since the base class BirthOnThread contains only constant data, it can be
-// freely accessed by any thread at any time. The statistics must be handled
-// more carefully; they are updated exclusively by the single thread to which
-// the ThreadData is bound at a given time.
+// freely accessed by any thread at any time (i.e., only the statistic needs to
+// be handled carefully, and stats are updated exclusively on the birth thread).
 //
 // For Tasks, having now either constructed or found the Births instance
 // described above, a pointer to the Births instance is then recorded into the
-// PendingTask structure.  This fact alone is very useful in debugging, when
-// there is a question of where an instance came from.  In addition, the birth
-// time is also recorded and used to later evaluate the lifetime duration of the
-// whole Task.  As a result of the above embedding, we can find out a Task's
-// location of birth, and name of birth thread, without using any locks, as all
-// that data is constant across the life of the process.
+// PendingTask structure in MessageLoop.  This fact alone is very useful in
+// debugging, when there is a question of where an instance came from.  In
+// addition, the birth time is also recorded and used to later evaluate the
+// lifetime duration of the whole Task.  As a result of the above embedding, we
+// can find out a Task's location of birth, and thread of birth, without using
+// any locks, as all that data is constant across the life of the process.
 //
 // The above work *could* also be done for any other object as well by calling
 // TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate.
 //
-// The upper bound for the amount of memory used in the above data structures is
-// the product of the number of ThreadData instances and the number of
-// Locations. Fortunately, Locations are often created on a single thread and
-// the memory utilization is actually fairly restrained.
+// The amount of memory used in the above data structures depends on how many
+// threads there are, and how many Locations of construction there are.
+// Fortunately, we don't use memory that is the product of those two counts, but
+// rather we only need one Births instance for each thread that constructs an
+// instance at a Location.  In many cases, instances are only created on one
+// thread, so the memory utilization is actually fairly restrained.
 //
 // Lastly, when an instance is deleted, the final tallies of statistics are
 // carefully accumulated.  That tallying writes into slots (members) in a
-// collection of DeathData instances.  For each Births / death ThreadData pair,
-// there is a DeathData instance to record the additional death count, as well
-// as to accumulate the run-time and queue-time durations for the instance as it
-// is destroyed (dies). Since a ThreadData is bound to at most one thread at a
-// time, there is no need to lock such DeathData instances. (i.e., these
-// accumulated stats in a DeathData instance are exclusively updated by the
-// singular owning thread).
+// collection of DeathData instances.  For each birth place Location that is
+// destroyed on a thread, there is a DeathData instance to record the additional
+// death count, as well as accumulate the run-time and queue-time durations for
+// the instance as it is destroyed (dies).  By maintaining a single place to
+// aggregate this running sum *only* for the given thread, we avoid the need to
+// lock such DeathData instances. (i.e., these accumulated stats in a DeathData
+// instance are exclusively updated by the singular owning thread).
 //
-// With the above life cycle description complete, the major remaining detail is
-// explaining how existing Births and DeathData instances are found to avoid
-// redundant allocations.
+// With the above life cycle description complete, the major remaining detail
+// is explaining how each thread maintains a list of DeathData instances, and
+// of Births instances, and is able to avoid additional (redundant/unnecessary)
+// allocations.
 //
-// A ThreadData instance maintains maps of Births and DeathData instances. The
-// Births map is indexed by Location and the DeathData map is indexed by
-// Births*. As noted earlier, we can compare Locations very efficiently as we
-// consider the underlying data (file, function, line) to be atoms, and hence
-// pointer comparison is used rather than (slow) string comparisons.
+// Each thread maintains a list of data items specific to that thread in a
+// ThreadData instance (for that specific thread only).  The two critical items
+// are lists of DeathData and Births instances.  These lists are maintained in
+// STL maps, which are indexed by Location.  As noted earlier, we can compare
+// locations very efficiently as we consider the underlying data (file,
+// function, line) to be atoms, and hence pointer comparison is used rather than
+// (slow) string comparisons.
 //
-// The first time that a thread calls ThreadData::InitializeThreadContext() or
-// ThreadData::Get(), a ThreadData instance is bound to it and stored in TLS. If
-// a ThreadData bound to a terminated thread with the same sanitized name (i.e.
-// name without trailing digits) as the current thread is available, it is
-// reused. Otherwise, a new ThreadData instance is instantiated. Since a
-// ThreadData is bound to at most one thread at a time, there is no need to
-// acquire a lock to access its maps. Over time, a ThreadData may be bound to
-// different threads that share the same sanitized name.
-//
-// We maintain a list of all ThreadData instances for the current process. Each
-// ThreadData instance has a pointer to the next one. A static member of
-// ThreadData provides a pointer to the first item on this global list, and
-// access via that all_thread_data_list_head_ item requires the use of the
-// list_lock_.
-//
-// When new ThreadData instances are added to the global list, they are pre-
-// pended, which ensures that any prior acquisition of the list is valid (i.e.,
-// the holder can iterate over it without fear of it changing, or the necessity
-// of using an additional lock.  Iterations are actually pretty rare (used
+// To provide a mechanism for iterating over all "known threads," which means
+// threads that have recorded a birth or a death, we create a singly linked list
+// of ThreadData instances.  Each such instance maintains a pointer to the next
+// one.  A static member of ThreadData provides a pointer to the first item on
+// this global list, and access via that all_thread_data_list_head_ item
+// requires the use of the list_lock_.
+// When new ThreadData instances is added to the global list, it is pre-pended,
+// which ensures that any prior acquisition of the list is valid (i.e., the
+// holder can iterate over it without fear of it changing, or the necessity of
+// using an additional lock.  Iterations are actually pretty rare (used
 // primarily for cleanup, or snapshotting data for display), so this lock has
 // very little global performance impact.
 //
@@ -178,13 +170,12 @@
 // memory reference).
 //
 // TODO(jar): We can implement a Snapshot system that *tries* to grab the
-// snapshots on the source threads *when* they have SingleThreadTaskRunners
-// available (worker threads don't have SingleThreadTaskRunners, and hence
-// gathering from them will continue to be asynchronous).  We had an
-// implementation of this in the past, but the difficulty is dealing with
-// threads being terminated. We can *try* to post a task to threads that have a
-// SingleThreadTaskRunner and check if that succeeds (will fail if the thread
-// has been terminated). This *might* be valuable when we are collecting data
+// snapshots on the source threads *when* they have MessageLoops available
+// (worker threads don't have message loops generally, and hence gathering from
+// them will continue to be asynchronous).  We had an implementation of this in
+// the past, but the difficulty is dealing with message loops being terminated.
+// We can *try* to spam the available threads via some task runner to
+// achieve this feat, and it *might* be valuable when we are collecting data
 // for upload via UMA (where correctness of data may be more significant than
 // for a single screen of about:profiler).
 //
@@ -235,7 +226,7 @@
   ~BirthOnThreadSnapshot();
 
   LocationSnapshot location;
-  std::string sanitized_thread_name;
+  std::string thread_name;
 };
 
 //------------------------------------------------------------------------------
@@ -257,8 +248,6 @@
   DISALLOW_COPY_AND_ASSIGN(Births);
 };
 
-class DeathData;
-
 //------------------------------------------------------------------------------
 // A "snapshotted" representation of the DeathData class.
 
@@ -276,15 +265,7 @@
                     int32_t run_duration_sample,
                     int32_t queue_duration_sum,
                     int32_t queue_duration_max,
-                    int32_t queue_duration_sample,
-                    int32_t alloc_ops,
-                    int32_t free_ops,
-                    int32_t allocated_bytes,
-                    int32_t freed_bytes,
-                    int32_t alloc_overhead_bytes,
-                    int32_t max_allocated_bytes);
-  DeathDataSnapshot(const DeathData& death_data);
-  DeathDataSnapshot(const DeathDataSnapshot& other);
+                    int32_t queue_duration_sample);
   ~DeathDataSnapshot();
 
   // Calculates and returns the delta between this snapshot and an earlier
@@ -298,13 +279,6 @@
   int32_t queue_duration_sum;
   int32_t queue_duration_max;
   int32_t queue_duration_sample;
-
-  int32_t alloc_ops;
-  int32_t free_ops;
-  int32_t allocated_bytes;
-  int32_t freed_bytes;
-  int32_t alloc_overhead_bytes;
-  int32_t max_allocated_bytes;
 };
 
 //------------------------------------------------------------------------------
@@ -313,7 +287,13 @@
 
 struct DeathDataPhaseSnapshot {
   DeathDataPhaseSnapshot(int profiling_phase,
-                         const DeathData& death_data,
+                         int count,
+                         int32_t run_duration_sum,
+                         int32_t run_duration_max,
+                         int32_t run_duration_sample,
+                         int32_t queue_duration_sum,
+                         int32_t queue_duration_max,
+                         int32_t queue_duration_sample,
                          const DeathDataPhaseSnapshot* prev);
 
   // Profiling phase at which completion this snapshot was taken.
@@ -346,26 +326,9 @@
 
   // Update stats for a task destruction (death) that had a Run() time of
   // |duration|, and has had a queueing delay of |queue_duration|.
-  void RecordDurations(const int32_t queue_duration,
-                       const int32_t run_duration,
-                       const uint32_t random_number);
-
-  // Update stats for a task destruction that performed |alloc_ops|
-  // allocations, |free_ops| frees, allocated |allocated_bytes| bytes, freed
-  // |freed_bytes|, where an estimated |alloc_overhead_bytes| went to heap
-  // overhead, and where at most |max_allocated_bytes| were outstanding at any
-  // one time.
-  // Note that |alloc_overhead_bytes|/|alloc_ops| yields the average estimated
-  // heap overhead of allocations in the task, and |allocated_bytes|/|alloc_ops|
-  // yields the average size of allocation.
-  // Note also that |allocated_bytes|-|freed_bytes| yields the net heap memory
-  // usage of the task, which can be negative.
-  void RecordAllocations(const uint32_t alloc_ops,
-                         const uint32_t free_ops,
-                         const uint32_t allocated_bytes,
-                         const uint32_t freed_bytes,
-                         const uint32_t alloc_overhead_bytes,
-                         const uint32_t max_allocated_bytes);
+  void RecordDeath(const int32_t queue_duration,
+                   const int32_t run_duration,
+                   const uint32_t random_number);
 
   // Metrics and past snapshots accessors, used only for serialization and in
   // tests.
@@ -388,22 +351,6 @@
   int32_t queue_duration_sample() const {
     return base::subtle::NoBarrier_Load(&queue_duration_sample_);
   }
-  int32_t alloc_ops() const {
-    return base::subtle::NoBarrier_Load(&alloc_ops_);
-  }
-  int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); }
-  int32_t allocated_bytes() const {
-    return base::subtle::NoBarrier_Load(&allocated_bytes_);
-  }
-  int32_t freed_bytes() const {
-    return base::subtle::NoBarrier_Load(&freed_bytes_);
-  }
-  int32_t alloc_overhead_bytes() const {
-    return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_);
-  }
-  int32_t max_allocated_bytes() const {
-    return base::subtle::NoBarrier_Load(&max_allocated_bytes_);
-  }
   const DeathDataPhaseSnapshot* last_phase_snapshot() const {
     return last_phase_snapshot_;
   }
@@ -414,12 +361,6 @@
   void OnProfilingPhaseCompleted(int profiling_phase);
 
  private:
-  // A saturating addition operation for member variables. This elides the
-  // use of atomic-primitive reads for members that are only written on the
-  // owning thread.
-  static void SaturatingMemberAdd(const uint32_t addend,
-                                  base::subtle::Atomic32* sum);
-
   // Members are ordered from most regularly read and updated, to least
   // frequently used.  This might help a bit with cache lines.
   // Number of runs seen (divisor for calculating averages).
@@ -442,24 +383,6 @@
   // snapshot thread.
   base::subtle::Atomic32 run_duration_max_;
   base::subtle::Atomic32 queue_duration_max_;
-
-  // The cumulative number of allocation and free operations.
-  base::subtle::Atomic32 alloc_ops_;
-  base::subtle::Atomic32 free_ops_;
-
-  // The number of bytes allocated by the task.
-  base::subtle::Atomic32 allocated_bytes_;
-
-  // The number of bytes freed by the task.
-  base::subtle::Atomic32 freed_bytes_;
-
-  // The cumulative number of overhead bytes. Where available this yields an
-  // estimate of the heap overhead for allocations.
-  base::subtle::Atomic32 alloc_overhead_bytes_;
-
-  // The high-watermark for the number of outstanding heap allocated bytes.
-  base::subtle::Atomic32 max_allocated_bytes_;
-
   // Samples, used by crowd sourcing gatherers.  These are almost never read,
   // and rarely updated.  They can be modified only on the death thread.
   base::subtle::Atomic32 run_duration_sample_;
@@ -484,14 +407,14 @@
   TaskSnapshot();
   TaskSnapshot(const BirthOnThreadSnapshot& birth,
                const DeathDataSnapshot& death_data,
-               const std::string& death_sanitized_thread_name);
+               const std::string& death_thread_name);
   ~TaskSnapshot();
 
   BirthOnThreadSnapshot birth;
   // Delta between death data for a thread for a certain profiling phase and the
   // snapshot for the pervious phase, if any.  Otherwise, just a snapshot.
   DeathDataSnapshot death_data;
-  std::string death_sanitized_thread_name;
+  std::string death_thread_name;
 };
 
 //------------------------------------------------------------------------------
@@ -527,8 +450,9 @@
 
   // Initialize the current thread context with a new instance of ThreadData.
   // This is used by all threads that have names, and should be explicitly
-  // set *before* any births on the threads have taken place.
-  static void InitializeThreadContext(const std::string& thread_name);
+  // set *before* any births on the threads have taken place.  It is generally
+  // only used by the message loop, which has a well defined thread name.
+  static void InitializeThreadContext(const std::string& suggested_name);
 
   // Using Thread Local Store, find the current instance for collecting data.
   // If an instance does not exist, construct one (and remember it for use on
@@ -586,9 +510,7 @@
   static void TallyRunInAScopedRegionIfTracking(const Births* births,
                                                 const TaskStopwatch& stopwatch);
 
-  const std::string& sanitized_thread_name() const {
-    return sanitized_thread_name_;
-  }
+  const std::string& thread_name() const { return thread_name_; }
 
   // Initializes all statics if needed (this initialization call should be made
   // while we are single threaded).
@@ -637,7 +559,12 @@
   typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
       DeathsSnapshot;
 
-  explicit ThreadData(const std::string& sanitized_thread_name);
+  // Worker thread construction creates a name since there is none.
+  explicit ThreadData(int thread_number);
+
+  // Message loop based construction should provide a name.
+  explicit ThreadData(const std::string& suggested_name);
+
   ~ThreadData();
 
   // Push this instance to the head of all_thread_data_list_head_, linking it to
@@ -701,12 +628,6 @@
   // ThreadData instances.
   static void ShutdownSingleThreadedCleanup(bool leak);
 
-  // Returns a ThreadData instance for a thread whose sanitized name is
-  // |sanitized_thread_name|. The returned instance may have been extracted from
-  // the list of retired ThreadData instances or newly allocated.
-  static ThreadData* GetRetiredOrCreateThreadData(
-      const std::string& sanitized_thread_name);
-
   // When non-null, this specifies an external function that supplies monotone
   // increasing time functcion.
   static NowFunction* now_function_for_testing_;
@@ -714,16 +635,22 @@
   // We use thread local store to identify which ThreadData to interact with.
   static base::ThreadLocalStorage::StaticSlot tls_index_;
 
-  // Linked list of ThreadData instances that were associated with threads that
-  // have been terminated and that have not been associated with a new thread
-  // since then. This is only accessed while |list_lock_| is held.
-  static ThreadData* first_retired_thread_data_;
+  // List of ThreadData instances for use with worker threads.  When a worker
+  // thread is done (terminated), we push it onto this list.  When a new worker
+  // thread is created, we first try to re-use a ThreadData instance from the
+  // list, and if none are available, construct a new one.
+  // This is only accessed while list_lock_ is held.
+  static ThreadData* first_retired_worker_;
 
   // Link to the most recently created instance (starts a null terminated list).
   // The list is traversed by about:profiler when it needs to snapshot data.
   // This is only accessed while list_lock_ is held.
   static ThreadData* all_thread_data_list_head_;
 
+  // The next available worker thread number.  This should only be accessed when
+  // the list_lock_ is held.
+  static int worker_thread_data_creation_count_;
+
   // The number of times TLS has called us back to cleanup a ThreadData
   // instance.  This is only accessed while list_lock_ is held.
   static int cleanup_count_;
@@ -744,16 +671,23 @@
 
   // Link to next instance (null terminated list).  Used to globally track all
   // registered instances (corresponds to all registered threads where we keep
-  // data). Only modified in the constructor.
+  // data).
   ThreadData* next_;
 
-  // Pointer to another retired ThreadData instance. This value is nullptr if
-  // this is associated with an active thread.
-  ThreadData* next_retired_thread_data_;
+  // Pointer to another ThreadData instance for a Worker-Thread that has been
+  // retired (its thread was terminated).  This value is non-NULL only for a
+  // retired ThreadData associated with a Worker-Thread.
+  ThreadData* next_retired_worker_;
 
-  // The name of the thread that is being recorded, with all trailing digits
-  // replaced with a single "*" character.
-  const std::string sanitized_thread_name_;
+  // The name of the thread that is being recorded.  If this thread has no
+  // message_loop, then this is a worker thread, with a sequence number postfix.
+  std::string thread_name_;
+
+  // Indicate if this is a worker thread, and the ThreadData contexts should be
+  // stored in the unregistered_thread_data_pool_ when not in use.
+  // Value is zero when it is not a worker thread.  Value is a positive integer
+  // corresponding to the created thread name if it is a worker thread.
+  int worker_thread_number_;
 
   // A map used on each thread to keep track of Births on this thread.
   // This map should only be accessed on the thread it was constructed on.
@@ -821,13 +755,6 @@
   // this thread during that period.
   int32_t RunDurationMs() const;
 
-#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
-  const base::debug::ThreadHeapUsageTracker& heap_usage() const {
-    return heap_usage_;
-  }
-  bool heap_tracking_enabled() const { return heap_tracking_enabled_; }
-#endif
-
   // Returns tracking info for the current thread.
   ThreadData* GetThreadData() const;
 
@@ -835,11 +762,6 @@
   // Time when the stopwatch was started.
   TrackedTime start_time_;
 
-#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
-  base::debug::ThreadHeapUsageTracker heap_usage_;
-  bool heap_tracking_enabled_;
-#endif
-
   // Wallclock duration of the task.
   int32_t wallclock_duration_ms_;
 
diff --git a/base/tracked_objects_unittest.cc b/base/tracked_objects_unittest.cc
index f208e3c..70d9601 100644
--- a/base/tracked_objects_unittest.cc
+++ b/base/tracked_objects_unittest.cc
@@ -11,27 +11,17 @@
 
 #include <memory>
 
-#include "base/macros.h"
 #include "base/process/process_handle.h"
-#include "base/strings/stringprintf.h"
-#include "base/threading/thread.h"
 #include "base/time/time.h"
 #include "base/tracking_info.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 const int kLineNumber = 1776;
 const char kFile[] = "FixedUnitTestFileName";
-const char kWorkerThreadName[] = "WorkerThread-*";
+const char kWorkerThreadName[] = "WorkerThread-1";
 const char kMainThreadName[] = "SomeMainThreadName";
 const char kStillAlive[] = "Still_Alive";
 
-const int32_t kAllocOps = 23;
-const int32_t kFreeOps = 27;
-const int32_t kAllocatedBytes = 59934;
-const int32_t kFreedBytes = 2 * kAllocatedBytes;
-const int32_t kAllocOverheadBytes = kAllocOps * 8;
-const int32_t kMaxAllocatedBytes = kAllocatedBytes / 2;
-
 namespace tracked_objects {
 
 class TrackedObjectsTest : public testing::Test {
@@ -95,8 +85,7 @@
     EXPECT_EQ(kLineNumber,
               process_data_phase.tasks[0].birth.location.line_number);
 
-    EXPECT_EQ(birth_thread,
-              process_data_phase.tasks[0].birth.sanitized_thread_name);
+    EXPECT_EQ(birth_thread, process_data_phase.tasks[0].birth.thread_name);
 
     EXPECT_EQ(count, process_data_phase.tasks[0].death_data.count);
     EXPECT_EQ(count * run_ms,
@@ -111,8 +100,7 @@
     EXPECT_EQ(queue_ms,
               process_data_phase.tasks[0].death_data.queue_duration_sample);
 
-    EXPECT_EQ(death_thread,
-              process_data_phase.tasks[0].death_sanitized_thread_name);
+    EXPECT_EQ(death_thread, process_data_phase.tasks[0].death_thread_name);
 
     EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
   }
@@ -120,16 +108,6 @@
   // Sets time that will be returned by ThreadData::Now().
   static void SetTestTime(unsigned int test_time) { test_time_ = test_time; }
 
-  int GetNumThreadData() {
-    int num_thread_data = 0;
-    ThreadData* current = ThreadData::first();
-    while (current) {
-      ++num_thread_data;
-      current = current->next();
-    }
-    return num_thread_data;
-  }
-
  private:
   // Returns test time in milliseconds.
   static unsigned int GetTestTime() { return test_time_; }
@@ -245,8 +223,7 @@
             process_data_phase.tasks[0].birth.location.function_name);
   EXPECT_EQ(kLineNumber,
             process_data_phase.tasks[0].birth.location.line_number);
-  EXPECT_EQ(kWorkerThreadName,
-            process_data_phase.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].birth.thread_name);
   EXPECT_EQ(1, process_data_phase.tasks[0].death_data.count);
   EXPECT_EQ(time_elapsed,
             process_data_phase.tasks[0].death_data.run_duration_sum);
@@ -257,11 +234,10 @@
   EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_sum);
   EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_sample);
-  EXPECT_EQ(kWorkerThreadName,
-            process_data_phase.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].death_thread_name);
 }
 
-TEST_F(TrackedObjectsTest, DeathDataTestRecordDurations) {
+TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
   ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
 
   std::unique_ptr<DeathData> data(new DeathData());
@@ -279,7 +255,7 @@
   int32_t queue_ms = 8;
 
   const int kUnrandomInt = 0;  // Fake random int that ensure we sample data.
-  data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
+  data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
   EXPECT_EQ(data->run_duration_sum(), run_ms);
   EXPECT_EQ(data->run_duration_max(), run_ms);
   EXPECT_EQ(data->run_duration_sample(), run_ms);
@@ -289,7 +265,7 @@
   EXPECT_EQ(data->count(), 1);
   EXPECT_EQ(nullptr, data->last_phase_snapshot());
 
-  data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
+  data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
   EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
   EXPECT_EQ(data->run_duration_max(), run_ms);
   EXPECT_EQ(data->run_duration_sample(), run_ms);
@@ -300,77 +276,18 @@
   EXPECT_EQ(nullptr, data->last_phase_snapshot());
 }
 
-TEST_F(TrackedObjectsTest, DeathDataTestRecordAllocations) {
-  ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
-
-  std::unique_ptr<DeathData> data(new DeathData());
-  ASSERT_NE(data, nullptr);
-
-  EXPECT_EQ(data->alloc_ops(), 0);
-  EXPECT_EQ(data->free_ops(), 0);
-  EXPECT_EQ(data->allocated_bytes(), 0);
-  EXPECT_EQ(data->freed_bytes(), 0);
-  EXPECT_EQ(data->alloc_overhead_bytes(), 0);
-  EXPECT_EQ(data->max_allocated_bytes(), 0);
-
-  EXPECT_EQ(nullptr, data->last_phase_snapshot());
-
-  data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
-                          kAllocOverheadBytes, kMaxAllocatedBytes);
-  EXPECT_EQ(data->alloc_ops(), kAllocOps);
-  EXPECT_EQ(data->free_ops(), kFreeOps);
-  EXPECT_EQ(data->allocated_bytes(), kAllocatedBytes);
-  EXPECT_EQ(data->freed_bytes(), kFreedBytes);
-  EXPECT_EQ(data->alloc_overhead_bytes(), kAllocOverheadBytes);
-  EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
-
-  // Record another batch, with a smaller max.
-  const int32_t kSmallerMaxAllocatedBytes = kMaxAllocatedBytes / 2;
-  data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
-                          kAllocOverheadBytes, kSmallerMaxAllocatedBytes);
-  EXPECT_EQ(data->alloc_ops(), 2 * kAllocOps);
-  EXPECT_EQ(data->free_ops(), 2 * kFreeOps);
-  EXPECT_EQ(data->allocated_bytes(), 2 * kAllocatedBytes);
-  EXPECT_EQ(data->freed_bytes(), 2 * kFreedBytes);
-  EXPECT_EQ(data->alloc_overhead_bytes(), 2 * kAllocOverheadBytes);
-  EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
-
-  // Now with a larger max.
-  const int32_t kLargerMaxAllocatedBytes = kMaxAllocatedBytes * 2;
-  data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
-                          kAllocOverheadBytes, kLargerMaxAllocatedBytes);
-  EXPECT_EQ(data->alloc_ops(), 3 * kAllocOps);
-  EXPECT_EQ(data->free_ops(), 3 * kFreeOps);
-  EXPECT_EQ(data->allocated_bytes(), 3 * kAllocatedBytes);
-  EXPECT_EQ(data->freed_bytes(), 3 * kFreedBytes);
-  EXPECT_EQ(data->alloc_overhead_bytes(), 3 * kAllocOverheadBytes);
-  EXPECT_EQ(data->max_allocated_bytes(), kLargerMaxAllocatedBytes);
-
-  // Saturate everything.
-  data->RecordAllocations(INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX);
-  EXPECT_EQ(data->alloc_ops(), INT_MAX);
-  EXPECT_EQ(data->free_ops(), INT_MAX);
-  EXPECT_EQ(data->allocated_bytes(), INT_MAX);
-  EXPECT_EQ(data->freed_bytes(), INT_MAX);
-  EXPECT_EQ(data->alloc_overhead_bytes(), INT_MAX);
-  EXPECT_EQ(data->max_allocated_bytes(), INT_MAX);
-}
-
 TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
   ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
 
   std::unique_ptr<DeathData> data(new DeathData());
   ASSERT_NE(data, nullptr);
 
-  const int32_t run_ms = 42;
-  const int32_t queue_ms = 8;
+  int32_t run_ms = 42;
+  int32_t queue_ms = 8;
 
   const int kUnrandomInt = 0;  // Fake random int that ensure we sample data.
-  data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
-  data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
-
-  data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
-                          kAllocOverheadBytes, kMaxAllocatedBytes);
+  data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+  data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
 
   data->OnProfilingPhaseCompleted(123);
   EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
@@ -380,14 +297,6 @@
   EXPECT_EQ(data->queue_duration_max(), 0);
   EXPECT_EQ(data->queue_duration_sample(), queue_ms);
   EXPECT_EQ(data->count(), 2);
-
-  EXPECT_EQ(data->alloc_ops(), kAllocOps);
-  EXPECT_EQ(data->free_ops(), kFreeOps);
-  EXPECT_EQ(data->allocated_bytes(), kAllocatedBytes);
-  EXPECT_EQ(data->freed_bytes(), kFreedBytes);
-  EXPECT_EQ(data->alloc_overhead_bytes(), kAllocOverheadBytes);
-  EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
-
   ASSERT_NE(nullptr, data->last_phase_snapshot());
   EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
   EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
@@ -402,26 +311,12 @@
             data->last_phase_snapshot()->death_data.queue_duration_max);
   EXPECT_EQ(queue_ms,
             data->last_phase_snapshot()->death_data.queue_duration_sample);
-
-  EXPECT_EQ(kAllocOps, data->last_phase_snapshot()->death_data.alloc_ops);
-  EXPECT_EQ(kFreeOps, data->last_phase_snapshot()->death_data.free_ops);
-  EXPECT_EQ(kAllocatedBytes,
-            data->last_phase_snapshot()->death_data.allocated_bytes);
-  EXPECT_EQ(kFreedBytes, data->last_phase_snapshot()->death_data.freed_bytes);
-  EXPECT_EQ(kAllocOverheadBytes,
-            data->last_phase_snapshot()->death_data.alloc_overhead_bytes);
-  EXPECT_EQ(kMaxAllocatedBytes,
-            data->last_phase_snapshot()->death_data.max_allocated_bytes);
-
   EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
 
-  const int32_t run_ms1 = 21;
-  const int32_t queue_ms1 = 4;
+  int32_t run_ms1 = 21;
+  int32_t queue_ms1 = 4;
 
-  data->RecordDurations(queue_ms1, run_ms1, kUnrandomInt);
-  data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
-                          kAllocOverheadBytes, kMaxAllocatedBytes);
-
+  data->RecordDeath(queue_ms1, run_ms1, kUnrandomInt);
   EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms + run_ms1);
   EXPECT_EQ(data->run_duration_max(), run_ms1);
   EXPECT_EQ(data->run_duration_sample(), run_ms1);
@@ -429,14 +324,6 @@
   EXPECT_EQ(data->queue_duration_max(), queue_ms1);
   EXPECT_EQ(data->queue_duration_sample(), queue_ms1);
   EXPECT_EQ(data->count(), 3);
-
-  EXPECT_EQ(data->alloc_ops(), 2 * kAllocOps);
-  EXPECT_EQ(data->free_ops(), 2 * kFreeOps);
-  EXPECT_EQ(data->allocated_bytes(), 2 * kAllocatedBytes);
-  EXPECT_EQ(data->freed_bytes(), 2 * kFreedBytes);
-  EXPECT_EQ(data->alloc_overhead_bytes(), 2 * kAllocOverheadBytes);
-  EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
-
   ASSERT_NE(nullptr, data->last_phase_snapshot());
   EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
   EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
@@ -451,17 +338,6 @@
             data->last_phase_snapshot()->death_data.queue_duration_max);
   EXPECT_EQ(queue_ms,
             data->last_phase_snapshot()->death_data.queue_duration_sample);
-
-  EXPECT_EQ(kAllocOps, data->last_phase_snapshot()->death_data.alloc_ops);
-  EXPECT_EQ(kFreeOps, data->last_phase_snapshot()->death_data.free_ops);
-  EXPECT_EQ(kAllocatedBytes,
-            data->last_phase_snapshot()->death_data.allocated_bytes);
-  EXPECT_EQ(kFreedBytes, data->last_phase_snapshot()->death_data.freed_bytes);
-  EXPECT_EQ(kAllocOverheadBytes,
-            data->last_phase_snapshot()->death_data.alloc_overhead_bytes);
-  EXPECT_EQ(kMaxAllocatedBytes,
-            data->last_phase_snapshot()->death_data.max_allocated_bytes);
-
   EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
 }
 
@@ -477,13 +353,6 @@
   snapshot.queue_duration_max = 101;
   snapshot.queue_duration_sample = 26;
 
-  snapshot.alloc_ops = 95;
-  snapshot.free_ops = 90;
-  snapshot.allocated_bytes = 10240;
-  snapshot.freed_bytes = 4096;
-  snapshot.alloc_overhead_bytes = 950;
-  snapshot.max_allocated_bytes = 10240;
-
   DeathDataSnapshot older_snapshot;
   older_snapshot.count = 2;
   older_snapshot.run_duration_sum = 95;
@@ -493,13 +362,6 @@
   older_snapshot.queue_duration_max = 99;
   older_snapshot.queue_duration_sample = 21;
 
-  older_snapshot.alloc_ops = 45;
-  older_snapshot.free_ops = 40;
-  older_snapshot.allocated_bytes = 4096;
-  older_snapshot.freed_bytes = 2048;
-  older_snapshot.alloc_overhead_bytes = 450;
-  older_snapshot.max_allocated_bytes = 10200;
-
   const DeathDataSnapshot& delta = snapshot.Delta(older_snapshot);
   EXPECT_EQ(8, delta.count);
   EXPECT_EQ(5, delta.run_duration_sum);
@@ -508,13 +370,6 @@
   EXPECT_EQ(10, delta.queue_duration_sum);
   EXPECT_EQ(101, delta.queue_duration_max);
   EXPECT_EQ(26, delta.queue_duration_sample);
-
-  EXPECT_EQ(50, delta.alloc_ops);
-  EXPECT_EQ(50, delta.free_ops);
-  EXPECT_EQ(6144, delta.allocated_bytes);
-  EXPECT_EQ(2048, delta.freed_bytes);
-  EXPECT_EQ(500, delta.alloc_overhead_bytes);
-  EXPECT_EQ(10240, delta.max_allocated_bytes);
 }
 
 TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToSnapshotWorkerThread) {
@@ -676,8 +531,7 @@
   EXPECT_EQ(kLineNumber,
             process_data_phase0.tasks[0].birth.location.line_number);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase0.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
 
   EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
   EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -687,8 +541,7 @@
   EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sample);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase0.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
 
   auto it1 = process_data.phased_snapshots.find(1);
   ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -702,8 +555,7 @@
   EXPECT_EQ(kLineNumber,
             process_data_phase1.tasks[0].birth.location.line_number);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase1.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
 
   EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
   EXPECT_EQ(10, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -713,8 +565,7 @@
   EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_sample);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase1.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
 
   EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
 }
@@ -797,8 +648,7 @@
   EXPECT_EQ(kLineNumber,
             process_data_phase0.tasks[0].birth.location.line_number);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase0.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
 
   EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
   EXPECT_EQ(6, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -808,8 +658,7 @@
   EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_sample);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase0.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
 
   auto it1 = process_data.phased_snapshots.find(1);
   ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -823,8 +672,7 @@
   EXPECT_EQ(kLineNumber,
             process_data_phase1.tasks[0].birth.location.line_number);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase1.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
 
   EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
   EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -834,8 +682,7 @@
   EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_sample);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase1.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
 
   auto it2 = process_data.phased_snapshots.find(2);
   ASSERT_TRUE(it2 != process_data.phased_snapshots.end());
@@ -849,8 +696,7 @@
   EXPECT_EQ(kLineNumber,
             process_data_phase2.tasks[0].birth.location.line_number);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase2.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase2.tasks[0].birth.thread_name);
 
   EXPECT_EQ(1, process_data_phase2.tasks[0].death_data.count);
   EXPECT_EQ(2, process_data_phase2.tasks[0].death_data.run_duration_sum);
@@ -860,8 +706,7 @@
   EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_sample);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase2.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase2.tasks[0].death_thread_name);
 
   EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
 }
@@ -908,8 +753,7 @@
   EXPECT_EQ(kLineNumber,
             process_data_phase0.tasks[0].birth.location.line_number);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase0.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
 
   EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
   EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -919,8 +763,7 @@
   EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sample);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase0.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
 
   auto it1 = process_data.phased_snapshots.find(1);
   ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -973,8 +816,7 @@
   EXPECT_EQ(kLineNumber,
             process_data_phase1.tasks[0].birth.location.line_number);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase1.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
 
   EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
   EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -984,8 +826,7 @@
   EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_sample);
 
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase1.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
 
   EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
 }
@@ -1152,8 +993,7 @@
             process_data_phase.tasks[0].birth.location.function_name);
   EXPECT_EQ(kLineNumber,
             process_data_phase.tasks[0].birth.location.line_number);
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase.tasks[0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[0].birth.thread_name);
   EXPECT_EQ(1, process_data_phase.tasks[0].death_data.count);
   EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_sum);
   EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_max);
@@ -1161,15 +1001,13 @@
   EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_sum);
   EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_max);
   EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_sample);
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase.tasks[0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[0].death_thread_name);
   EXPECT_EQ(kFile, process_data_phase.tasks[1].birth.location.file_name);
   EXPECT_EQ(kFunction,
             process_data_phase.tasks[1].birth.location.function_name);
   EXPECT_EQ(kSecondFakeLineNumber,
             process_data_phase.tasks[1].birth.location.line_number);
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase.tasks[1].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[1].birth.thread_name);
   EXPECT_EQ(1, process_data_phase.tasks[1].death_data.count);
   EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_sum);
   EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_max);
@@ -1177,8 +1015,7 @@
   EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_sum);
   EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_max);
   EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_sample);
-  EXPECT_EQ(kStillAlive,
-            process_data_phase.tasks[1].death_sanitized_thread_name);
+  EXPECT_EQ(kStillAlive, process_data_phase.tasks[1].death_thread_name);
   EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
 }
 
@@ -1321,8 +1158,7 @@
             process_data_phase.tasks[t0].birth.location.function_name);
   EXPECT_EQ(kLineNumber,
             process_data_phase.tasks[t0].birth.location.line_number);
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase.tasks[t0].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t0].birth.thread_name);
   EXPECT_EQ(1, process_data_phase.tasks[t0].death_data.count);
   EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_sum);
   EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_max);
@@ -1330,15 +1166,13 @@
   EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_sum);
   EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_max);
   EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_sample);
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase.tasks[t0].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t0].death_thread_name);
   EXPECT_EQ(kFile, process_data_phase.tasks[t1].birth.location.file_name);
   EXPECT_EQ(kFunction,
             process_data_phase.tasks[t1].birth.location.function_name);
   EXPECT_EQ(kSecondFakeLineNumber,
             process_data_phase.tasks[t1].birth.location.line_number);
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase.tasks[t1].birth.sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t1].birth.thread_name);
   EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.count);
   EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_sum);
   EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_max);
@@ -1346,30 +1180,8 @@
   EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_sum);
   EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_max);
   EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_sample);
-  EXPECT_EQ(kMainThreadName,
-            process_data_phase.tasks[t1].death_sanitized_thread_name);
+  EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t1].death_thread_name);
   EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
 }
 
-// Repetitively create and stop named threads. Verify that the number of
-// instantiated ThreadData instance is equal to the number of different
-// sanitized thread names used in the test.
-TEST_F(TrackedObjectsTest, ReuseRetiredThreadData) {
-  const char* const kThreadNames[] = {"Foo%d", "Bar%d", "123Dummy%d",
-                                      "456Dummy%d", "%d"};
-  constexpr int kNumIterations = 10;
-  EXPECT_EQ(0, GetNumThreadData());
-
-  for (int i = 0; i < kNumIterations; ++i) {
-    for (const char* thread_name : kThreadNames) {
-      base::Thread thread(base::StringPrintf(thread_name, i));
-      EXPECT_TRUE(thread.Start());
-    }
-  }
-
-  // Expect one ThreadData instance for each element in |kThreadNames| and one
-  // ThreadData instance for the main thread.
-  EXPECT_EQ(static_cast<int>(arraysize(kThreadNames) + 1), GetNumThreadData());
-}
-
 }  // namespace tracked_objects
diff --git a/base/tuple.h b/base/tuple.h
index 34fd789..e82f2e5 100644
--- a/base/tuple.h
+++ b/base/tuple.h
@@ -28,6 +28,7 @@
 #include <stddef.h>
 #include <tuple>
 
+#include "base/bind_helpers.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -42,6 +43,56 @@
 template <size_t... Ns>
 struct MakeIndexSequenceImpl;
 
+#if defined(_PREFAST_) && defined(OS_WIN)
+
+// Work around VC++ 2013 /analyze internal compiler error:
+// https://connect.microsoft.com/VisualStudio/feedback/details/1053626
+
+template <> struct MakeIndexSequenceImpl<0> {
+  using Type = IndexSequence<>;
+};
+template <> struct MakeIndexSequenceImpl<1> {
+  using Type = IndexSequence<0>;
+};
+template <> struct MakeIndexSequenceImpl<2> {
+  using Type = IndexSequence<0,1>;
+};
+template <> struct MakeIndexSequenceImpl<3> {
+  using Type = IndexSequence<0,1,2>;
+};
+template <> struct MakeIndexSequenceImpl<4> {
+  using Type = IndexSequence<0,1,2,3>;
+};
+template <> struct MakeIndexSequenceImpl<5> {
+  using Type = IndexSequence<0,1,2,3,4>;
+};
+template <> struct MakeIndexSequenceImpl<6> {
+  using Type = IndexSequence<0,1,2,3,4,5>;
+};
+template <> struct MakeIndexSequenceImpl<7> {
+  using Type = IndexSequence<0,1,2,3,4,5,6>;
+};
+template <> struct MakeIndexSequenceImpl<8> {
+  using Type = IndexSequence<0,1,2,3,4,5,6,7>;
+};
+template <> struct MakeIndexSequenceImpl<9> {
+  using Type = IndexSequence<0,1,2,3,4,5,6,7,8>;
+};
+template <> struct MakeIndexSequenceImpl<10> {
+  using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9>;
+};
+template <> struct MakeIndexSequenceImpl<11> {
+  using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10>;
+};
+template <> struct MakeIndexSequenceImpl<12> {
+  using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11>;
+};
+template <> struct MakeIndexSequenceImpl<13> {
+  using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11,12>;
+};
+
+#else  // defined(OS_WIN) && defined(_PREFAST_)
+
 template <size_t... Ns>
 struct MakeIndexSequenceImpl<0, Ns...> {
   using Type = IndexSequence<Ns...>;
@@ -51,6 +102,8 @@
 struct MakeIndexSequenceImpl<N, Ns...>
     : MakeIndexSequenceImpl<N - 1, N - 1, Ns...> {};
 
+#endif  // defined(OS_WIN) && defined(_PREFAST_)
+
 // std::get() in <=libstdc++-4.6 returns an lvalue-reference for
 // rvalue-reference of a tuple, where an rvalue-reference is expected.
 template <size_t I, typename... Ts>
@@ -68,10 +121,6 @@
 template <size_t N>
 using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
 
-template <typename T>
-using MakeIndexSequenceForTuple =
-    MakeIndexSequence<std::tuple_size<typename std::decay<T>::type>::value>;
-
 // Dispatchers ----------------------------------------------------------------
 //
 // Helper functions that call the given method on an object, with the unpacked
@@ -83,63 +132,62 @@
 
 // Non-Static Dispatchers with no out params.
 
-template <typename ObjT, typename Method, typename Tuple, size_t... Ns>
+template <typename ObjT, typename Method, typename... Ts, size_t... Ns>
 inline void DispatchToMethodImpl(const ObjT& obj,
                                  Method method,
-                                 Tuple&& args,
+                                 const std::tuple<Ts...>& arg,
                                  IndexSequence<Ns...>) {
-  (obj->*method)(base::get<Ns>(std::forward<Tuple>(args))...);
+  (obj->*method)(internal::Unwrap(std::get<Ns>(arg))...);
 }
 
-template <typename ObjT, typename Method, typename Tuple>
+template <typename ObjT, typename Method, typename... Ts>
 inline void DispatchToMethod(const ObjT& obj,
                              Method method,
-                             Tuple&& args) {
-  DispatchToMethodImpl(obj, method, std::forward<Tuple>(args),
-                       MakeIndexSequenceForTuple<Tuple>());
+                             const std::tuple<Ts...>& arg) {
+  DispatchToMethodImpl(obj, method, arg, MakeIndexSequence<sizeof...(Ts)>());
 }
 
 // Static Dispatchers with no out params.
 
-template <typename Function, typename Tuple, size_t... Ns>
+template <typename Function, typename... Ts, size_t... Ns>
 inline void DispatchToFunctionImpl(Function function,
-                                   Tuple&& args,
+                                   const std::tuple<Ts...>& arg,
                                    IndexSequence<Ns...>) {
-  (*function)(base::get<Ns>(std::forward<Tuple>(args))...);
+  (*function)(internal::Unwrap(std::get<Ns>(arg))...);
 }
 
-template <typename Function, typename Tuple>
-inline void DispatchToFunction(Function function, Tuple&& args) {
-  DispatchToFunctionImpl(function, std::forward<Tuple>(args),
-                         MakeIndexSequenceForTuple<Tuple>());
+template <typename Function, typename... Ts>
+inline void DispatchToFunction(Function function,
+                               const std::tuple<Ts...>& arg) {
+  DispatchToFunctionImpl(function, arg, MakeIndexSequence<sizeof...(Ts)>());
 }
 
 // Dispatchers with out parameters.
 
 template <typename ObjT,
           typename Method,
-          typename InTuple,
-          typename OutTuple,
+          typename... InTs,
+          typename... OutTs,
           size_t... InNs,
           size_t... OutNs>
 inline void DispatchToMethodImpl(const ObjT& obj,
                                  Method method,
-                                 InTuple&& in,
-                                 OutTuple* out,
+                                 const std::tuple<InTs...>& in,
+                                 std::tuple<OutTs...>* out,
                                  IndexSequence<InNs...>,
                                  IndexSequence<OutNs...>) {
-  (obj->*method)(base::get<InNs>(std::forward<InTuple>(in))...,
+  (obj->*method)(internal::Unwrap(std::get<InNs>(in))...,
                  &std::get<OutNs>(*out)...);
 }
 
-template <typename ObjT, typename Method, typename InTuple, typename OutTuple>
+template <typename ObjT, typename Method, typename... InTs, typename... OutTs>
 inline void DispatchToMethod(const ObjT& obj,
                              Method method,
-                             InTuple&& in,
-                             OutTuple* out) {
-  DispatchToMethodImpl(obj, method, std::forward<InTuple>(in), out,
-                       MakeIndexSequenceForTuple<InTuple>(),
-                       MakeIndexSequenceForTuple<OutTuple>());
+                             const std::tuple<InTs...>& in,
+                             std::tuple<OutTs...>* out) {
+  DispatchToMethodImpl(obj, method, in, out,
+                       MakeIndexSequence<sizeof...(InTs)>(),
+                       MakeIndexSequence<sizeof...(OutTs)>());
 }
 
 }  // namespace base
diff --git a/base/values.cc b/base/values.cc
index 5cc0d69..d579699 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -21,12 +21,6 @@
 
 namespace {
 
-const char* const kTypeNames[] = {"null",   "boolean", "integer",    "double",
-                                  "string", "binary",  "dictionary", "list"};
-static_assert(arraysize(kTypeNames) ==
-                  static_cast<size_t>(Value::Type::LIST) + 1,
-              "kTypeNames Has Wrong Size");
-
 std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
 
 // Make a deep copy of |node|, but don't include empty lists or dictionaries
@@ -61,10 +55,10 @@
 
 std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
   switch (node.GetType()) {
-    case Value::Type::LIST:
+    case Value::TYPE_LIST:
       return CopyListWithoutEmptyChildren(static_cast<const ListValue&>(node));
 
-    case Value::Type::DICTIONARY:
+    case Value::TYPE_DICTIONARY:
       return CopyDictionaryWithoutEmptyChildren(
           static_cast<const DictionaryValue&>(node));
 
@@ -75,323 +69,63 @@
 
 }  // namespace
 
+Value::~Value() {
+}
+
 // static
 std::unique_ptr<Value> Value::CreateNullValue() {
-  return WrapUnique(new Value(Type::NONE));
+  return WrapUnique(new Value(TYPE_NULL));
 }
 
-// static
-std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
-    const char* buffer,
-    size_t size) {
-  return MakeUnique<BinaryValue>(std::vector<char>(buffer, buffer + size));
+bool Value::GetAsBinary(const BinaryValue**) const {
+  return false;
 }
 
-Value::Value(const Value& that) {
-  InternalCopyConstructFrom(that);
+bool Value::GetAsBoolean(bool*) const {
+  return false;
 }
 
-Value::Value(Value&& that) {
-  InternalMoveConstructFrom(std::move(that));
+bool Value::GetAsInteger(int*) const {
+  return false;
 }
 
-Value::Value() : type_(Type::NONE) {}
-
-Value::Value(Type type) : type_(type) {
-  // Initialize with the default value.
-  switch (type_) {
-    case Type::NONE:
-      return;
-
-    case Type::BOOLEAN:
-      bool_value_ = false;
-      return;
-    case Type::INTEGER:
-      int_value_ = 0;
-      return;
-    case Type::DOUBLE:
-      double_value_ = 0.0;
-      return;
-    case Type::STRING:
-      string_value_.Init();
-      return;
-    case Type::BINARY:
-      binary_value_.Init();
-      return;
-    case Type::DICTIONARY:
-      dict_ptr_.Init(MakeUnique<DictStorage>());
-      return;
-    case Type::LIST:
-      list_.Init();
-      return;
-  }
+bool Value::GetAsDouble(double*) const {
+  return false;
 }
 
-Value::Value(bool in_bool) : type_(Type::BOOLEAN), bool_value_(in_bool) {}
-
-Value::Value(int in_int) : type_(Type::INTEGER), int_value_(in_int) {}
-
-Value::Value(double in_double) : type_(Type::DOUBLE), double_value_(in_double) {
-  if (!std::isfinite(double_value_)) {
-    NOTREACHED() << "Non-finite (i.e. NaN or positive/negative infinity) "
-                 << "values cannot be represented in JSON";
-    double_value_ = 0.0;
-  }
+bool Value::GetAsString(std::string*) const {
+  return false;
 }
 
-Value::Value(const char* in_string) : type_(Type::STRING) {
-  string_value_.Init(in_string);
-  DCHECK(IsStringUTF8(*string_value_));
+bool Value::GetAsString(string16*) const {
+  return false;
 }
 
-Value::Value(const std::string& in_string) : type_(Type::STRING) {
-  string_value_.Init(in_string);
-  DCHECK(IsStringUTF8(*string_value_));
+bool Value::GetAsString(const StringValue**) const {
+  return false;
 }
 
-Value::Value(std::string&& in_string) : type_(Type::STRING) {
-  string_value_.Init(std::move(in_string));
-  DCHECK(IsStringUTF8(*string_value_));
+bool Value::GetAsList(ListValue**) {
+  return false;
 }
 
-Value::Value(const char16* in_string) : type_(Type::STRING) {
-  string_value_.Init(UTF16ToUTF8(in_string));
+bool Value::GetAsList(const ListValue**) const {
+  return false;
 }
 
-Value::Value(const string16& in_string) : type_(Type::STRING) {
-  string_value_.Init(UTF16ToUTF8(in_string));
+bool Value::GetAsDictionary(DictionaryValue**) {
+  return false;
 }
 
-Value::Value(StringPiece in_string) : Value(in_string.as_string()) {}
-
-Value::Value(const std::vector<char>& in_blob) : type_(Type::BINARY) {
-  binary_value_.Init(in_blob);
-}
-
-Value::Value(std::vector<char>&& in_blob) : type_(Type::BINARY) {
-  binary_value_.Init(std::move(in_blob));
-}
-
-Value& Value::operator=(const Value& that) {
-  if (this != &that) {
-    if (type_ == that.type_) {
-      InternalCopyAssignFromSameType(that);
-    } else {
-      InternalCleanup();
-      InternalCopyConstructFrom(that);
-    }
-  }
-
-  return *this;
-}
-
-Value& Value::operator=(Value&& that) {
-  if (this != &that) {
-    if (type_ == that.type_) {
-      InternalMoveAssignFromSameType(std::move(that));
-    } else {
-      InternalCleanup();
-      InternalMoveConstructFrom(std::move(that));
-    }
-  }
-
-  return *this;
-}
-
-Value::~Value() {
-  InternalCleanup();
-}
-
-// static
-const char* Value::GetTypeName(Value::Type type) {
-  DCHECK_GE(static_cast<int>(type), 0);
-  DCHECK_LT(static_cast<size_t>(type), arraysize(kTypeNames));
-  return kTypeNames[static_cast<size_t>(type)];
-}
-
-bool Value::GetBool() const {
-  CHECK(is_bool());
-  return bool_value_;
-}
-
-int Value::GetInt() const {
-  CHECK(is_int());
-  return int_value_;
-}
-
-double Value::GetDouble() const {
-  if (is_double())
-    return double_value_;
-  if (is_int())
-    return int_value_;
-  CHECK(false);
-  return 0.0;
-}
-
-const std::string& Value::GetString() const {
-  CHECK(is_string());
-  return *string_value_;
-}
-
-const std::vector<char>& Value::GetBlob() const {
-  CHECK(is_blob());
-  return *binary_value_;
-}
-
-size_t Value::GetSize() const {
-  return GetBlob().size();
-}
-
-const char* Value::GetBuffer() const {
-  return GetBlob().data();
-}
-
-bool Value::GetAsBoolean(bool* out_value) const {
-  if (out_value && is_bool()) {
-    *out_value = bool_value_;
-    return true;
-  }
-  return is_bool();
-}
-
-bool Value::GetAsInteger(int* out_value) const {
-  if (out_value && is_int()) {
-    *out_value = int_value_;
-    return true;
-  }
-  return is_int();
-}
-
-bool Value::GetAsDouble(double* out_value) const {
-  if (out_value && is_double()) {
-    *out_value = double_value_;
-    return true;
-  } else if (out_value && is_int()) {
-    // Allow promotion from int to double.
-    *out_value = int_value_;
-    return true;
-  }
-  return is_double() || is_int();
-}
-
-bool Value::GetAsString(std::string* out_value) const {
-  if (out_value && is_string()) {
-    *out_value = *string_value_;
-    return true;
-  }
-  return is_string();
-}
-
-bool Value::GetAsString(string16* out_value) const {
-  if (out_value && is_string()) {
-    *out_value = UTF8ToUTF16(*string_value_);
-    return true;
-  }
-  return is_string();
-}
-
-bool Value::GetAsString(const Value** out_value) const {
-  if (out_value && is_string()) {
-    *out_value = static_cast<const Value*>(this);
-    return true;
-  }
-  return is_string();
-}
-
-bool Value::GetAsString(StringPiece* out_value) const {
-  if (out_value && is_string()) {
-    *out_value = *string_value_;
-    return true;
-  }
-  return is_string();
-}
-
-bool Value::GetAsBinary(const BinaryValue** out_value) const {
-  if (out_value && is_blob()) {
-    *out_value = this;
-    return true;
-  }
-  return is_blob();
-}
-
-bool Value::GetAsList(ListValue** out_value) {
-  if (out_value && is_list()) {
-    *out_value = static_cast<ListValue*>(this);
-    return true;
-  }
-  return is_list();
-}
-
-bool Value::GetAsList(const ListValue** out_value) const {
-  if (out_value && is_list()) {
-    *out_value = static_cast<const ListValue*>(this);
-    return true;
-  }
-  return is_list();
-}
-
-bool Value::GetAsDictionary(DictionaryValue** out_value) {
-  if (out_value && is_dict()) {
-    *out_value = static_cast<DictionaryValue*>(this);
-    return true;
-  }
-  return is_dict();
-}
-
-bool Value::GetAsDictionary(const DictionaryValue** out_value) const {
-  if (out_value && is_dict()) {
-    *out_value = static_cast<const DictionaryValue*>(this);
-    return true;
-  }
-  return is_dict();
+bool Value::GetAsDictionary(const DictionaryValue**) const {
+  return false;
 }
 
 Value* Value::DeepCopy() const {
   // This method should only be getting called for null Values--all subclasses
   // need to provide their own implementation;.
-  switch (type()) {
-    case Type::NONE:
-      return CreateNullValue().release();
-
-    case Type::BOOLEAN:
-      return new Value(bool_value_);
-    case Type::INTEGER:
-      return new Value(int_value_);
-    case Type::DOUBLE:
-      return new Value(double_value_);
-    case Type::STRING:
-      return new Value(*string_value_);
-    // For now, make BinaryValues for backward-compatibility. Convert to
-    // Value when that code is deleted.
-    case Type::BINARY:
-      return new Value(*binary_value_);
-
-    // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
-    // are completely inlined.
-    case Type::DICTIONARY: {
-      DictionaryValue* result = new DictionaryValue;
-
-      for (const auto& current_entry : **dict_ptr_) {
-        result->SetWithoutPathExpansion(current_entry.first,
-                                        current_entry.second->CreateDeepCopy());
-      }
-
-      return result;
-    }
-
-    case Type::LIST: {
-      ListValue* result = new ListValue;
-
-      for (const auto& entry : *list_)
-        result->Append(entry->CreateDeepCopy());
-
-      return result;
-    }
-
-    default:
-      NOTREACHED();
-      return nullptr;
-  }
+  DCHECK(IsType(TYPE_NULL));
+  return CreateNullValue().release();
 }
 
 std::unique_ptr<Value> Value::CreateDeepCopy() const {
@@ -399,53 +133,10 @@
 }
 
 bool Value::Equals(const Value* other) const {
-  if (other->type() != type())
-    return false;
-
-  switch (type()) {
-    case Type::NONE:
-      return true;
-    case Type::BOOLEAN:
-      return bool_value_ == other->bool_value_;
-    case Type::INTEGER:
-      return int_value_ == other->int_value_;
-    case Type::DOUBLE:
-      return double_value_ == other->double_value_;
-    case Type::STRING:
-      return *string_value_ == *(other->string_value_);
-    case Type::BINARY:
-      return *binary_value_ == *(other->binary_value_);
-    // TODO(crbug.com/646113): Clean this up when DictionaryValue and ListValue
-    // are completely inlined.
-    case Type::DICTIONARY: {
-      if ((*dict_ptr_)->size() != (*other->dict_ptr_)->size())
-        return false;
-
-      return std::equal(std::begin(**dict_ptr_), std::end(**dict_ptr_),
-                        std::begin(**(other->dict_ptr_)),
-                        [](const DictStorage::value_type& lhs,
-                           const DictStorage::value_type& rhs) {
-                          if (lhs.first != rhs.first)
-                            return false;
-
-                          return lhs.second->Equals(rhs.second.get());
-                        });
-    }
-    case Type::LIST: {
-      if (list_->size() != other->list_->size())
-        return false;
-
-      return std::equal(std::begin(*list_), std::end(*list_),
-                        std::begin(*(other->list_)),
-                        [](const ListStorage::value_type& lhs,
-                           const ListStorage::value_type& rhs) {
-                          return lhs->Equals(rhs.get());
-                        });
-    }
-  }
-
-  NOTREACHED();
-  return false;
+  // This method should only be getting called for null Values--all subclasses
+  // need to provide their own implementation;.
+  DCHECK(IsType(TYPE_NULL));
+  return other->IsType(TYPE_NULL);
 }
 
 // static
@@ -455,161 +146,189 @@
   return a->Equals(b);
 }
 
-void Value::InternalCopyFundamentalValue(const Value& that) {
-  switch (type_) {
-    case Type::NONE:
-      // Nothing to do.
-      return;
+Value::Value(Type type) : type_(type) {}
 
-    case Type::BOOLEAN:
-      bool_value_ = that.bool_value_;
-      return;
-    case Type::INTEGER:
-      int_value_ = that.int_value_;
-      return;
-    case Type::DOUBLE:
-      double_value_ = that.double_value_;
-      return;
+Value::Value(const Value& that) : type_(that.type_) {}
+
+Value& Value::operator=(const Value& that) {
+  type_ = that.type_;
+  return *this;
+}
+
+///////////////////// FundamentalValue ////////////////////
+
+FundamentalValue::FundamentalValue(bool in_value)
+    : Value(TYPE_BOOLEAN), boolean_value_(in_value) {
+}
+
+FundamentalValue::FundamentalValue(int in_value)
+    : Value(TYPE_INTEGER), integer_value_(in_value) {
+}
+
+FundamentalValue::FundamentalValue(double in_value)
+    : Value(TYPE_DOUBLE), double_value_(in_value) {
+  if (!std::isfinite(double_value_)) {
+    NOTREACHED() << "Non-finite (i.e. NaN or positive/negative infinity) "
+                 << "values cannot be represented in JSON";
+    double_value_ = 0.0;
+  }
+}
+
+FundamentalValue::~FundamentalValue() {
+}
+
+bool FundamentalValue::GetAsBoolean(bool* out_value) const {
+  if (out_value && IsType(TYPE_BOOLEAN))
+    *out_value = boolean_value_;
+  return (IsType(TYPE_BOOLEAN));
+}
+
+bool FundamentalValue::GetAsInteger(int* out_value) const {
+  if (out_value && IsType(TYPE_INTEGER))
+    *out_value = integer_value_;
+  return (IsType(TYPE_INTEGER));
+}
+
+bool FundamentalValue::GetAsDouble(double* out_value) const {
+  if (out_value && IsType(TYPE_DOUBLE))
+    *out_value = double_value_;
+  else if (out_value && IsType(TYPE_INTEGER))
+    *out_value = integer_value_;
+  return (IsType(TYPE_DOUBLE) || IsType(TYPE_INTEGER));
+}
+
+FundamentalValue* FundamentalValue::DeepCopy() const {
+  switch (GetType()) {
+    case TYPE_BOOLEAN:
+      return new FundamentalValue(boolean_value_);
+
+    case TYPE_INTEGER:
+      return new FundamentalValue(integer_value_);
+
+    case TYPE_DOUBLE:
+      return new FundamentalValue(double_value_);
 
     default:
       NOTREACHED();
+      return NULL;
   }
 }
 
-void Value::InternalCopyConstructFrom(const Value& that) {
-  type_ = that.type_;
+bool FundamentalValue::Equals(const Value* other) const {
+  if (other->GetType() != GetType())
+    return false;
 
-  switch (type_) {
-    case Type::NONE:
-    case Type::BOOLEAN:
-    case Type::INTEGER:
-    case Type::DOUBLE:
-      InternalCopyFundamentalValue(that);
-      return;
-
-    case Type::STRING:
-      string_value_.Init(*that.string_value_);
-      return;
-    case Type::BINARY:
-      binary_value_.Init(*that.binary_value_);
-      return;
-    // DictStorage and ListStorage are move-only types due to the presence of
-    // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
-    // TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
-    // can be copied directly.
-    case Type::DICTIONARY:
-      dict_ptr_.Init(std::move(*that.CreateDeepCopy()->dict_ptr_));
-      return;
-    case Type::LIST:
-      list_.Init(std::move(*that.CreateDeepCopy()->list_));
-      return;
+  switch (GetType()) {
+    case TYPE_BOOLEAN: {
+      bool lhs, rhs;
+      return GetAsBoolean(&lhs) && other->GetAsBoolean(&rhs) && lhs == rhs;
+    }
+    case TYPE_INTEGER: {
+      int lhs, rhs;
+      return GetAsInteger(&lhs) && other->GetAsInteger(&rhs) && lhs == rhs;
+    }
+    case TYPE_DOUBLE: {
+      double lhs, rhs;
+      return GetAsDouble(&lhs) && other->GetAsDouble(&rhs) && lhs == rhs;
+    }
+    default:
+      NOTREACHED();
+      return false;
   }
 }
 
-void Value::InternalMoveConstructFrom(Value&& that) {
-  type_ = that.type_;
+///////////////////// StringValue ////////////////////
 
-  switch (type_) {
-    case Type::NONE:
-    case Type::BOOLEAN:
-    case Type::INTEGER:
-    case Type::DOUBLE:
-      InternalCopyFundamentalValue(that);
-      return;
-
-    case Type::STRING:
-      string_value_.InitFromMove(std::move(that.string_value_));
-      return;
-    case Type::BINARY:
-      binary_value_.InitFromMove(std::move(that.binary_value_));
-      return;
-    case Type::DICTIONARY:
-      dict_ptr_.InitFromMove(std::move(that.dict_ptr_));
-      return;
-    case Type::LIST:
-      list_.InitFromMove(std::move(that.list_));
-      return;
-  }
+StringValue::StringValue(const std::string& in_value)
+    : Value(TYPE_STRING),
+      value_(in_value) {
+  DCHECK(IsStringUTF8(in_value));
 }
 
-void Value::InternalCopyAssignFromSameType(const Value& that) {
-  CHECK_EQ(type_, that.type_);
-
-  switch (type_) {
-    case Type::NONE:
-    case Type::BOOLEAN:
-    case Type::INTEGER:
-    case Type::DOUBLE:
-      InternalCopyFundamentalValue(that);
-      return;
-
-    case Type::STRING:
-      *string_value_ = *that.string_value_;
-      return;
-    case Type::BINARY:
-      *binary_value_ = *that.binary_value_;
-      return;
-    // DictStorage and ListStorage are move-only types due to the presence of
-    // unique_ptrs. This is why the call to |CreateDeepCopy| is necessary here.
-    // TODO(crbug.com/646113): Clean this up when DictStorage and ListStorage
-    // can be copied directly.
-    case Type::DICTIONARY:
-      *dict_ptr_ = std::move(*that.CreateDeepCopy()->dict_ptr_);
-      return;
-    case Type::LIST:
-      *list_ = std::move(*that.CreateDeepCopy()->list_);
-      return;
-  }
+StringValue::StringValue(const string16& in_value)
+    : Value(TYPE_STRING),
+      value_(UTF16ToUTF8(in_value)) {
 }
 
-void Value::InternalMoveAssignFromSameType(Value&& that) {
-  CHECK_EQ(type_, that.type_);
-
-  switch (type_) {
-    case Type::NONE:
-    case Type::BOOLEAN:
-    case Type::INTEGER:
-    case Type::DOUBLE:
-      InternalCopyFundamentalValue(that);
-      return;
-
-    case Type::STRING:
-      *string_value_ = std::move(*that.string_value_);
-      return;
-    case Type::BINARY:
-      *binary_value_ = std::move(*that.binary_value_);
-      return;
-    case Type::DICTIONARY:
-      *dict_ptr_ = std::move(*that.dict_ptr_);
-      return;
-    case Type::LIST:
-      *list_ = std::move(*that.list_);
-      return;
-  }
+StringValue::~StringValue() {
 }
 
-void Value::InternalCleanup() {
-  switch (type_) {
-    case Type::NONE:
-    case Type::BOOLEAN:
-    case Type::INTEGER:
-    case Type::DOUBLE:
-      // Nothing to do
-      return;
+std::string* StringValue::GetString() {
+  return &value_;
+}
 
-    case Type::STRING:
-      string_value_.Destroy();
-      return;
-    case Type::BINARY:
-      binary_value_.Destroy();
-      return;
-    case Type::DICTIONARY:
-      dict_ptr_.Destroy();
-      return;
-    case Type::LIST:
-      list_.Destroy();
-      return;
-  }
+const std::string& StringValue::GetString() const {
+  return value_;
+}
+
+bool StringValue::GetAsString(std::string* out_value) const {
+  if (out_value)
+    *out_value = value_;
+  return true;
+}
+
+bool StringValue::GetAsString(string16* out_value) const {
+  if (out_value)
+    *out_value = UTF8ToUTF16(value_);
+  return true;
+}
+
+bool StringValue::GetAsString(const StringValue** out_value) const {
+  if (out_value)
+    *out_value = this;
+  return true;
+}
+
+StringValue* StringValue::DeepCopy() const {
+  return new StringValue(value_);
+}
+
+bool StringValue::Equals(const Value* other) const {
+  if (other->GetType() != GetType())
+    return false;
+  std::string lhs, rhs;
+  return GetAsString(&lhs) && other->GetAsString(&rhs) && lhs == rhs;
+}
+
+///////////////////// BinaryValue ////////////////////
+
+BinaryValue::BinaryValue()
+    : Value(TYPE_BINARY),
+      size_(0) {
+}
+
+BinaryValue::BinaryValue(std::unique_ptr<char[]> buffer, size_t size)
+    : Value(TYPE_BINARY), buffer_(std::move(buffer)), size_(size) {}
+
+BinaryValue::~BinaryValue() {
+}
+
+// static
+std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
+    const char* buffer,
+    size_t size) {
+  std::unique_ptr<char[]> buffer_copy(new char[size]);
+  memcpy(buffer_copy.get(), buffer, size);
+  return base::MakeUnique<BinaryValue>(std::move(buffer_copy), size);
+}
+
+bool BinaryValue::GetAsBinary(const BinaryValue** out_value) const {
+  if (out_value)
+    *out_value = this;
+  return true;
+}
+
+BinaryValue* BinaryValue::DeepCopy() const {
+  return CreateWithCopiedBuffer(buffer_.get(), size_).release();
+}
+
+bool BinaryValue::Equals(const Value* other) const {
+  if (other->GetType() != GetType())
+    return false;
+  const BinaryValue* other_binary = static_cast<const BinaryValue*>(other);
+  if (other_binary->size_ != size_)
+    return false;
+  return !memcmp(GetBuffer(), other_binary->GetBuffer(), size_);
 }
 
 ///////////////////// DictionaryValue ////////////////////
@@ -625,102 +344,122 @@
   return nullptr;
 }
 
-DictionaryValue::DictionaryValue() : Value(Type::DICTIONARY) {}
+DictionaryValue::DictionaryValue()
+    : Value(TYPE_DICTIONARY) {
+}
 
-bool DictionaryValue::HasKey(StringPiece key) const {
+DictionaryValue::~DictionaryValue() {
+  Clear();
+}
+
+bool DictionaryValue::GetAsDictionary(DictionaryValue** out_value) {
+  if (out_value)
+    *out_value = this;
+  return true;
+}
+
+bool DictionaryValue::GetAsDictionary(const DictionaryValue** out_value) const {
+  if (out_value)
+    *out_value = this;
+  return true;
+}
+
+bool DictionaryValue::HasKey(const std::string& key) const {
   DCHECK(IsStringUTF8(key));
-  auto current_entry = (*dict_ptr_)->find(key.as_string());
-  DCHECK((current_entry == (*dict_ptr_)->end()) || current_entry->second);
-  return current_entry != (*dict_ptr_)->end();
+  auto current_entry = dictionary_.find(key);
+  DCHECK((current_entry == dictionary_.end()) || current_entry->second);
+  return current_entry != dictionary_.end();
 }
 
 void DictionaryValue::Clear() {
-  (*dict_ptr_)->clear();
+  dictionary_.clear();
 }
 
-void DictionaryValue::Set(StringPiece path, std::unique_ptr<Value> in_value) {
+void DictionaryValue::Set(const std::string& path,
+                          std::unique_ptr<Value> in_value) {
   DCHECK(IsStringUTF8(path));
   DCHECK(in_value);
 
-  StringPiece current_path(path);
+  std::string current_path(path);
   DictionaryValue* current_dictionary = this;
   for (size_t delimiter_position = current_path.find('.');
-       delimiter_position != StringPiece::npos;
+       delimiter_position != std::string::npos;
        delimiter_position = current_path.find('.')) {
     // Assume that we're indexing into a dictionary.
-    StringPiece key = current_path.substr(0, delimiter_position);
-    DictionaryValue* child_dictionary = nullptr;
+    std::string key(current_path, 0, delimiter_position);
+    DictionaryValue* child_dictionary = NULL;
     if (!current_dictionary->GetDictionary(key, &child_dictionary)) {
       child_dictionary = new DictionaryValue;
-      current_dictionary->SetWithoutPathExpansion(
-          key, base::WrapUnique(child_dictionary));
+      current_dictionary->SetWithoutPathExpansion(key, child_dictionary);
     }
 
     current_dictionary = child_dictionary;
-    current_path = current_path.substr(delimiter_position + 1);
+    current_path.erase(0, delimiter_position + 1);
   }
 
   current_dictionary->SetWithoutPathExpansion(current_path,
                                               std::move(in_value));
 }
 
-void DictionaryValue::Set(StringPiece path, Value* in_value) {
+void DictionaryValue::Set(const std::string& path, Value* in_value) {
   Set(path, WrapUnique(in_value));
 }
 
-void DictionaryValue::SetBoolean(StringPiece path, bool in_value) {
-  Set(path, new Value(in_value));
+void DictionaryValue::SetBoolean(const std::string& path, bool in_value) {
+  Set(path, new FundamentalValue(in_value));
 }
 
-void DictionaryValue::SetInteger(StringPiece path, int in_value) {
-  Set(path, new Value(in_value));
+void DictionaryValue::SetInteger(const std::string& path, int in_value) {
+  Set(path, new FundamentalValue(in_value));
 }
 
-void DictionaryValue::SetDouble(StringPiece path, double in_value) {
-  Set(path, new Value(in_value));
+void DictionaryValue::SetDouble(const std::string& path, double in_value) {
+  Set(path, new FundamentalValue(in_value));
 }
 
-void DictionaryValue::SetString(StringPiece path, StringPiece in_value) {
-  Set(path, new Value(in_value));
+void DictionaryValue::SetString(const std::string& path,
+                                const std::string& in_value) {
+  Set(path, new StringValue(in_value));
 }
 
-void DictionaryValue::SetString(StringPiece path, const string16& in_value) {
-  Set(path, new Value(in_value));
+void DictionaryValue::SetString(const std::string& path,
+                                const string16& in_value) {
+  Set(path, new StringValue(in_value));
 }
 
-void DictionaryValue::SetWithoutPathExpansion(StringPiece key,
+void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
                                               std::unique_ptr<Value> in_value) {
-  (**dict_ptr_)[key.as_string()] = std::move(in_value);
+  dictionary_[key] = std::move(in_value);
 }
 
-void DictionaryValue::SetWithoutPathExpansion(StringPiece key,
+void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
                                               Value* in_value) {
   SetWithoutPathExpansion(key, WrapUnique(in_value));
 }
 
-void DictionaryValue::SetBooleanWithoutPathExpansion(StringPiece path,
-                                                     bool in_value) {
-  SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
+void DictionaryValue::SetBooleanWithoutPathExpansion(
+    const std::string& path, bool in_value) {
+  SetWithoutPathExpansion(path, new FundamentalValue(in_value));
 }
 
-void DictionaryValue::SetIntegerWithoutPathExpansion(StringPiece path,
-                                                     int in_value) {
-  SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
+void DictionaryValue::SetIntegerWithoutPathExpansion(
+    const std::string& path, int in_value) {
+  SetWithoutPathExpansion(path, new FundamentalValue(in_value));
 }
 
-void DictionaryValue::SetDoubleWithoutPathExpansion(StringPiece path,
-                                                    double in_value) {
-  SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
+void DictionaryValue::SetDoubleWithoutPathExpansion(
+    const std::string& path, double in_value) {
+  SetWithoutPathExpansion(path, new FundamentalValue(in_value));
 }
 
-void DictionaryValue::SetStringWithoutPathExpansion(StringPiece path,
-                                                    StringPiece in_value) {
-  SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
+void DictionaryValue::SetStringWithoutPathExpansion(
+    const std::string& path, const std::string& in_value) {
+  SetWithoutPathExpansion(path, new StringValue(in_value));
 }
 
-void DictionaryValue::SetStringWithoutPathExpansion(StringPiece path,
-                                                    const string16& in_value) {
-  SetWithoutPathExpansion(path, base::MakeUnique<base::Value>(in_value));
+void DictionaryValue::SetStringWithoutPathExpansion(
+    const std::string& path, const string16& in_value) {
+  SetWithoutPathExpansion(path, new StringValue(in_value));
 }
 
 bool DictionaryValue::Get(StringPiece path,
@@ -733,7 +472,8 @@
        delimiter_position = current_path.find('.')) {
     const DictionaryValue* child_dictionary = NULL;
     if (!current_dictionary->GetDictionaryWithoutPathExpansion(
-            current_path.substr(0, delimiter_position), &child_dictionary)) {
+            current_path.substr(0, delimiter_position).as_string(),
+            &child_dictionary)) {
       return false;
     }
 
@@ -741,7 +481,8 @@
     current_path = current_path.substr(delimiter_position + 1);
   }
 
-  return current_dictionary->GetWithoutPathExpansion(current_path, out_value);
+  return current_dictionary->GetWithoutPathExpansion(current_path.as_string(),
+                                                     out_value);
 }
 
 bool DictionaryValue::Get(StringPiece path, Value** out_value)  {
@@ -750,7 +491,8 @@
       const_cast<const Value**>(out_value));
 }
 
-bool DictionaryValue::GetBoolean(StringPiece path, bool* bool_value) const {
+bool DictionaryValue::GetBoolean(const std::string& path,
+                                 bool* bool_value) const {
   const Value* value;
   if (!Get(path, &value))
     return false;
@@ -758,7 +500,8 @@
   return value->GetAsBoolean(bool_value);
 }
 
-bool DictionaryValue::GetInteger(StringPiece path, int* out_value) const {
+bool DictionaryValue::GetInteger(const std::string& path,
+                                 int* out_value) const {
   const Value* value;
   if (!Get(path, &value))
     return false;
@@ -766,7 +509,8 @@
   return value->GetAsInteger(out_value);
 }
 
-bool DictionaryValue::GetDouble(StringPiece path, double* out_value) const {
+bool DictionaryValue::GetDouble(const std::string& path,
+                                double* out_value) const {
   const Value* value;
   if (!Get(path, &value))
     return false;
@@ -774,7 +518,7 @@
   return value->GetAsDouble(out_value);
 }
 
-bool DictionaryValue::GetString(StringPiece path,
+bool DictionaryValue::GetString(const std::string& path,
                                 std::string* out_value) const {
   const Value* value;
   if (!Get(path, &value))
@@ -783,7 +527,8 @@
   return value->GetAsString(out_value);
 }
 
-bool DictionaryValue::GetString(StringPiece path, string16* out_value) const {
+bool DictionaryValue::GetString(const std::string& path,
+                                string16* out_value) const {
   const Value* value;
   if (!Get(path, &value))
     return false;
@@ -791,7 +536,7 @@
   return value->GetAsString(out_value);
 }
 
-bool DictionaryValue::GetStringASCII(StringPiece path,
+bool DictionaryValue::GetStringASCII(const std::string& path,
                                      std::string* out_value) const {
   std::string out;
   if (!GetString(path, &out))
@@ -806,20 +551,21 @@
   return true;
 }
 
-bool DictionaryValue::GetBinary(StringPiece path,
+bool DictionaryValue::GetBinary(const std::string& path,
                                 const BinaryValue** out_value) const {
   const Value* value;
   bool result = Get(path, &value);
-  if (!result || !value->IsType(Type::BINARY))
+  if (!result || !value->IsType(TYPE_BINARY))
     return false;
 
   if (out_value)
-    *out_value = value;
+    *out_value = static_cast<const BinaryValue*>(value);
 
   return true;
 }
 
-bool DictionaryValue::GetBinary(StringPiece path, BinaryValue** out_value) {
+bool DictionaryValue::GetBinary(const std::string& path,
+                                BinaryValue** out_value) {
   return static_cast<const DictionaryValue&>(*this).GetBinary(
       path,
       const_cast<const BinaryValue**>(out_value));
@@ -829,7 +575,7 @@
                                     const DictionaryValue** out_value) const {
   const Value* value;
   bool result = Get(path, &value);
-  if (!result || !value->IsType(Type::DICTIONARY))
+  if (!result || !value->IsType(TYPE_DICTIONARY))
     return false;
 
   if (out_value)
@@ -845,11 +591,11 @@
       const_cast<const DictionaryValue**>(out_value));
 }
 
-bool DictionaryValue::GetList(StringPiece path,
+bool DictionaryValue::GetList(const std::string& path,
                               const ListValue** out_value) const {
   const Value* value;
   bool result = Get(path, &value);
-  if (!result || !value->IsType(Type::LIST))
+  if (!result || !value->IsType(TYPE_LIST))
     return false;
 
   if (out_value)
@@ -858,17 +604,17 @@
   return true;
 }
 
-bool DictionaryValue::GetList(StringPiece path, ListValue** out_value) {
+bool DictionaryValue::GetList(const std::string& path, ListValue** out_value) {
   return static_cast<const DictionaryValue&>(*this).GetList(
       path,
       const_cast<const ListValue**>(out_value));
 }
 
-bool DictionaryValue::GetWithoutPathExpansion(StringPiece key,
+bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
                                               const Value** out_value) const {
   DCHECK(IsStringUTF8(key));
-  auto entry_iterator = (*dict_ptr_)->find(key.as_string());
-  if (entry_iterator == (*dict_ptr_)->end())
+  auto entry_iterator = dictionary_.find(key);
+  if (entry_iterator == dictionary_.end())
     return false;
 
   if (out_value)
@@ -876,14 +622,14 @@
   return true;
 }
 
-bool DictionaryValue::GetWithoutPathExpansion(StringPiece key,
+bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
                                               Value** out_value) {
   return static_cast<const DictionaryValue&>(*this).GetWithoutPathExpansion(
       key,
       const_cast<const Value**>(out_value));
 }
 
-bool DictionaryValue::GetBooleanWithoutPathExpansion(StringPiece key,
+bool DictionaryValue::GetBooleanWithoutPathExpansion(const std::string& key,
                                                      bool* out_value) const {
   const Value* value;
   if (!GetWithoutPathExpansion(key, &value))
@@ -892,7 +638,7 @@
   return value->GetAsBoolean(out_value);
 }
 
-bool DictionaryValue::GetIntegerWithoutPathExpansion(StringPiece key,
+bool DictionaryValue::GetIntegerWithoutPathExpansion(const std::string& key,
                                                      int* out_value) const {
   const Value* value;
   if (!GetWithoutPathExpansion(key, &value))
@@ -901,7 +647,7 @@
   return value->GetAsInteger(out_value);
 }
 
-bool DictionaryValue::GetDoubleWithoutPathExpansion(StringPiece key,
+bool DictionaryValue::GetDoubleWithoutPathExpansion(const std::string& key,
                                                     double* out_value) const {
   const Value* value;
   if (!GetWithoutPathExpansion(key, &value))
@@ -911,7 +657,7 @@
 }
 
 bool DictionaryValue::GetStringWithoutPathExpansion(
-    StringPiece key,
+    const std::string& key,
     std::string* out_value) const {
   const Value* value;
   if (!GetWithoutPathExpansion(key, &value))
@@ -920,7 +666,7 @@
   return value->GetAsString(out_value);
 }
 
-bool DictionaryValue::GetStringWithoutPathExpansion(StringPiece key,
+bool DictionaryValue::GetStringWithoutPathExpansion(const std::string& key,
                                                     string16* out_value) const {
   const Value* value;
   if (!GetWithoutPathExpansion(key, &value))
@@ -930,11 +676,11 @@
 }
 
 bool DictionaryValue::GetDictionaryWithoutPathExpansion(
-    StringPiece key,
+    const std::string& key,
     const DictionaryValue** out_value) const {
   const Value* value;
   bool result = GetWithoutPathExpansion(key, &value);
-  if (!result || !value->IsType(Type::DICTIONARY))
+  if (!result || !value->IsType(TYPE_DICTIONARY))
     return false;
 
   if (out_value)
@@ -944,7 +690,7 @@
 }
 
 bool DictionaryValue::GetDictionaryWithoutPathExpansion(
-    StringPiece key,
+    const std::string& key,
     DictionaryValue** out_value) {
   const DictionaryValue& const_this =
       static_cast<const DictionaryValue&>(*this);
@@ -954,11 +700,11 @@
 }
 
 bool DictionaryValue::GetListWithoutPathExpansion(
-    StringPiece key,
+    const std::string& key,
     const ListValue** out_value) const {
   const Value* value;
   bool result = GetWithoutPathExpansion(key, &value);
-  if (!result || !value->IsType(Type::LIST))
+  if (!result || !value->IsType(TYPE_LIST))
     return false;
 
   if (out_value)
@@ -967,7 +713,7 @@
   return true;
 }
 
-bool DictionaryValue::GetListWithoutPathExpansion(StringPiece key,
+bool DictionaryValue::GetListWithoutPathExpansion(const std::string& key,
                                                   ListValue** out_value) {
   return
       static_cast<const DictionaryValue&>(*this).GetListWithoutPathExpansion(
@@ -975,17 +721,17 @@
           const_cast<const ListValue**>(out_value));
 }
 
-bool DictionaryValue::Remove(StringPiece path,
+bool DictionaryValue::Remove(const std::string& path,
                              std::unique_ptr<Value>* out_value) {
   DCHECK(IsStringUTF8(path));
-  StringPiece current_path(path);
+  std::string current_path(path);
   DictionaryValue* current_dictionary = this;
   size_t delimiter_position = current_path.rfind('.');
-  if (delimiter_position != StringPiece::npos) {
+  if (delimiter_position != std::string::npos) {
     if (!GetDictionary(current_path.substr(0, delimiter_position),
                        &current_dictionary))
       return false;
-    current_path = current_path.substr(delimiter_position + 1);
+    current_path.erase(0, delimiter_position + 1);
   }
 
   return current_dictionary->RemoveWithoutPathExpansion(current_path,
@@ -993,20 +739,20 @@
 }
 
 bool DictionaryValue::RemoveWithoutPathExpansion(
-    StringPiece key,
+    const std::string& key,
     std::unique_ptr<Value>* out_value) {
   DCHECK(IsStringUTF8(key));
-  auto entry_iterator = (*dict_ptr_)->find(key.as_string());
-  if (entry_iterator == (*dict_ptr_)->end())
+  auto entry_iterator = dictionary_.find(key);
+  if (entry_iterator == dictionary_.end())
     return false;
 
   if (out_value)
     *out_value = std::move(entry_iterator->second);
-  (*dict_ptr_)->erase(entry_iterator);
+  dictionary_.erase(entry_iterator);
   return true;
 }
 
-bool DictionaryValue::RemovePath(StringPiece path,
+bool DictionaryValue::RemovePath(const std::string& path,
                                  std::unique_ptr<Value>* out_value) {
   bool result = false;
   size_t delimiter_position = path.find('.');
@@ -1014,7 +760,7 @@
   if (delimiter_position == std::string::npos)
     return RemoveWithoutPathExpansion(path, out_value);
 
-  StringPiece subdict_path = path.substr(0, delimiter_position);
+  const std::string subdict_path = path.substr(0, delimiter_position);
   DictionaryValue* subdict = NULL;
   if (!GetDictionary(subdict_path, &subdict))
     return false;
@@ -1036,11 +782,10 @@
 }
 
 void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
-  CHECK(dictionary->is_dict());
   for (DictionaryValue::Iterator it(*dictionary); !it.IsAtEnd(); it.Advance()) {
     const Value* merge_value = &it.value();
     // Check whether we have to merge dictionaries.
-    if (merge_value->IsType(Value::Type::DICTIONARY)) {
+    if (merge_value->IsType(Value::TYPE_DICTIONARY)) {
       DictionaryValue* sub_dict;
       if (GetDictionaryWithoutPathExpansion(it.key(), &sub_dict)) {
         sub_dict->MergeDictionary(
@@ -1049,31 +794,59 @@
       }
     }
     // All other cases: Make a copy and hook it up.
-    SetWithoutPathExpansion(it.key(),
-                            base::WrapUnique(merge_value->DeepCopy()));
+    SetWithoutPathExpansion(it.key(), merge_value->DeepCopy());
   }
 }
 
 void DictionaryValue::Swap(DictionaryValue* other) {
-  CHECK(other->is_dict());
-  dict_ptr_->swap(*(other->dict_ptr_));
+  dictionary_.swap(other->dictionary_);
 }
 
 DictionaryValue::Iterator::Iterator(const DictionaryValue& target)
-    : target_(target), it_((*target.dict_ptr_)->begin()) {}
+    : target_(target),
+      it_(target.dictionary_.begin()) {}
 
 DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
 
 DictionaryValue::Iterator::~Iterator() {}
 
 DictionaryValue* DictionaryValue::DeepCopy() const {
-  return static_cast<DictionaryValue*>(Value::DeepCopy());
+  DictionaryValue* result = new DictionaryValue;
+
+  for (const auto& current_entry : dictionary_) {
+    result->SetWithoutPathExpansion(current_entry.first,
+                                    current_entry.second->CreateDeepCopy());
+  }
+
+  return result;
 }
 
 std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
   return WrapUnique(DeepCopy());
 }
 
+bool DictionaryValue::Equals(const Value* other) const {
+  if (other->GetType() != GetType())
+    return false;
+
+  const DictionaryValue* other_dict =
+      static_cast<const DictionaryValue*>(other);
+  Iterator lhs_it(*this);
+  Iterator rhs_it(*other_dict);
+  while (!lhs_it.IsAtEnd() && !rhs_it.IsAtEnd()) {
+    if (lhs_it.key() != rhs_it.key() ||
+        !lhs_it.value().Equals(&rhs_it.value())) {
+      return false;
+    }
+    lhs_it.Advance();
+    rhs_it.Advance();
+  }
+  if (!lhs_it.IsAtEnd() || !rhs_it.IsAtEnd())
+    return false;
+
+  return true;
+}
+
 ///////////////////// ListValue ////////////////////
 
 // static
@@ -1086,10 +859,15 @@
   return nullptr;
 }
 
-ListValue::ListValue() : Value(Type::LIST) {}
+ListValue::ListValue() : Value(TYPE_LIST) {
+}
+
+ListValue::~ListValue() {
+  Clear();
+}
 
 void ListValue::Clear() {
-  list_->clear();
+  list_.clear();
 }
 
 bool ListValue::Set(size_t index, Value* in_value) {
@@ -1100,25 +878,25 @@
   if (!in_value)
     return false;
 
-  if (index >= list_->size()) {
+  if (index >= list_.size()) {
     // Pad out any intermediate indexes with null settings
-    while (index > list_->size())
+    while (index > list_.size())
       Append(CreateNullValue());
     Append(std::move(in_value));
   } else {
     // TODO(dcheng): remove this DCHECK once the raw pointer version is removed?
-    DCHECK((*list_)[index] != in_value);
-    (*list_)[index] = std::move(in_value);
+    DCHECK(list_[index] != in_value);
+    list_[index] = std::move(in_value);
   }
   return true;
 }
 
 bool ListValue::Get(size_t index, const Value** out_value) const {
-  if (index >= list_->size())
+  if (index >= list_.size())
     return false;
 
   if (out_value)
-    *out_value = (*list_)[index].get();
+    *out_value = list_[index].get();
 
   return true;
 }
@@ -1172,11 +950,11 @@
 bool ListValue::GetBinary(size_t index, const BinaryValue** out_value) const {
   const Value* value;
   bool result = Get(index, &value);
-  if (!result || !value->IsType(Type::BINARY))
+  if (!result || !value->IsType(TYPE_BINARY))
     return false;
 
   if (out_value)
-    *out_value = value;
+    *out_value = static_cast<const BinaryValue*>(value);
 
   return true;
 }
@@ -1191,7 +969,7 @@
                               const DictionaryValue** out_value) const {
   const Value* value;
   bool result = Get(index, &value);
-  if (!result || !value->IsType(Type::DICTIONARY))
+  if (!result || !value->IsType(TYPE_DICTIONARY))
     return false;
 
   if (out_value)
@@ -1209,7 +987,7 @@
 bool ListValue::GetList(size_t index, const ListValue** out_value) const {
   const Value* value;
   bool result = Get(index, &value);
-  if (!result || !value->IsType(Type::LIST))
+  if (!result || !value->IsType(TYPE_LIST))
     return false;
 
   if (out_value)
@@ -1225,21 +1003,21 @@
 }
 
 bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
-  if (index >= list_->size())
+  if (index >= list_.size())
     return false;
 
   if (out_value)
-    *out_value = std::move((*list_)[index]);
+    *out_value = std::move(list_[index]);
 
-  list_->erase(list_->begin() + index);
+  list_.erase(list_.begin() + index);
   return true;
 }
 
 bool ListValue::Remove(const Value& value, size_t* index) {
-  for (auto it = list_->begin(); it != list_->end(); ++it) {
+  for (auto it = list_.begin(); it != list_.end(); ++it) {
     if ((*it)->Equals(&value)) {
-      size_t previous_index = it - list_->begin();
-      list_->erase(it);
+      size_t previous_index = it - list_.begin();
+      list_.erase(it);
 
       if (index)
         *index = previous_index;
@@ -1252,40 +1030,38 @@
 ListValue::iterator ListValue::Erase(iterator iter,
                                      std::unique_ptr<Value>* out_value) {
   if (out_value)
-    *out_value = std::move(*ListStorage::iterator(iter));
+    *out_value = std::move(*Storage::iterator(iter));
 
-  return list_->erase(iter);
+  return list_.erase(iter);
 }
 
 void ListValue::Append(std::unique_ptr<Value> in_value) {
-  list_->push_back(std::move(in_value));
+  list_.push_back(std::move(in_value));
 }
 
-#if !defined(OS_LINUX)
 void ListValue::Append(Value* in_value) {
   DCHECK(in_value);
   Append(WrapUnique(in_value));
 }
-#endif
 
 void ListValue::AppendBoolean(bool in_value) {
-  Append(MakeUnique<Value>(in_value));
+  Append(new FundamentalValue(in_value));
 }
 
 void ListValue::AppendInteger(int in_value) {
-  Append(MakeUnique<Value>(in_value));
+  Append(new FundamentalValue(in_value));
 }
 
 void ListValue::AppendDouble(double in_value) {
-  Append(MakeUnique<Value>(in_value));
+  Append(new FundamentalValue(in_value));
 }
 
-void ListValue::AppendString(StringPiece in_value) {
-  Append(MakeUnique<Value>(in_value));
+void ListValue::AppendString(const std::string& in_value) {
+  Append(new StringValue(in_value));
 }
 
 void ListValue::AppendString(const string16& in_value) {
-  Append(MakeUnique<Value>(in_value));
+  Append(new StringValue(in_value));
 }
 
 void ListValue::AppendStrings(const std::vector<std::string>& in_values) {
@@ -1302,46 +1078,82 @@
   }
 }
 
-bool ListValue::AppendIfNotPresent(std::unique_ptr<Value> in_value) {
+bool ListValue::AppendIfNotPresent(Value* in_value) {
   DCHECK(in_value);
-  for (const auto& entry : *list_) {
-    if (entry->Equals(in_value.get())) {
+  for (const auto& entry : list_) {
+    if (entry->Equals(in_value)) {
+      delete in_value;
       return false;
     }
   }
-  list_->push_back(std::move(in_value));
+  list_.emplace_back(in_value);
   return true;
 }
 
-bool ListValue::Insert(size_t index, std::unique_ptr<Value> in_value) {
+bool ListValue::Insert(size_t index, Value* in_value) {
   DCHECK(in_value);
-  if (index > list_->size())
+  if (index > list_.size())
     return false;
 
-  list_->insert(list_->begin() + index, std::move(in_value));
+  list_.insert(list_.begin() + index, WrapUnique(in_value));
   return true;
 }
 
 ListValue::const_iterator ListValue::Find(const Value& value) const {
-  return std::find_if(list_->begin(), list_->end(),
+  return std::find_if(list_.begin(), list_.end(),
                       [&value](const std::unique_ptr<Value>& entry) {
                         return entry->Equals(&value);
                       });
 }
 
 void ListValue::Swap(ListValue* other) {
-  CHECK(other->is_list());
-  list_->swap(*(other->list_));
+  list_.swap(other->list_);
+}
+
+bool ListValue::GetAsList(ListValue** out_value) {
+  if (out_value)
+    *out_value = this;
+  return true;
+}
+
+bool ListValue::GetAsList(const ListValue** out_value) const {
+  if (out_value)
+    *out_value = this;
+  return true;
 }
 
 ListValue* ListValue::DeepCopy() const {
-  return static_cast<ListValue*>(Value::DeepCopy());
+  ListValue* result = new ListValue;
+
+  for (const auto& entry : list_)
+    result->Append(entry->CreateDeepCopy());
+
+  return result;
 }
 
 std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
   return WrapUnique(DeepCopy());
 }
 
+bool ListValue::Equals(const Value* other) const {
+  if (other->GetType() != GetType())
+    return false;
+
+  const ListValue* other_list =
+      static_cast<const ListValue*>(other);
+  Storage::const_iterator lhs_it, rhs_it;
+  for (lhs_it = begin(), rhs_it = other_list->begin();
+       lhs_it != end() && rhs_it != other_list->end();
+       ++lhs_it, ++rhs_it) {
+    if (!(*lhs_it)->Equals(rhs_it->get()))
+      return false;
+  }
+  if (lhs_it != end() || rhs_it != other_list->end())
+    return false;
+
+  return true;
+}
+
 ValueSerializer::~ValueSerializer() {
 }
 
@@ -1354,11 +1166,4 @@
   return out << json;
 }
 
-std::ostream& operator<<(std::ostream& out, const Value::Type& type) {
-  if (static_cast<int>(type) < 0 ||
-      static_cast<size_t>(type) >= arraysize(kTypeNames))
-    return out << "Invalid Type (index = " << static_cast<int>(type) << ")";
-  return out << Value::GetTypeName(type);
-}
-
 }  // namespace base
diff --git a/base/values.h b/base/values.h
index 35f66df..e3d6089 100644
--- a/base/values.h
+++ b/base/values.h
@@ -30,16 +30,17 @@
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/manual_constructor.h"
 #include "base/strings/string16.h"
 #include "base/strings/string_piece.h"
 
 namespace base {
 
+class BinaryValue;
 class DictionaryValue;
+class FundamentalValue;
 class ListValue;
+class StringValue;
 class Value;
-using BinaryValue = Value;
 
 // The Value class is the base class for Values. A Value can be instantiated
 // via the Create*Value() factory methods, or by directly creating instances of
@@ -48,151 +49,158 @@
 // See the file-level comment above for more information.
 class BASE_EXPORT Value {
  public:
-  using DictStorage = std::map<std::string, std::unique_ptr<Value>>;
-  using ListStorage = std::vector<std::unique_ptr<Value>>;
-
-  enum class Type {
-    NONE = 0,
-    BOOLEAN,
-    INTEGER,
-    DOUBLE,
-    STRING,
-    BINARY,
-    DICTIONARY,
-    LIST
+  enum Type {
+    TYPE_NULL = 0,
+    TYPE_BOOLEAN,
+    TYPE_INTEGER,
+    TYPE_DOUBLE,
+    TYPE_STRING,
+    TYPE_BINARY,
+    TYPE_DICTIONARY,
+    TYPE_LIST
     // Note: Do not add more types. See the file-level comment above for why.
   };
 
+  virtual ~Value();
+
   static std::unique_ptr<Value> CreateNullValue();
 
-  // For situations where you want to keep ownership of your buffer, this
-  // factory method creates a new BinaryValue by copying the contents of the
-  // buffer that's passed in.
-  // DEPRECATED, use MakeUnique<Value>(const std::vector<char>&) instead.
-  // TODO(crbug.com/646113): Delete this and migrate callsites.
-  static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
-                                                             size_t size);
-
-  Value(const Value& that);
-  Value(Value&& that);
-  Value();  // A null value.
-  explicit Value(Type type);
-  explicit Value(bool in_bool);
-  explicit Value(int in_int);
-  explicit Value(double in_double);
-
-  // Value(const char*) and Value(const char16*) are required despite
-  // Value(const std::string&) and Value(const string16&) because otherwise the
-  // compiler will choose the Value(bool) constructor for these arguments.
-  // Value(std::string&&) allow for efficient move construction.
-  // Value(StringPiece) exists due to many callsites passing StringPieces as
-  // arguments.
-  explicit Value(const char* in_string);
-  explicit Value(const std::string& in_string);
-  explicit Value(std::string&& in_string);
-  explicit Value(const char16* in_string);
-  explicit Value(const string16& in_string);
-  explicit Value(StringPiece in_string);
-
-  explicit Value(const std::vector<char>& in_blob);
-  explicit Value(std::vector<char>&& in_blob);
-
-  Value& operator=(const Value& that);
-  Value& operator=(Value&& that);
-
-  ~Value();
-
-  // Returns the name for a given |type|.
-  static const char* GetTypeName(Type type);
-
   // Returns the type of the value stored by the current Value object.
   // Each type will be implemented by only one subclass of Value, so it's
   // safe to use the Type to determine whether you can cast from
   // Value* to (Implementing Class)*.  Also, a Value object never changes
   // its type after construction.
-  Type GetType() const { return type_; }  // DEPRECATED, use type().
-  Type type() const { return type_; }
+  Type GetType() const { return type_; }
 
   // Returns true if the current object represents a given type.
   bool IsType(Type type) const { return type == type_; }
-  bool is_bool() const { return type() == Type::BOOLEAN; }
-  bool is_int() const { return type() == Type::INTEGER; }
-  bool is_double() const { return type() == Type::DOUBLE; }
-  bool is_string() const { return type() == Type::STRING; }
-  bool is_blob() const { return type() == Type::BINARY; }
-  bool is_dict() const { return type() == Type::DICTIONARY; }
-  bool is_list() const { return type() == Type::LIST; }
-
-  // These will all fatally assert if the type doesn't match.
-  bool GetBool() const;
-  int GetInt() const;
-  double GetDouble() const;  // Implicitly converts from int if necessary.
-  const std::string& GetString() const;
-  const std::vector<char>& GetBlob() const;
-
-  size_t GetSize() const;         // DEPRECATED, use GetBlob().size() instead.
-  const char* GetBuffer() const;  // DEPRECATED, use GetBlob().data() instead.
 
   // These methods allow the convenient retrieval of the contents of the Value.
   // If the current object can be converted into the given type, the value is
   // returned through the |out_value| parameter and true is returned;
   // otherwise, false is returned and |out_value| is unchanged.
-  bool GetAsBoolean(bool* out_value) const;
-  bool GetAsInteger(int* out_value) const;
-  bool GetAsDouble(double* out_value) const;
-  bool GetAsString(std::string* out_value) const;
-  bool GetAsString(string16* out_value) const;
-  bool GetAsString(const Value** out_value) const;
-  bool GetAsString(StringPiece* out_value) const;
-  bool GetAsBinary(const BinaryValue** out_value) const;
-  // ListValue::From is the equivalent for std::unique_ptr conversions.
-  bool GetAsList(ListValue** out_value);
-  bool GetAsList(const ListValue** out_value) const;
-  // DictionaryValue::From is the equivalent for std::unique_ptr conversions.
-  bool GetAsDictionary(DictionaryValue** out_value);
-  bool GetAsDictionary(const DictionaryValue** out_value) const;
+  virtual bool GetAsBoolean(bool* out_value) const;
+  virtual bool GetAsInteger(int* out_value) const;
+  virtual bool GetAsDouble(double* out_value) const;
+  virtual bool GetAsString(std::string* out_value) const;
+  virtual bool GetAsString(string16* out_value) const;
+  virtual bool GetAsString(const StringValue** out_value) const;
+  virtual bool GetAsBinary(const BinaryValue** out_value) const;
+  virtual bool GetAsList(ListValue** out_value);
+  virtual bool GetAsList(const ListValue** out_value) const;
+  virtual bool GetAsDictionary(DictionaryValue** out_value);
+  virtual bool GetAsDictionary(const DictionaryValue** out_value) const;
   // Note: Do not add more types. See the file-level comment above for why.
 
   // This creates a deep copy of the entire Value tree, and returns a pointer
-  // to the copy. The caller gets ownership of the copy, of course.
+  // to the copy.  The caller gets ownership of the copy, of course.
+  //
   // Subclasses return their own type directly in their overrides;
   // this works because C++ supports covariant return types.
-  Value* DeepCopy() const;
+  virtual Value* DeepCopy() const;
   // Preferred version of DeepCopy. TODO(estade): remove the above.
   std::unique_ptr<Value> CreateDeepCopy() const;
 
   // Compares if two Value objects have equal contents.
-  bool Equals(const Value* other) const;
+  virtual bool Equals(const Value* other) const;
 
   // Compares if two Value objects have equal contents. Can handle NULLs.
   // NULLs are considered equal but different from Value::CreateNullValue().
   static bool Equals(const Value* a, const Value* b);
 
  protected:
-  // TODO(crbug.com/646113): Make these private once DictionaryValue and
-  // ListValue are properly inlined.
-  Type type_;
-
-  union {
-    bool bool_value_;
-    int int_value_;
-    double double_value_;
-    ManualConstructor<std::string> string_value_;
-    ManualConstructor<std::vector<char>> binary_value_;
-    // For current gcc and clang sizeof(DictStorage) = 48, which would result
-    // in sizeof(Value) = 56 if DictStorage was stack allocated. Allocating it
-    // on the heap results in sizeof(Value) = 40 for all of gcc, clang and MSVC.
-    ManualConstructor<std::unique_ptr<DictStorage>> dict_ptr_;
-    ManualConstructor<ListStorage> list_;
-  };
+  // These aren't safe for end-users, but they are useful for subclasses.
+  explicit Value(Type type);
+  Value(const Value& that);
+  Value& operator=(const Value& that);
 
  private:
-  void InternalCopyFundamentalValue(const Value& that);
-  void InternalCopyConstructFrom(const Value& that);
-  void InternalMoveConstructFrom(Value&& that);
-  void InternalCopyAssignFromSameType(const Value& that);
-  void InternalMoveAssignFromSameType(Value&& that);
-  void InternalCleanup();
+  Type type_;
+};
+
+// FundamentalValue represents the simple fundamental types of values.
+class BASE_EXPORT FundamentalValue : public Value {
+ public:
+  explicit FundamentalValue(bool in_value);
+  explicit FundamentalValue(int in_value);
+  explicit FundamentalValue(double in_value);
+  ~FundamentalValue() override;
+
+  // Overridden from Value:
+  bool GetAsBoolean(bool* out_value) const override;
+  bool GetAsInteger(int* out_value) const override;
+  // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+  // doubles.
+  bool GetAsDouble(double* out_value) const override;
+  FundamentalValue* DeepCopy() const override;
+  bool Equals(const Value* other) const override;
+
+ private:
+  union {
+    bool boolean_value_;
+    int integer_value_;
+    double double_value_;
+  };
+};
+
+class BASE_EXPORT StringValue : public Value {
+ public:
+  // Initializes a StringValue with a UTF-8 narrow character string.
+  explicit StringValue(const std::string& in_value);
+
+  // Initializes a StringValue with a string16.
+  explicit StringValue(const string16& in_value);
+
+  ~StringValue() override;
+
+  // Returns |value_| as a pointer or reference.
+  std::string* GetString();
+  const std::string& GetString() const;
+
+  // Overridden from Value:
+  bool GetAsString(std::string* out_value) const override;
+  bool GetAsString(string16* out_value) const override;
+  bool GetAsString(const StringValue** out_value) const override;
+  StringValue* DeepCopy() const override;
+  bool Equals(const Value* other) const override;
+
+ private:
+  std::string value_;
+};
+
+class BASE_EXPORT BinaryValue: public Value {
+ public:
+  // Creates a BinaryValue with a null buffer and size of 0.
+  BinaryValue();
+
+  // Creates a BinaryValue, taking ownership of the bytes pointed to by
+  // |buffer|.
+  BinaryValue(std::unique_ptr<char[]> buffer, size_t size);
+
+  ~BinaryValue() override;
+
+  // For situations where you want to keep ownership of your buffer, this
+  // factory method creates a new BinaryValue by copying the contents of the
+  // buffer that's passed in.
+  static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
+                                                             size_t size);
+
+  size_t GetSize() const { return size_; }
+
+  // May return NULL.
+  char* GetBuffer() { return buffer_.get(); }
+  const char* GetBuffer() const { return buffer_.get(); }
+
+  // Overridden from Value:
+  bool GetAsBinary(const BinaryValue** out_value) const override;
+  BinaryValue* DeepCopy() const override;
+  bool Equals(const Value* other) const override;
+
+ private:
+  std::unique_ptr<char[]> buffer_;
+  size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(BinaryValue);
 };
 
 // DictionaryValue provides a key-value dictionary with (optional) "path"
@@ -200,19 +208,25 @@
 // are |std::string|s and should be UTF-8 encoded.
 class BASE_EXPORT DictionaryValue : public Value {
  public:
+  using Storage = std::map<std::string, std::unique_ptr<Value>>;
   // Returns |value| if it is a dictionary, nullptr otherwise.
   static std::unique_ptr<DictionaryValue> From(std::unique_ptr<Value> value);
 
   DictionaryValue();
+  ~DictionaryValue() override;
+
+  // Overridden from Value:
+  bool GetAsDictionary(DictionaryValue** out_value) override;
+  bool GetAsDictionary(const DictionaryValue** out_value) const override;
 
   // Returns true if the current dictionary has a value for the given key.
-  bool HasKey(StringPiece key) const;
+  bool HasKey(const std::string& key) const;
 
   // Returns the number of Values in this dictionary.
-  size_t size() const { return (*dict_ptr_)->size(); }
+  size_t size() const { return dictionary_.size(); }
 
   // Returns whether the dictionary is empty.
-  bool empty() const { return (*dict_ptr_)->empty(); }
+  bool empty() const { return dictionary_.empty(); }
 
   // Clears any current contents of this dictionary.
   void Clear();
@@ -224,31 +238,32 @@
   // If the key at any step of the way doesn't exist, or exists but isn't
   // a DictionaryValue, a new DictionaryValue will be created and attached
   // to the path in that location. |in_value| must be non-null.
-  void Set(StringPiece path, std::unique_ptr<Value> in_value);
+  void Set(const std::string& path, std::unique_ptr<Value> in_value);
   // Deprecated version of the above. TODO(estade): remove.
-  void Set(StringPiece path, Value* in_value);
+  void Set(const std::string& path, Value* in_value);
 
   // Convenience forms of Set().  These methods will replace any existing
   // value at that path, even if it has a different type.
-  void SetBoolean(StringPiece path, bool in_value);
-  void SetInteger(StringPiece path, int in_value);
-  void SetDouble(StringPiece path, double in_value);
-  void SetString(StringPiece path, StringPiece in_value);
-  void SetString(StringPiece path, const string16& in_value);
+  void SetBoolean(const std::string& path, bool in_value);
+  void SetInteger(const std::string& path, int in_value);
+  void SetDouble(const std::string& path, double in_value);
+  void SetString(const std::string& path, const std::string& in_value);
+  void SetString(const std::string& path, const string16& in_value);
 
   // Like Set(), but without special treatment of '.'.  This allows e.g. URLs to
   // be used as paths.
-  void SetWithoutPathExpansion(StringPiece key,
+  void SetWithoutPathExpansion(const std::string& key,
                                std::unique_ptr<Value> in_value);
   // Deprecated version of the above. TODO(estade): remove.
-  void SetWithoutPathExpansion(StringPiece key, Value* in_value);
+  void SetWithoutPathExpansion(const std::string& key, Value* in_value);
 
   // Convenience forms of SetWithoutPathExpansion().
-  void SetBooleanWithoutPathExpansion(StringPiece path, bool in_value);
-  void SetIntegerWithoutPathExpansion(StringPiece path, int in_value);
-  void SetDoubleWithoutPathExpansion(StringPiece path, double in_value);
-  void SetStringWithoutPathExpansion(StringPiece path, StringPiece in_value);
-  void SetStringWithoutPathExpansion(StringPiece path,
+  void SetBooleanWithoutPathExpansion(const std::string& path, bool in_value);
+  void SetIntegerWithoutPathExpansion(const std::string& path, int in_value);
+  void SetDoubleWithoutPathExpansion(const std::string& path, double in_value);
+  void SetStringWithoutPathExpansion(const std::string& path,
+                                     const std::string& in_value);
+  void SetStringWithoutPathExpansion(const std::string& path,
                                      const string16& in_value);
 
   // Gets the Value associated with the given path starting from this object.
@@ -266,41 +281,46 @@
   // and the return value will be true if the path is valid and the value at
   // the end of the path can be returned in the form specified.
   // |out_value| is optional and will only be set if non-NULL.
-  bool GetBoolean(StringPiece path, bool* out_value) const;
-  bool GetInteger(StringPiece path, int* out_value) const;
-  // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
+  bool GetBoolean(const std::string& path, bool* out_value) const;
+  bool GetInteger(const std::string& path, int* out_value) const;
+  // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
   // doubles.
-  bool GetDouble(StringPiece path, double* out_value) const;
-  bool GetString(StringPiece path, std::string* out_value) const;
-  bool GetString(StringPiece path, string16* out_value) const;
-  bool GetStringASCII(StringPiece path, std::string* out_value) const;
-  bool GetBinary(StringPiece path, const BinaryValue** out_value) const;
-  bool GetBinary(StringPiece path, BinaryValue** out_value);
+  bool GetDouble(const std::string& path, double* out_value) const;
+  bool GetString(const std::string& path, std::string* out_value) const;
+  bool GetString(const std::string& path, string16* out_value) const;
+  bool GetStringASCII(const std::string& path, std::string* out_value) const;
+  bool GetBinary(const std::string& path, const BinaryValue** out_value) const;
+  bool GetBinary(const std::string& path, BinaryValue** out_value);
   bool GetDictionary(StringPiece path,
                      const DictionaryValue** out_value) const;
   bool GetDictionary(StringPiece path, DictionaryValue** out_value);
-  bool GetList(StringPiece path, const ListValue** out_value) const;
-  bool GetList(StringPiece path, ListValue** out_value);
+  bool GetList(const std::string& path, const ListValue** out_value) const;
+  bool GetList(const std::string& path, ListValue** out_value);
 
   // Like Get(), but without special treatment of '.'.  This allows e.g. URLs to
   // be used as paths.
-  bool GetWithoutPathExpansion(StringPiece key, const Value** out_value) const;
-  bool GetWithoutPathExpansion(StringPiece key, Value** out_value);
-  bool GetBooleanWithoutPathExpansion(StringPiece key, bool* out_value) const;
-  bool GetIntegerWithoutPathExpansion(StringPiece key, int* out_value) const;
-  bool GetDoubleWithoutPathExpansion(StringPiece key, double* out_value) const;
-  bool GetStringWithoutPathExpansion(StringPiece key,
+  bool GetWithoutPathExpansion(const std::string& key,
+                               const Value** out_value) const;
+  bool GetWithoutPathExpansion(const std::string& key, Value** out_value);
+  bool GetBooleanWithoutPathExpansion(const std::string& key,
+                                      bool* out_value) const;
+  bool GetIntegerWithoutPathExpansion(const std::string& key,
+                                      int* out_value) const;
+  bool GetDoubleWithoutPathExpansion(const std::string& key,
+                                     double* out_value) const;
+  bool GetStringWithoutPathExpansion(const std::string& key,
                                      std::string* out_value) const;
-  bool GetStringWithoutPathExpansion(StringPiece key,
+  bool GetStringWithoutPathExpansion(const std::string& key,
                                      string16* out_value) const;
   bool GetDictionaryWithoutPathExpansion(
-      StringPiece key,
+      const std::string& key,
       const DictionaryValue** out_value) const;
-  bool GetDictionaryWithoutPathExpansion(StringPiece key,
+  bool GetDictionaryWithoutPathExpansion(const std::string& key,
                                          DictionaryValue** out_value);
-  bool GetListWithoutPathExpansion(StringPiece key,
+  bool GetListWithoutPathExpansion(const std::string& key,
                                    const ListValue** out_value) const;
-  bool GetListWithoutPathExpansion(StringPiece key, ListValue** out_value);
+  bool GetListWithoutPathExpansion(const std::string& key,
+                                   ListValue** out_value);
 
   // Removes the Value with the specified path from this dictionary (or one
   // of its child dictionaries, if the path is more than just a local key).
@@ -308,16 +328,18 @@
   // |out_value|.  If |out_value| is NULL, the removed value will be deleted.
   // This method returns true if |path| is a valid path; otherwise it will
   // return false and the DictionaryValue object will be unchanged.
-  bool Remove(StringPiece path, std::unique_ptr<Value>* out_value);
+  virtual bool Remove(const std::string& path,
+                      std::unique_ptr<Value>* out_value);
 
   // Like Remove(), but without special treatment of '.'.  This allows e.g. URLs
   // to be used as paths.
-  bool RemoveWithoutPathExpansion(StringPiece key,
-                                  std::unique_ptr<Value>* out_value);
+  virtual bool RemoveWithoutPathExpansion(const std::string& key,
+                                          std::unique_ptr<Value>* out_value);
 
   // Removes a path, clearing out all dictionaries on |path| that remain empty
   // after removing the value at |path|.
-  bool RemovePath(StringPiece path, std::unique_ptr<Value>* out_value);
+  virtual bool RemovePath(const std::string& path,
+                          std::unique_ptr<Value>* out_value);
 
   // Makes a copy of |this| but doesn't include empty dictionaries and lists in
   // the copy.  This never returns NULL, even if |this| itself is empty.
@@ -331,7 +353,7 @@
   void MergeDictionary(const DictionaryValue* dictionary);
 
   // Swaps contents with the |other| dictionary.
-  void Swap(DictionaryValue* other);
+  virtual void Swap(DictionaryValue* other);
 
   // This class provides an iterator over both keys and values in the
   // dictionary.  It can't be used to modify the dictionary.
@@ -341,7 +363,7 @@
     Iterator(const Iterator& other);
     ~Iterator();
 
-    bool IsAtEnd() const { return it_ == (*target_.dict_ptr_)->end(); }
+    bool IsAtEnd() const { return it_ == target_.dictionary_.end(); }
     void Advance() { ++it_; }
 
     const std::string& key() const { return it_->first; }
@@ -349,33 +371,42 @@
 
    private:
     const DictionaryValue& target_;
-    DictStorage::const_iterator it_;
+    Storage::const_iterator it_;
   };
 
-  DictionaryValue* DeepCopy() const;
+  // Overridden from Value:
+  DictionaryValue* DeepCopy() const override;
   // Preferred version of DeepCopy. TODO(estade): remove the above.
   std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
+  bool Equals(const Value* other) const override;
+
+ private:
+  Storage dictionary_;
+
+  DISALLOW_COPY_AND_ASSIGN(DictionaryValue);
 };
 
 // This type of Value represents a list of other Value values.
 class BASE_EXPORT ListValue : public Value {
  public:
-  using const_iterator = ListStorage::const_iterator;
-  using iterator = ListStorage::iterator;
+  using Storage = std::vector<std::unique_ptr<Value>>;
+  using const_iterator = Storage::const_iterator;
+  using iterator = Storage::iterator;
 
   // Returns |value| if it is a list, nullptr otherwise.
   static std::unique_ptr<ListValue> From(std::unique_ptr<Value> value);
 
   ListValue();
+  ~ListValue() override;
 
   // Clears the contents of this ListValue
   void Clear();
 
   // Returns the number of Values in this list.
-  size_t GetSize() const { return list_->size(); }
+  size_t GetSize() const { return list_.size(); }
 
   // Returns whether the list is empty.
-  bool empty() const { return list_->empty(); }
+  bool empty() const { return list_.empty(); }
 
   // Sets the list item at the given index to be the Value specified by
   // the value given.  If the index beyond the current end of the list, null
@@ -399,7 +430,7 @@
   // |out_value| is optional and will only be set if non-NULL.
   bool GetBoolean(size_t index, bool* out_value) const;
   bool GetInteger(size_t index, int* out_value) const;
-  // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
+  // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
   // doubles.
   bool GetDouble(size_t index, double* out_value) const;
   bool GetString(size_t index, std::string* out_value) const;
@@ -416,7 +447,7 @@
   // passed out via |out_value|.  If |out_value| is NULL, the removed value will
   // be deleted.  This method returns true if |index| is valid; otherwise
   // it will return false and the ListValue object will be unchanged.
-  bool Remove(size_t index, std::unique_ptr<Value>* out_value);
+  virtual bool Remove(size_t index, std::unique_ptr<Value>* out_value);
 
   // Removes the first instance of |value| found in the list, if any, and
   // deletes it. |index| is the location where |value| was found. Returns false
@@ -431,27 +462,26 @@
 
   // Appends a Value to the end of the list.
   void Append(std::unique_ptr<Value> in_value);
-#if !defined(OS_LINUX)
   // Deprecated version of the above. TODO(estade): remove.
   void Append(Value* in_value);
-#endif
 
   // Convenience forms of Append.
   void AppendBoolean(bool in_value);
   void AppendInteger(int in_value);
   void AppendDouble(double in_value);
-  void AppendString(StringPiece in_value);
+  void AppendString(const std::string& in_value);
   void AppendString(const string16& in_value);
   void AppendStrings(const std::vector<std::string>& in_values);
   void AppendStrings(const std::vector<string16>& in_values);
 
-  // Appends a Value if it's not already present. Returns true if successful,
-  // or false if the value was already
-  bool AppendIfNotPresent(std::unique_ptr<Value> in_value);
+  // Appends a Value if it's not already present. Takes ownership of the
+  // |in_value|. Returns true if successful, or false if the value was already
+  // present. If the value was already present the |in_value| is deleted.
+  bool AppendIfNotPresent(Value* in_value);
 
   // Insert a Value at index.
   // Returns true if successful, or false if the index was out of range.
-  bool Insert(size_t index, std::unique_ptr<Value> in_value);
+  bool Insert(size_t index, Value* in_value);
 
   // Searches for the first instance of |value| in the list using the Equals
   // method of the Value type.
@@ -459,18 +489,28 @@
   const_iterator Find(const Value& value) const;
 
   // Swaps contents with the |other| list.
-  void Swap(ListValue* other);
+  virtual void Swap(ListValue* other);
 
   // Iteration.
-  iterator begin() { return list_->begin(); }
-  iterator end() { return list_->end(); }
+  iterator begin() { return list_.begin(); }
+  iterator end() { return list_.end(); }
 
-  const_iterator begin() const { return list_->begin(); }
-  const_iterator end() const { return list_->end(); }
+  const_iterator begin() const { return list_.begin(); }
+  const_iterator end() const { return list_.end(); }
 
-  ListValue* DeepCopy() const;
+  // Overridden from Value:
+  bool GetAsList(ListValue** out_value) override;
+  bool GetAsList(const ListValue** out_value) const override;
+  ListValue* DeepCopy() const override;
+  bool Equals(const Value* other) const override;
+
   // Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
   std::unique_ptr<ListValue> CreateDeepCopy() const;
+
+ private:
+  Storage list_;
+
+  DISALLOW_COPY_AND_ASSIGN(ListValue);
 };
 
 // This interface is implemented by classes that know how to serialize
@@ -505,6 +545,16 @@
 BASE_EXPORT std::ostream& operator<<(std::ostream& out, const Value& value);
 
 BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
+                                            const FundamentalValue& value) {
+  return out << static_cast<const Value&>(value);
+}
+
+BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
+                                            const StringValue& value) {
+  return out << static_cast<const Value&>(value);
+}
+
+BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
                                             const DictionaryValue& value) {
   return out << static_cast<const Value&>(value);
 }
@@ -514,10 +564,6 @@
   return out << static_cast<const Value&>(value);
 }
 
-// Stream operator so that enum class Types can be used in log statements.
-BASE_EXPORT std::ostream& operator<<(std::ostream& out,
-                                     const Value::Type& type);
-
 }  // namespace base
 
 #endif  // BASE_VALUES_H_
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 3bcdc16..d685222 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -9,7 +9,6 @@
 #include <limits>
 #include <memory>
 #include <utility>
-#include <vector>
 
 #include "base/memory/ptr_util.h"
 #include "base/strings/string16.h"
@@ -18,322 +17,6 @@
 
 namespace base {
 
-// Group of tests for the value constructors.
-TEST(ValuesTest, ConstructBool) {
-  Value true_value(true);
-  EXPECT_EQ(Value::Type::BOOLEAN, true_value.type());
-  EXPECT_TRUE(true_value.GetBool());
-
-  Value false_value(false);
-  EXPECT_EQ(Value::Type::BOOLEAN, false_value.type());
-  EXPECT_FALSE(false_value.GetBool());
-}
-
-TEST(ValuesTest, ConstructInt) {
-  Value value(-37);
-  EXPECT_EQ(Value::Type::INTEGER, value.type());
-  EXPECT_EQ(-37, value.GetInt());
-}
-
-TEST(ValuesTest, ConstructDouble) {
-  Value value(-4.655);
-  EXPECT_EQ(Value::Type::DOUBLE, value.type());
-  EXPECT_EQ(-4.655, value.GetDouble());
-}
-
-TEST(ValuesTest, ConstructStringFromConstCharPtr) {
-  const char* str = "foobar";
-  Value value(str);
-  EXPECT_EQ(Value::Type::STRING, value.type());
-  EXPECT_EQ("foobar", value.GetString());
-}
-
-TEST(ValuesTest, ConstructStringFromStdStringConstRef) {
-  std::string str = "foobar";
-  Value value(str);
-  EXPECT_EQ(Value::Type::STRING, value.type());
-  EXPECT_EQ("foobar", value.GetString());
-}
-
-TEST(ValuesTest, ConstructStringFromStdStringRefRef) {
-  std::string str = "foobar";
-  Value value(std::move(str));
-  EXPECT_EQ(Value::Type::STRING, value.type());
-  EXPECT_EQ("foobar", value.GetString());
-}
-
-TEST(ValuesTest, ConstructStringFromConstChar16Ptr) {
-  string16 str = ASCIIToUTF16("foobar");
-  Value value(str.c_str());
-  EXPECT_EQ(Value::Type::STRING, value.type());
-  EXPECT_EQ("foobar", value.GetString());
-}
-
-TEST(ValuesTest, ConstructStringFromString16) {
-  string16 str = ASCIIToUTF16("foobar");
-  Value value(str);
-  EXPECT_EQ(Value::Type::STRING, value.type());
-  EXPECT_EQ("foobar", value.GetString());
-}
-
-TEST(ValuesTest, ConstructStringFromStringPiece) {
-  StringPiece str = "foobar";
-  Value value(str);
-  EXPECT_EQ(Value::Type::STRING, value.type());
-  EXPECT_EQ("foobar", value.GetString());
-}
-
-TEST(ValuesTest, ConstructBinary) {
-  BinaryValue value(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
-  EXPECT_EQ(Value::Type::BINARY, value.type());
-  EXPECT_EQ(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}), value.GetBlob());
-}
-
-TEST(ValuesTest, ConstructDict) {
-  DictionaryValue value;
-  EXPECT_EQ(Value::Type::DICTIONARY, value.type());
-}
-
-TEST(ValuesTest, ConstructList) {
-  ListValue value;
-  EXPECT_EQ(Value::Type::LIST, value.type());
-}
-
-// Group of tests for the copy constructors and copy-assigmnent. For equality
-// checks comparisons of the interesting fields are done instead of relying on
-// Equals being correct.
-TEST(ValuesTest, CopyBool) {
-  Value true_value(true);
-  Value copied_true_value(true_value);
-  EXPECT_EQ(true_value.type(), copied_true_value.type());
-  EXPECT_EQ(true_value.GetBool(), copied_true_value.GetBool());
-
-  Value false_value(false);
-  Value copied_false_value(false_value);
-  EXPECT_EQ(false_value.type(), copied_false_value.type());
-  EXPECT_EQ(false_value.GetBool(), copied_false_value.GetBool());
-
-  Value blank;
-
-  blank = true_value;
-  EXPECT_EQ(true_value.type(), blank.type());
-  EXPECT_EQ(true_value.GetBool(), blank.GetBool());
-
-  blank = false_value;
-  EXPECT_EQ(false_value.type(), blank.type());
-  EXPECT_EQ(false_value.GetBool(), blank.GetBool());
-}
-
-TEST(ValuesTest, CopyInt) {
-  Value value(74);
-  Value copied_value(value);
-  EXPECT_EQ(value.type(), copied_value.type());
-  EXPECT_EQ(value.GetInt(), copied_value.GetInt());
-
-  Value blank;
-
-  blank = value;
-  EXPECT_EQ(value.type(), blank.type());
-  EXPECT_EQ(value.GetInt(), blank.GetInt());
-}
-
-TEST(ValuesTest, CopyDouble) {
-  Value value(74.896);
-  Value copied_value(value);
-  EXPECT_EQ(value.type(), copied_value.type());
-  EXPECT_EQ(value.GetDouble(), copied_value.GetDouble());
-
-  Value blank;
-
-  blank = value;
-  EXPECT_EQ(value.type(), blank.type());
-  EXPECT_EQ(value.GetDouble(), blank.GetDouble());
-}
-
-TEST(ValuesTest, CopyString) {
-  Value value("foobar");
-  Value copied_value(value);
-  EXPECT_EQ(value.type(), copied_value.type());
-  EXPECT_EQ(value.GetString(), copied_value.GetString());
-
-  Value blank;
-
-  blank = value;
-  EXPECT_EQ(value.type(), blank.type());
-  EXPECT_EQ(value.GetString(), blank.GetString());
-}
-
-TEST(ValuesTest, CopyBinary) {
-  BinaryValue value(std::vector<char>({0xF, 0x0, 0x0, 0xB, 0xA, 0x2}));
-  BinaryValue copied_value(value);
-  EXPECT_EQ(value.type(), copied_value.type());
-  EXPECT_EQ(value.GetBlob(), copied_value.GetBlob());
-
-  Value blank;
-
-  blank = value;
-  EXPECT_EQ(value.type(), blank.type());
-  EXPECT_EQ(value.GetBlob(), blank.GetBlob());
-}
-
-TEST(ValuesTest, CopyDictionary) {
-  // TODO(crbug.com/646113): Clean this up once DictionaryValue switched to
-  // value semantics.
-  int copy;
-  DictionaryValue value;
-  value.SetInteger("Int", 123);
-
-  DictionaryValue copied_value(value);
-  copied_value.GetInteger("Int", &copy);
-
-  EXPECT_EQ(value.type(), copied_value.type());
-  EXPECT_EQ(123, copy);
-
-  auto blank = MakeUnique<Value>();
-
-  *blank = value;
-  EXPECT_EQ(Value::Type::DICTIONARY, blank->type());
-
-  static_cast<DictionaryValue*>(blank.get())->GetInteger("Int", &copy);
-  EXPECT_EQ(123, copy);
-}
-
-TEST(ValuesTest, CopyList) {
-  // TODO(crbug.com/646113): Clean this up once ListValue switched to
-  // value semantics.
-  int copy;
-  ListValue value;
-  value.AppendInteger(123);
-
-  ListValue copied_value(value);
-  copied_value.GetInteger(0, &copy);
-
-  EXPECT_EQ(value.type(), copied_value.type());
-  EXPECT_EQ(123, copy);
-
-  auto blank = MakeUnique<Value>();
-
-  *blank = value;
-  EXPECT_EQ(Value::Type::LIST, blank->type());
-
-  static_cast<ListValue*>(blank.get())->GetInteger(0, &copy);
-  EXPECT_EQ(123, copy);
-}
-
-// Group of tests for the move constructors and move-assigmnent.
-TEST(ValuesTest, MoveBool) {
-  Value true_value(true);
-  Value moved_true_value(std::move(true_value));
-  EXPECT_EQ(Value::Type::BOOLEAN, moved_true_value.type());
-  EXPECT_TRUE(moved_true_value.GetBool());
-
-  Value false_value(false);
-  Value moved_false_value(std::move(false_value));
-  EXPECT_EQ(Value::Type::BOOLEAN, moved_false_value.type());
-  EXPECT_FALSE(moved_false_value.GetBool());
-
-  Value blank;
-
-  blank = Value(true);
-  EXPECT_EQ(Value::Type::BOOLEAN, blank.type());
-  EXPECT_TRUE(blank.GetBool());
-
-  blank = Value(false);
-  EXPECT_EQ(Value::Type::BOOLEAN, blank.type());
-  EXPECT_FALSE(blank.GetBool());
-}
-
-TEST(ValuesTest, MoveInt) {
-  Value value(74);
-  Value moved_value(std::move(value));
-  EXPECT_EQ(Value::Type::INTEGER, moved_value.type());
-  EXPECT_EQ(74, moved_value.GetInt());
-
-  Value blank;
-
-  blank = Value(47);
-  EXPECT_EQ(Value::Type::INTEGER, blank.type());
-  EXPECT_EQ(47, blank.GetInt());
-}
-
-TEST(ValuesTest, MoveDouble) {
-  Value value(74.896);
-  Value moved_value(std::move(value));
-  EXPECT_EQ(Value::Type::DOUBLE, moved_value.type());
-  EXPECT_EQ(74.896, moved_value.GetDouble());
-
-  Value blank;
-
-  blank = Value(654.38);
-  EXPECT_EQ(Value::Type::DOUBLE, blank.type());
-  EXPECT_EQ(654.38, blank.GetDouble());
-}
-
-TEST(ValuesTest, MoveString) {
-  Value value("foobar");
-  Value moved_value(std::move(value));
-  EXPECT_EQ(Value::Type::STRING, moved_value.type());
-  EXPECT_EQ("foobar", moved_value.GetString());
-
-  Value blank;
-
-  blank = Value("foobar");
-  EXPECT_EQ(Value::Type::STRING, blank.type());
-  EXPECT_EQ("foobar", blank.GetString());
-}
-
-TEST(ValuesTest, MoveBinary) {
-  const std::vector<char> buffer = {0xF, 0x0, 0x0, 0xB, 0xA, 0x2};
-  BinaryValue value(buffer);
-  BinaryValue moved_value(std::move(value));
-  EXPECT_EQ(Value::Type::BINARY, moved_value.type());
-  EXPECT_EQ(buffer, moved_value.GetBlob());
-
-  Value blank;
-
-  blank = BinaryValue(buffer);
-  EXPECT_EQ(Value::Type::BINARY, blank.type());
-  EXPECT_EQ(buffer, blank.GetBlob());
-}
-
-TEST(ValuesTest, MoveDictionary) {
-  // TODO(crbug.com/646113): Clean this up once DictionaryValue switched to
-  // value semantics.
-  int move;
-  DictionaryValue value;
-  value.SetInteger("Int", 123);
-
-  DictionaryValue moved_value(std::move(value));
-  moved_value.GetInteger("Int", &move);
-
-  EXPECT_EQ(Value::Type::DICTIONARY, moved_value.type());
-  EXPECT_EQ(123, move);
-
-  Value blank;
-
-  blank = DictionaryValue();
-  EXPECT_EQ(Value::Type::DICTIONARY, blank.type());
-}
-
-TEST(ValuesTest, MoveList) {
-  // TODO(crbug.com/646113): Clean this up once ListValue switched to
-  // value semantics.
-  int move;
-  ListValue value;
-  value.AppendInteger(123);
-
-  ListValue moved_value(std::move(value));
-  moved_value.GetInteger(0, &move);
-
-  EXPECT_EQ(Value::Type::LIST, moved_value.type());
-  EXPECT_EQ(123, move);
-
-  Value blank;
-
-  blank = ListValue();
-  EXPECT_EQ(Value::Type::LIST, blank.type());
-}
-
 TEST(ValuesTest, Basic) {
   // Test basic dictionary getting/setting
   DictionaryValue settings;
@@ -379,10 +62,10 @@
 
 TEST(ValuesTest, List) {
   std::unique_ptr<ListValue> mixed_list(new ListValue());
-  mixed_list->Set(0, MakeUnique<Value>(true));
-  mixed_list->Set(1, MakeUnique<Value>(42));
-  mixed_list->Set(2, MakeUnique<Value>(88.8));
-  mixed_list->Set(3, MakeUnique<Value>("foo"));
+  mixed_list->Set(0, WrapUnique(new FundamentalValue(true)));
+  mixed_list->Set(1, WrapUnique(new FundamentalValue(42)));
+  mixed_list->Set(2, WrapUnique(new FundamentalValue(88.8)));
+  mixed_list->Set(3, WrapUnique(new StringValue("foo")));
   ASSERT_EQ(4u, mixed_list->GetSize());
 
   Value *value = NULL;
@@ -417,8 +100,8 @@
   ASSERT_EQ("foo", string_value);
 
   // Try searching in the mixed list.
-  base::Value sought_value(42);
-  base::Value not_found_value(false);
+  base::FundamentalValue sought_value(42);
+  base::FundamentalValue not_found_value(false);
 
   ASSERT_NE(mixed_list->end(), mixed_list->Find(sought_value));
   ASSERT_TRUE((*mixed_list->Find(sought_value))->GetAsInteger(&int_value));
@@ -427,15 +110,16 @@
 }
 
 TEST(ValuesTest, BinaryValue) {
-  // Default constructor creates a BinaryValue with a buffer of size 0.
-  auto binary = MakeUnique<Value>(Value::Type::BINARY);
+  // Default constructor creates a BinaryValue with a null buffer and size 0.
+  std::unique_ptr<BinaryValue> binary(new BinaryValue());
   ASSERT_TRUE(binary.get());
+  ASSERT_EQ(NULL, binary->GetBuffer());
   ASSERT_EQ(0U, binary->GetSize());
 
   // Test the common case of a non-empty buffer
-  std::vector<char> buffer(15);
-  char* original_buffer = buffer.data();
-  binary.reset(new BinaryValue(std::move(buffer)));
+  std::unique_ptr<char[]> buffer(new char[15]);
+  char* original_buffer = buffer.get();
+  binary.reset(new BinaryValue(std::move(buffer), 15));
   ASSERT_TRUE(binary.get());
   ASSERT_TRUE(binary->GetBuffer());
   ASSERT_EQ(original_buffer, binary->GetBuffer());
@@ -459,17 +143,17 @@
 
 TEST(ValuesTest, StringValue) {
   // Test overloaded StringValue constructor.
-  std::unique_ptr<Value> narrow_value(new Value("narrow"));
+  std::unique_ptr<Value> narrow_value(new StringValue("narrow"));
   ASSERT_TRUE(narrow_value.get());
-  ASSERT_TRUE(narrow_value->IsType(Value::Type::STRING));
-  std::unique_ptr<Value> utf16_value(new Value(ASCIIToUTF16("utf16")));
+  ASSERT_TRUE(narrow_value->IsType(Value::TYPE_STRING));
+  std::unique_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
   ASSERT_TRUE(utf16_value.get());
-  ASSERT_TRUE(utf16_value->IsType(Value::Type::STRING));
+  ASSERT_TRUE(utf16_value->IsType(Value::TYPE_STRING));
 
   // Test overloaded GetAsString.
   std::string narrow = "http://google.com";
   string16 utf16 = ASCIIToUTF16("http://google.com");
-  const Value* string_value = NULL;
+  const StringValue* string_value = NULL;
   ASSERT_TRUE(narrow_value->GetAsString(&narrow));
   ASSERT_TRUE(narrow_value->GetAsString(&utf16));
   ASSERT_TRUE(narrow_value->GetAsString(&string_value));
@@ -487,23 +171,65 @@
   // Don't choke on NULL values.
   ASSERT_TRUE(narrow_value->GetAsString(static_cast<string16*>(NULL)));
   ASSERT_TRUE(narrow_value->GetAsString(static_cast<std::string*>(NULL)));
-  ASSERT_TRUE(narrow_value->GetAsString(static_cast<const Value**>(NULL)));
+  ASSERT_TRUE(narrow_value->GetAsString(
+                  static_cast<const StringValue**>(NULL)));
 }
 
+// This is a Value object that allows us to tell if it's been
+// properly deleted by modifying the value of external flag on destruction.
+class DeletionTestValue : public Value {
+ public:
+  explicit DeletionTestValue(bool* deletion_flag) : Value(TYPE_NULL) {
+    Init(deletion_flag);  // Separate function so that we can use ASSERT_*
+  }
+
+  void Init(bool* deletion_flag) {
+    ASSERT_TRUE(deletion_flag);
+    deletion_flag_ = deletion_flag;
+    *deletion_flag_ = false;
+  }
+
+  ~DeletionTestValue() override { *deletion_flag_ = true; }
+
+ private:
+  bool* deletion_flag_;
+};
+
 TEST(ValuesTest, ListDeletion) {
-  ListValue list;
-  list.Append(MakeUnique<Value>());
-  EXPECT_FALSE(list.empty());
-  list.Clear();
-  EXPECT_TRUE(list.empty());
+  bool deletion_flag = true;
+
+  {
+    ListValue list;
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+  }
+  EXPECT_TRUE(deletion_flag);
+
+  {
+    ListValue list;
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    list.Clear();
+    EXPECT_TRUE(deletion_flag);
+  }
+
+  {
+    ListValue list;
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    EXPECT_TRUE(list.Set(0, Value::CreateNullValue()));
+    EXPECT_TRUE(deletion_flag);
+  }
 }
 
 TEST(ValuesTest, ListRemoval) {
+  bool deletion_flag = true;
   std::unique_ptr<Value> removed_item;
 
   {
     ListValue list;
-    list.Append(MakeUnique<Value>());
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
     EXPECT_EQ(1U, list.GetSize());
     EXPECT_FALSE(list.Remove(std::numeric_limits<size_t>::max(),
                              &removed_item));
@@ -512,55 +238,88 @@
     ASSERT_TRUE(removed_item);
     EXPECT_EQ(0U, list.GetSize());
   }
+  EXPECT_FALSE(deletion_flag);
   removed_item.reset();
+  EXPECT_TRUE(deletion_flag);
 
   {
     ListValue list;
-    list.Append(MakeUnique<Value>());
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(list.Remove(0, NULL));
+    EXPECT_TRUE(deletion_flag);
     EXPECT_EQ(0U, list.GetSize());
   }
 
   {
     ListValue list;
-    auto value = MakeUnique<Value>();
-    Value* original_value = value.get();
+    std::unique_ptr<DeletionTestValue> value(
+        new DeletionTestValue(&deletion_flag));
+    DeletionTestValue* original_value = value.get();
     list.Append(std::move(value));
+    EXPECT_FALSE(deletion_flag);
     size_t index = 0;
     list.Remove(*original_value, &index);
     EXPECT_EQ(0U, index);
+    EXPECT_TRUE(deletion_flag);
     EXPECT_EQ(0U, list.GetSize());
   }
 }
 
 TEST(ValuesTest, DictionaryDeletion) {
   std::string key = "test";
-  DictionaryValue dict;
-  dict.Set(key, MakeUnique<Value>());
-  EXPECT_FALSE(dict.empty());
-  dict.Clear();
-  EXPECT_TRUE(dict.empty());
+  bool deletion_flag = true;
+
+  {
+    DictionaryValue dict;
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+  }
+  EXPECT_TRUE(deletion_flag);
+
+  {
+    DictionaryValue dict;
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    dict.Clear();
+    EXPECT_TRUE(deletion_flag);
+  }
+
+  {
+    DictionaryValue dict;
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
+    dict.Set(key, Value::CreateNullValue());
+    EXPECT_TRUE(deletion_flag);
+  }
 }
 
 TEST(ValuesTest, DictionaryRemoval) {
   std::string key = "test";
+  bool deletion_flag = true;
   std::unique_ptr<Value> removed_item;
 
   {
     DictionaryValue dict;
-    dict.Set(key, MakeUnique<Value>());
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(dict.HasKey(key));
     EXPECT_FALSE(dict.Remove("absent key", &removed_item));
     EXPECT_TRUE(dict.Remove(key, &removed_item));
     EXPECT_FALSE(dict.HasKey(key));
     ASSERT_TRUE(removed_item);
   }
+  EXPECT_FALSE(deletion_flag);
+  removed_item.reset();
+  EXPECT_TRUE(deletion_flag);
 
   {
     DictionaryValue dict;
-    dict.Set(key, MakeUnique<Value>());
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
+    EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(dict.HasKey(key));
     EXPECT_TRUE(dict.Remove(key, NULL));
+    EXPECT_TRUE(deletion_flag);
     EXPECT_FALSE(dict.HasKey(key));
   }
 }
@@ -584,7 +343,7 @@
   EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
   Value* value4;
   ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
-  EXPECT_EQ(Value::Type::NONE, value4->GetType());
+  EXPECT_EQ(Value::TYPE_NULL, value4->GetType());
 }
 
 // Tests the deprecated version of SetWithoutPathExpansion.
@@ -608,7 +367,7 @@
   EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
   Value* value4;
   ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
-  EXPECT_EQ(Value::Type::NONE, value4->GetType());
+  EXPECT_EQ(Value::TYPE_NULL, value4->GetType());
 }
 
 TEST(ValuesTest, DictionaryRemovePath) {
@@ -619,7 +378,7 @@
   std::unique_ptr<Value> removed_item;
   EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
   ASSERT_TRUE(removed_item);
-  EXPECT_TRUE(removed_item->IsType(base::Value::Type::INTEGER));
+  EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_INTEGER));
   EXPECT_FALSE(dict.HasKey("a.long.way.down"));
   EXPECT_FALSE(dict.HasKey("a.long.way"));
   EXPECT_TRUE(dict.Get("a.long.key.path", NULL));
@@ -632,7 +391,7 @@
   removed_item.reset();
   EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
   ASSERT_TRUE(removed_item);
-  EXPECT_TRUE(removed_item->IsType(base::Value::Type::BOOLEAN));
+  EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_BOOLEAN));
   EXPECT_TRUE(dict.empty());
 }
 
@@ -641,34 +400,38 @@
   std::unique_ptr<Value> scoped_null = Value::CreateNullValue();
   Value* original_null = scoped_null.get();
   original_dict.Set("null", std::move(scoped_null));
-  std::unique_ptr<Value> scoped_bool(new Value(true));
-  Value* original_bool = scoped_bool.get();
+  std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
+  FundamentalValue* original_bool = scoped_bool.get();
   original_dict.Set("bool", std::move(scoped_bool));
-  std::unique_ptr<Value> scoped_int(new Value(42));
-  Value* original_int = scoped_int.get();
+  std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
+  FundamentalValue* original_int = scoped_int.get();
   original_dict.Set("int", std::move(scoped_int));
-  std::unique_ptr<Value> scoped_double(new Value(3.14));
-  Value* original_double = scoped_double.get();
+  std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
+  FundamentalValue* original_double = scoped_double.get();
   original_dict.Set("double", std::move(scoped_double));
-  std::unique_ptr<Value> scoped_string(new Value("hello"));
-  Value* original_string = scoped_string.get();
+  std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
+  StringValue* original_string = scoped_string.get();
   original_dict.Set("string", std::move(scoped_string));
-  std::unique_ptr<Value> scoped_string16(new Value(ASCIIToUTF16("hello16")));
-  Value* original_string16 = scoped_string16.get();
+  std::unique_ptr<StringValue> scoped_string16(
+      new StringValue(ASCIIToUTF16("hello16")));
+  StringValue* original_string16 = scoped_string16.get();
   original_dict.Set("string16", std::move(scoped_string16));
 
-  std::vector<char> original_buffer(42, '!');
+  std::unique_ptr<char[]> original_buffer(new char[42]);
+  memset(original_buffer.get(), '!', 42);
   std::unique_ptr<BinaryValue> scoped_binary(
-      new BinaryValue(std::move(original_buffer)));
+      new BinaryValue(std::move(original_buffer), 42));
   BinaryValue* original_binary = scoped_binary.get();
   original_dict.Set("binary", std::move(scoped_binary));
 
   std::unique_ptr<ListValue> scoped_list(new ListValue());
   Value* original_list = scoped_list.get();
-  std::unique_ptr<Value> scoped_list_element_0(new Value(0));
+  std::unique_ptr<FundamentalValue> scoped_list_element_0(
+      new FundamentalValue(0));
   Value* original_list_element_0 = scoped_list_element_0.get();
   scoped_list->Append(std::move(scoped_list_element_0));
-  std::unique_ptr<Value> scoped_list_element_1(new Value(1));
+  std::unique_ptr<FundamentalValue> scoped_list_element_1(
+      new FundamentalValue(1));
   Value* original_list_element_1 = scoped_list_element_1.get();
   scoped_list->Append(std::move(scoped_list_element_1));
   original_dict.Set("list", std::move(scoped_list));
@@ -687,13 +450,13 @@
   ASSERT_TRUE(copy_dict->Get("null", &copy_null));
   ASSERT_TRUE(copy_null);
   ASSERT_NE(copy_null, original_null);
-  ASSERT_TRUE(copy_null->IsType(Value::Type::NONE));
+  ASSERT_TRUE(copy_null->IsType(Value::TYPE_NULL));
 
   Value* copy_bool = NULL;
   ASSERT_TRUE(copy_dict->Get("bool", &copy_bool));
   ASSERT_TRUE(copy_bool);
   ASSERT_NE(copy_bool, original_bool);
-  ASSERT_TRUE(copy_bool->IsType(Value::Type::BOOLEAN));
+  ASSERT_TRUE(copy_bool->IsType(Value::TYPE_BOOLEAN));
   bool copy_bool_value = false;
   ASSERT_TRUE(copy_bool->GetAsBoolean(&copy_bool_value));
   ASSERT_TRUE(copy_bool_value);
@@ -702,7 +465,7 @@
   ASSERT_TRUE(copy_dict->Get("int", &copy_int));
   ASSERT_TRUE(copy_int);
   ASSERT_NE(copy_int, original_int);
-  ASSERT_TRUE(copy_int->IsType(Value::Type::INTEGER));
+  ASSERT_TRUE(copy_int->IsType(Value::TYPE_INTEGER));
   int copy_int_value = 0;
   ASSERT_TRUE(copy_int->GetAsInteger(&copy_int_value));
   ASSERT_EQ(42, copy_int_value);
@@ -711,7 +474,7 @@
   ASSERT_TRUE(copy_dict->Get("double", &copy_double));
   ASSERT_TRUE(copy_double);
   ASSERT_NE(copy_double, original_double);
-  ASSERT_TRUE(copy_double->IsType(Value::Type::DOUBLE));
+  ASSERT_TRUE(copy_double->IsType(Value::TYPE_DOUBLE));
   double copy_double_value = 0;
   ASSERT_TRUE(copy_double->GetAsDouble(&copy_double_value));
   ASSERT_EQ(3.14, copy_double_value);
@@ -720,7 +483,7 @@
   ASSERT_TRUE(copy_dict->Get("string", &copy_string));
   ASSERT_TRUE(copy_string);
   ASSERT_NE(copy_string, original_string);
-  ASSERT_TRUE(copy_string->IsType(Value::Type::STRING));
+  ASSERT_TRUE(copy_string->IsType(Value::TYPE_STRING));
   std::string copy_string_value;
   string16 copy_string16_value;
   ASSERT_TRUE(copy_string->GetAsString(&copy_string_value));
@@ -732,7 +495,7 @@
   ASSERT_TRUE(copy_dict->Get("string16", &copy_string16));
   ASSERT_TRUE(copy_string16);
   ASSERT_NE(copy_string16, original_string16);
-  ASSERT_TRUE(copy_string16->IsType(Value::Type::STRING));
+  ASSERT_TRUE(copy_string16->IsType(Value::TYPE_STRING));
   ASSERT_TRUE(copy_string16->GetAsString(&copy_string_value));
   ASSERT_TRUE(copy_string16->GetAsString(&copy_string16_value));
   ASSERT_EQ(std::string("hello16"), copy_string_value);
@@ -742,17 +505,20 @@
   ASSERT_TRUE(copy_dict->Get("binary", &copy_binary));
   ASSERT_TRUE(copy_binary);
   ASSERT_NE(copy_binary, original_binary);
-  ASSERT_TRUE(copy_binary->IsType(Value::Type::BINARY));
-  ASSERT_NE(original_binary->GetBuffer(), copy_binary->GetBuffer());
-  ASSERT_EQ(original_binary->GetSize(), copy_binary->GetSize());
-  ASSERT_EQ(0, memcmp(original_binary->GetBuffer(), copy_binary->GetBuffer(),
-                      original_binary->GetSize()));
+  ASSERT_TRUE(copy_binary->IsType(Value::TYPE_BINARY));
+  ASSERT_NE(original_binary->GetBuffer(),
+    static_cast<BinaryValue*>(copy_binary)->GetBuffer());
+  ASSERT_EQ(original_binary->GetSize(),
+    static_cast<BinaryValue*>(copy_binary)->GetSize());
+  ASSERT_EQ(0, memcmp(original_binary->GetBuffer(),
+               static_cast<BinaryValue*>(copy_binary)->GetBuffer(),
+               original_binary->GetSize()));
 
   Value* copy_value = NULL;
   ASSERT_TRUE(copy_dict->Get("list", &copy_value));
   ASSERT_TRUE(copy_value);
   ASSERT_NE(copy_value, original_list);
-  ASSERT_TRUE(copy_value->IsType(Value::Type::LIST));
+  ASSERT_TRUE(copy_value->IsType(Value::TYPE_LIST));
   ListValue* copy_list = NULL;
   ASSERT_TRUE(copy_value->GetAsList(&copy_list));
   ASSERT_TRUE(copy_list);
@@ -778,7 +544,7 @@
   ASSERT_TRUE(copy_dict->Get("dictionary", &copy_value));
   ASSERT_TRUE(copy_value);
   ASSERT_NE(copy_value, original_nested_dictionary);
-  ASSERT_TRUE(copy_value->IsType(Value::Type::DICTIONARY));
+  ASSERT_TRUE(copy_value->IsType(Value::TYPE_DICTIONARY));
   DictionaryValue* copy_nested_dictionary = NULL;
   ASSERT_TRUE(copy_value->GetAsDictionary(&copy_nested_dictionary));
   ASSERT_TRUE(copy_nested_dictionary);
@@ -791,7 +557,7 @@
   EXPECT_NE(null1.get(), null2.get());
   EXPECT_TRUE(null1->Equals(null2.get()));
 
-  Value boolean(false);
+  FundamentalValue boolean(false);
   EXPECT_FALSE(null1->Equals(&boolean));
 
   DictionaryValue dv;
@@ -816,7 +582,7 @@
   copy->Set("f", std::move(list_copy));
   EXPECT_TRUE(dv.Equals(copy.get()));
 
-  original_list->Append(MakeUnique<Value>(true));
+  original_list->Append(WrapUnique(new FundamentalValue(true)));
   EXPECT_FALSE(dv.Equals(copy.get()));
 
   // Check if Equals detects differences in only the keys.
@@ -833,9 +599,9 @@
   EXPECT_TRUE(Value::Equals(null1.get(), null2.get()));
   EXPECT_TRUE(Value::Equals(NULL, NULL));
 
-  std::unique_ptr<Value> i42(new Value(42));
-  std::unique_ptr<Value> j42(new Value(42));
-  std::unique_ptr<Value> i17(new Value(17));
+  std::unique_ptr<Value> i42(new FundamentalValue(42));
+  std::unique_ptr<Value> j42(new FundamentalValue(42));
+  std::unique_ptr<Value> i17(new FundamentalValue(17));
   EXPECT_TRUE(Value::Equals(i42.get(), i42.get()));
   EXPECT_TRUE(Value::Equals(j42.get(), i42.get()));
   EXPECT_TRUE(Value::Equals(i42.get(), j42.get()));
@@ -855,33 +621,37 @@
   std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
   Value* original_null = scoped_null.get();
   original_dict.Set("null", std::move(scoped_null));
-  std::unique_ptr<Value> scoped_bool(new Value(true));
+  std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
   Value* original_bool = scoped_bool.get();
   original_dict.Set("bool", std::move(scoped_bool));
-  std::unique_ptr<Value> scoped_int(new Value(42));
+  std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
   Value* original_int = scoped_int.get();
   original_dict.Set("int", std::move(scoped_int));
-  std::unique_ptr<Value> scoped_double(new Value(3.14));
+  std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
   Value* original_double = scoped_double.get();
   original_dict.Set("double", std::move(scoped_double));
-  std::unique_ptr<Value> scoped_string(new Value("hello"));
+  std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
   Value* original_string = scoped_string.get();
   original_dict.Set("string", std::move(scoped_string));
-  std::unique_ptr<Value> scoped_string16(new Value(ASCIIToUTF16("hello16")));
+  std::unique_ptr<StringValue> scoped_string16(
+      new StringValue(ASCIIToUTF16("hello16")));
   Value* original_string16 = scoped_string16.get();
   original_dict.Set("string16", std::move(scoped_string16));
 
-  std::vector<char> original_buffer(42, '!');
+  std::unique_ptr<char[]> original_buffer(new char[42]);
+  memset(original_buffer.get(), '!', 42);
   std::unique_ptr<BinaryValue> scoped_binary(
-      new BinaryValue(std::move(original_buffer)));
+      new BinaryValue(std::move(original_buffer), 42));
   Value* original_binary = scoped_binary.get();
   original_dict.Set("binary", std::move(scoped_binary));
 
   std::unique_ptr<ListValue> scoped_list(new ListValue());
   Value* original_list = scoped_list.get();
-  std::unique_ptr<Value> scoped_list_element_0(new Value(0));
+  std::unique_ptr<FundamentalValue> scoped_list_element_0(
+      new FundamentalValue(0));
   scoped_list->Append(std::move(scoped_list_element_0));
-  std::unique_ptr<Value> scoped_list_element_1(new Value(1));
+  std::unique_ptr<FundamentalValue> scoped_list_element_1(
+      new FundamentalValue(1));
   scoped_list->Append(std::move(scoped_list_element_1));
   original_dict.Set("list", std::move(scoped_list));
 
@@ -969,7 +739,7 @@
   {
     std::unique_ptr<ListValue> inner(new ListValue);
     std::unique_ptr<ListValue> inner2(new ListValue);
-    inner2->Append(MakeUnique<Value>("hello"));
+    inner2->Append(WrapUnique(new StringValue("hello")));
     inner->Append(WrapUnique(new DictionaryValue));
     inner->Append(std::move(inner2));
     root->Set("list_with_empty_children", std::move(inner));
@@ -1067,7 +837,7 @@
     ADD_FAILURE();
   }
 
-  Value value1("value1");
+  StringValue value1("value1");
   dict.Set("key1", value1.CreateDeepCopy());
   bool seen1 = false;
   for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
@@ -1078,7 +848,7 @@
   }
   EXPECT_TRUE(seen1);
 
-  Value value2("value2");
+  StringValue value2("value2");
   dict.Set("key2", value2.CreateDeepCopy());
   bool seen2 = seen1 = false;
   for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
@@ -1104,11 +874,11 @@
   DictionaryValue main_dict;
   ListValue main_list;
 
-  Value bool_value(false);
-  Value int_value(1234);
-  Value double_value(12.34567);
-  Value string_value("foo");
-  BinaryValue binary_value(Value::Type::BINARY);
+  FundamentalValue bool_value(false);
+  FundamentalValue int_value(1234);
+  FundamentalValue double_value(12.34567);
+  StringValue string_value("foo");
+  BinaryValue binary_value;
   DictionaryValue dict_value;
   ListValue list_value;
 
diff --git a/base/version.cc b/base/version.cc
index ca97a84..02213fb 100644
--- a/base/version.cc
+++ b/base/version.cc
@@ -93,8 +93,6 @@
   components_.swap(parsed);
 }
 
-Version::Version(std::vector<uint32_t> components) : components_(components) {}
-
 bool Version::IsValid() const {
   return (!components_.empty());
 }
diff --git a/base/version.h b/base/version.h
index b3a0956..25b570a 100644
--- a/base/version.h
+++ b/base/version.h
@@ -25,17 +25,13 @@
 
   Version(const Version& other);
 
+  ~Version();
+
   // Initializes from a decimal dotted version number, like "0.1.1".
   // Each component is limited to a uint16_t. Call IsValid() to learn
   // the outcome.
   explicit Version(const std::string& version_str);
 
-  // Initializes from a vector of components, like {1, 2, 3, 4}. Call IsValid()
-  // to learn the outcome.
-  explicit Version(std::vector<uint32_t> components);
-
-  ~Version();
-
   // Returns true if the object contains a valid version number.
   bool IsValid() const;
 
@@ -73,4 +69,8 @@
 
 }  // namespace base
 
+// TODO(xhwang) remove this when all users are updated to explicitly use the
+// namespace
+using base::Version;
+
 #endif  // BASE_VERSION_H_
diff --git a/base/version_unittest.cc b/base/version_unittest.cc
index 4ca784f..5d9ea99 100644
--- a/base/version_unittest.cc
+++ b/base/version_unittest.cc
@@ -6,7 +6,6 @@
 
 #include <stddef.h>
 #include <stdint.h>
-#include <utility>
 
 #include "base/macros.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -14,17 +13,17 @@
 namespace {
 
 TEST(VersionTest, DefaultConstructor) {
-  base::Version v;
+  Version v;
   EXPECT_FALSE(v.IsValid());
 }
 
 TEST(VersionTest, ValueSemantics) {
-  base::Version v1("1.2.3.4");
+  Version v1("1.2.3.4");
   EXPECT_TRUE(v1.IsValid());
-  base::Version v3;
+  Version v3;
   EXPECT_FALSE(v3.IsValid());
   {
-    base::Version v2(v1);
+    Version v2(v1);
     v3 = v2;
     EXPECT_TRUE(v2.IsValid());
     EXPECT_EQ(v1, v2);
@@ -32,14 +31,6 @@
   EXPECT_EQ(v3, v1);
 }
 
-TEST(VersionTest, MoveSemantics) {
-  const std::vector<uint32_t> components = {1, 2, 3, 4};
-  base::Version v1(std::move(components));
-  EXPECT_TRUE(v1.IsValid());
-  base::Version v2("1.2.3.4");
-  EXPECT_EQ(v1, v2);
-}
-
 TEST(VersionTest, GetVersionFromString) {
   static const struct version_string {
     const char* input;
@@ -76,7 +67,7 @@
   };
 
   for (size_t i = 0; i < arraysize(cases); ++i) {
-    base::Version version(cases[i].input);
+    Version version(cases[i].input);
     EXPECT_EQ(cases[i].success, version.IsValid());
     if (cases[i].success) {
       EXPECT_EQ(cases[i].parts, version.components().size());
@@ -105,8 +96,8 @@
     {"11.0.10", "15.5.28.130162", -1},
   };
   for (size_t i = 0; i < arraysize(cases); ++i) {
-    base::Version lhs(cases[i].lhs);
-    base::Version rhs(cases[i].rhs);
+    Version lhs(cases[i].lhs);
+    Version rhs(cases[i].rhs);
     EXPECT_EQ(lhs.CompareTo(rhs), cases[i].expected) <<
         cases[i].lhs << " ? " << cases[i].rhs;
 
@@ -161,7 +152,7 @@
     {"1.2.0.0.0.0", "1.2.*", 0},
   };
   for (size_t i = 0; i < arraysize(cases); ++i) {
-    const base::Version version(cases[i].lhs);
+    const Version version(cases[i].lhs);
     const int result = version.CompareToWildcardString(cases[i].rhs);
     EXPECT_EQ(result, cases[i].expected) << cases[i].lhs << "?" << cases[i].rhs;
   }
@@ -185,7 +176,7 @@
     {"*.2", false},
   };
   for (size_t i = 0; i < arraysize(cases); ++i) {
-    EXPECT_EQ(base::Version::IsValidWildcardString(cases[i].version),
+    EXPECT_EQ(Version::IsValidWildcardString(cases[i].version),
         cases[i].expected) << cases[i].version << "?" << cases[i].expected;
   }
 }
diff --git a/base/win/scoped_comptr.h b/base/win/scoped_comptr.h
index 9442672..5ce60e2 100644
--- a/base/win/scoped_comptr.h
+++ b/base/win/scoped_comptr.h
@@ -51,7 +51,7 @@
   // Explicit Release() of the held object.  Useful for reuse of the
   // ScopedComPtr instance.
   // Note that this function equates to IUnknown::Release and should not
-  // be confused with e.g. unique_ptr::release().
+  // be confused with e.g. scoped_ptr::release().
   void Release() {
     if (this->ptr_ != NULL) {
       this->ptr_->Release();
diff --git a/base/win/scoped_handle_test_dll.cc b/base/win/scoped_handle_test_dll.cc
index 0d70c0b..c72e459 100644
--- a/base/win/scoped_handle_test_dll.cc
+++ b/base/win/scoped_handle_test_dll.cc
@@ -66,7 +66,7 @@
   ::CloseHandle(ready_event);
 
   if (threads_.size() != kNumThreads) {
-    for (auto* thread : threads_)
+    for (const auto& thread : threads_)
       ::CloseHandle(thread);
     ::CloseHandle(start_event);
     return false;
@@ -74,7 +74,7 @@
 
   ::SetEvent(start_event);
   ::CloseHandle(start_event);
-  for (auto* thread : threads_) {
+  for (const auto& thread : threads_) {
     ::WaitForSingleObject(thread, INFINITE);
     ::CloseHandle(thread);
   }
diff --git a/base/win/scoped_hdc.h b/base/win/scoped_hdc.h
index 890e34a..fa686dd 100644
--- a/base/win/scoped_hdc.h
+++ b/base/win/scoped_hdc.h
@@ -7,7 +7,6 @@
 
 #include <windows.h>
 
-#include "base/debug/gdi_debug_util_win.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/win/scoped_handle.h"
@@ -29,8 +28,7 @@
       // If GetDC(NULL) returns NULL, something really bad has happened, like
       // GDI handle exhaustion.  In this case Chrome is going to behave badly no
       // matter what, so we may as well just force a crash now.
-      if (!hdc_)
-        base::debug::CollectGDIUsageAndDie();
+      CHECK(hdc_);
     }
   }
 
diff --git a/components/timers/alarm_timer_chromeos.cc b/components/timers/alarm_timer_chromeos.cc
index 601b411..3f1abbf 100644
--- a/components/timers/alarm_timer_chromeos.cc
+++ b/components/timers/alarm_timer_chromeos.cc
@@ -6,146 +6,450 @@
 
 #include <stdint.h>
 #include <sys/timerfd.h>
-
-#include <algorithm>
 #include <utility>
 
 #include "base/bind.h"
-#include "base/debug/task_annotator.h"
+#include "base/bind_helpers.h"
 #include "base/files/file_util.h"
+#include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
+#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
 #include "base/pending_task.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
 
 namespace timers {
+namespace {
+// This class represents the IO thread that the AlarmTimer::Delegate may use for
+// watching file descriptors if it gets called from a thread that does not have
+// a MessageLoopForIO.  It is a lazy global instance because it may not always
+// be necessary.
+class RtcAlarmIOThread : public base::Thread {
+ public:
+  RtcAlarmIOThread() : Thread("RTC Alarm IO Thread") {
+    CHECK(
+        StartWithOptions(base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
+  }
+  ~RtcAlarmIOThread() override { Stop(); }
+};
 
-AlarmTimer::AlarmTimer(bool retain_user_task, bool is_repeating)
-    : base::Timer(retain_user_task, is_repeating),
-      alarm_fd_(timerfd_create(CLOCK_REALTIME_ALARM, 0)),
-      weak_factory_(this) {}
+base::LazyInstance<RtcAlarmIOThread> g_io_thread = LAZY_INSTANCE_INITIALIZER;
 
-AlarmTimer::~AlarmTimer() {
-  DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
-  Stop();
+}  // namespace
+
+// Watches a MessageLoop and runs a callback if that MessageLoop will be
+// destroyed.
+class AlarmTimer::MessageLoopObserver
+    : public base::MessageLoop::DestructionObserver {
+ public:
+  // Constructs a MessageLoopObserver that will observe |message_loop| and will
+  // call |on_will_be_destroyed_callback| when |message_loop| is about to be
+  // destroyed.
+  MessageLoopObserver(base::MessageLoop* message_loop,
+                      base::Closure on_will_be_destroyed_callback)
+      : message_loop_(message_loop),
+        on_will_be_destroyed_callback_(on_will_be_destroyed_callback) {
+    DCHECK(message_loop_);
+    message_loop_->AddDestructionObserver(this);
+  }
+
+  ~MessageLoopObserver() override {
+    // If |message_loop_| was destroyed, then this class will have already
+    // unregistered itself.  Doing it again will trigger a warning.
+    if (message_loop_)
+      message_loop_->RemoveDestructionObserver(this);
+  }
+
+  // base::MessageLoop::DestructionObserver override.
+  void WillDestroyCurrentMessageLoop() override {
+    message_loop_->RemoveDestructionObserver(this);
+    message_loop_ = NULL;
+
+    on_will_be_destroyed_callback_.Run();
+  }
+
+ private:
+  // The MessageLoop that this class should watch.  Is a weak pointer.
+  base::MessageLoop* message_loop_;
+
+  // The callback to run when |message_loop_| will be destroyed.
+  base::Closure on_will_be_destroyed_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(MessageLoopObserver);
+};
+
+// This class manages a Real Time Clock (RTC) alarm, a feature that is available
+// from linux version 3.11 onwards.  It creates a file descriptor for the RTC
+// alarm timer and then watches that file descriptor to see when it can be read
+// without blocking, indicating that the timer has fired.
+//
+// A major problem for this class is that watching file descriptors is only
+// available on a MessageLoopForIO but there is no guarantee the timer is going
+// to be created on one.  To get around this, the timer has a dedicated thread
+// with a MessageLoopForIO that posts tasks back to the thread that started the
+// timer.
+class AlarmTimer::Delegate
+    : public base::RefCountedThreadSafe<AlarmTimer::Delegate>,
+      public base::MessageLoopForIO::Watcher {
+ public:
+  // Construct a Delegate for the AlarmTimer.  It should be safe to call
+  // |on_timer_fired_callback| multiple times.
+  explicit Delegate(base::Closure on_timer_fired_callback);
+
+  // Returns true if the system timer managed by this delegate is capable of
+  // waking the system from suspend.
+  bool CanWakeFromSuspend();
+
+  // Resets the timer to fire after |delay| has passed.  Cancels any
+  // pre-existing delay.
+  void Reset(base::TimeDelta delay);
+
+  // Stops the currently running timer.  It should be safe to call this even if
+  // the timer is not running.
+  void Stop();
+
+  // Sets a hook that will be called when the timer fires and a task has been
+  // queued on |origin_task_runner_|.  Used by tests to wait until a task is
+  // pending in the MessageLoop.
+  void SetTimerFiredCallbackForTest(base::Closure test_callback);
+
+  // base::MessageLoopForIO::Watcher overrides.
+  void OnFileCanReadWithoutBlocking(int fd) override;
+  void OnFileCanWriteWithoutBlocking(int fd) override;
+
+ private:
+  friend class base::RefCountedThreadSafe<Delegate>;
+  ~Delegate() override;
+
+  // Actually performs the system calls to set up the timer.  This must be
+  // called on a MessageLoopForIO.
+  void ResetImpl(base::TimeDelta delay, int reset_sequence_number);
+
+  // Callback that is run when the timer fires.  Must be run on
+  // |origin_task_runner_|.
+  void OnTimerFired(int reset_sequence_number);
+
+  // File descriptor associated with the alarm timer.
+  int alarm_fd_;
+
+  // Task runner which initially started the timer.
+  scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
+
+  // Callback that should be run when the timer fires.
+  base::Closure on_timer_fired_callback_;
+
+  // Hook used by tests to be notified when the timer has fired and a task has
+  // been queued in the MessageLoop.
+  base::Closure on_timer_fired_callback_for_test_;
+
+  // Manages watching file descriptors.
+  std::unique_ptr<base::MessageLoopForIO::FileDescriptorWatcher> fd_watcher_;
+
+  // The sequence numbers of the last Reset() call handled respectively on
+  // |origin_task_runner_| and on the MessageLoopForIO used for watching the
+  // timer file descriptor.  Note that these can be the same MessageLoop.
+  // OnTimerFired() runs |on_timer_fired_callback_| only if the sequence number
+  // it receives from the MessageLoopForIO matches
+  // |origin_reset_sequence_number_|.
+  int origin_reset_sequence_number_;
+  int io_reset_sequence_number_;
+
+  DISALLOW_COPY_AND_ASSIGN(Delegate);
+};
+
+AlarmTimer::Delegate::Delegate(base::Closure on_timer_fired_callback)
+    : alarm_fd_(timerfd_create(CLOCK_REALTIME_ALARM, 0)),
+      on_timer_fired_callback_(on_timer_fired_callback),
+      origin_reset_sequence_number_(0),
+      io_reset_sequence_number_(0) {
+  // The call to timerfd_create above may fail.  This is the only indication
+  // that CLOCK_REALTIME_ALARM is not supported on this system.
+  DPLOG_IF(INFO, (alarm_fd_ == -1))
+      << "CLOCK_REALTIME_ALARM not supported on this system";
 }
 
-void AlarmTimer::Stop() {
-  DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
+AlarmTimer::Delegate::~Delegate() {
+  if (alarm_fd_ != -1)
+    close(alarm_fd_);
+}
 
-  if (!base::Timer::is_running())
-    return;
+bool AlarmTimer::Delegate::CanWakeFromSuspend() {
+  return alarm_fd_ != -1;
+}
 
-  if (!CanWakeFromSuspend()) {
-    base::Timer::Stop();
+void AlarmTimer::Delegate::Reset(base::TimeDelta delay) {
+  // Get a task runner for the current message loop.  When the timer fires, we
+  // will
+  // post tasks to this proxy to let the parent timer know.
+  origin_task_runner_ = base::ThreadTaskRunnerHandle::Get();
+
+  // Increment the sequence number.  Used to invalidate any events that have
+  // been queued but not yet run since the last time Reset() was called.
+  origin_reset_sequence_number_++;
+
+  // Calling timerfd_settime with a zero delay actually clears the timer so if
+  // the user has requested a zero delay timer, we need to handle it
+  // differently.  We queue the task here but we still go ahead and call
+  // timerfd_settime with the zero delay anyway to cancel any previous delay
+  // that might have been programmed.
+  if (delay <= base::TimeDelta::FromMicroseconds(0)) {
+    // The timerfd_settime documentation is vague on what happens when it is
+    // passed a negative delay.  We can sidestep the issue by ensuring that
+    // the delay is 0.
+    delay = base::TimeDelta::FromMicroseconds(0);
+    origin_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
+                   origin_reset_sequence_number_));
+  }
+
+  // Run ResetImpl() on a MessageLoopForIO.
+  if (base::MessageLoopForIO::IsCurrent()) {
+    ResetImpl(delay, origin_reset_sequence_number_);
+  } else {
+    g_io_thread.Pointer()->task_runner()->PostTask(
+        FROM_HERE,
+        base::Bind(&Delegate::ResetImpl, scoped_refptr<Delegate>(this), delay,
+                   origin_reset_sequence_number_));
+  }
+}
+
+void AlarmTimer::Delegate::Stop() {
+  // Stop the RTC from a MessageLoopForIO.
+  if (!base::MessageLoopForIO::IsCurrent()) {
+    g_io_thread.Pointer()->task_runner()->PostTask(
+        FROM_HERE, base::Bind(&Delegate::Stop, scoped_refptr<Delegate>(this)));
     return;
   }
 
-  // Cancel any previous callbacks.
-  weak_factory_.InvalidateWeakPtrs();
+  // Stop watching for events.
+  fd_watcher_.reset();
 
-  base::Timer::set_is_running(false);
-  alarm_fd_watcher_.reset();
-  pending_task_.reset();
-
-  if (!base::Timer::retain_user_task())
-    base::Timer::set_user_task(base::Closure());
+  // Now clear the timer.
+  DCHECK_NE(alarm_fd_, -1);
+#if defined(ANDROID)
+  itimerspec blank_time;
+  memset(&blank_time, 0, sizeof(blank_time));
+#else
+  itimerspec blank_time = {};
+#endif  // defined(ANDROID)
+  if (timerfd_settime(alarm_fd_, 0, &blank_time, NULL) < 0)
+    PLOG(ERROR) << "Unable to clear alarm time.  Timer may still fire.";
 }
 
-void AlarmTimer::Reset() {
-  DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
-  DCHECK(!base::Timer::user_task().is_null());
+void AlarmTimer::Delegate::OnFileCanReadWithoutBlocking(int fd) {
+  DCHECK_EQ(alarm_fd_, fd);
 
-  if (!CanWakeFromSuspend()) {
-    base::Timer::Reset();
-    return;
+  // Read from the fd to ack the event.
+  char val[sizeof(uint64_t)];
+  if (!base::ReadFromFD(alarm_fd_, val, sizeof(uint64_t)))
+    PLOG(DFATAL) << "Unable to read from timer file descriptor.";
+
+  // Make sure that the parent timer is informed on the proper message loop.
+  if (origin_task_runner_->RunsTasksOnCurrentThread()) {
+    OnTimerFired(io_reset_sequence_number_);
+  } else {
+    origin_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
+                   io_reset_sequence_number_));
+  }
+}
+
+void AlarmTimer::Delegate::OnFileCanWriteWithoutBlocking(int /*fd*/) {
+  NOTREACHED();
+}
+
+void AlarmTimer::Delegate::SetTimerFiredCallbackForTest(
+    base::Closure test_callback) {
+  on_timer_fired_callback_for_test_ = test_callback;
+}
+
+void AlarmTimer::Delegate::ResetImpl(base::TimeDelta delay,
+                                     int reset_sequence_number) {
+  DCHECK(base::MessageLoopForIO::IsCurrent());
+  DCHECK_NE(alarm_fd_, -1);
+
+  // Store the sequence number in the IO thread variable.  When the timer
+  // fires, we will bind this value to the OnTimerFired callback to ensure
+  // that we do the right thing if the timer gets reset.
+  io_reset_sequence_number_ = reset_sequence_number;
+
+  // If we were already watching the fd, this will stop watching it.
+  fd_watcher_.reset(new base::MessageLoopForIO::FileDescriptorWatcher);
+
+  // Start watching the fd to see when the timer fires.
+  if (!base::MessageLoopForIO::current()->WatchFileDescriptor(
+          alarm_fd_, false, base::MessageLoopForIO::WATCH_READ,
+          fd_watcher_.get(), this)) {
+    LOG(ERROR) << "Error while attempting to watch file descriptor for RTC "
+               << "alarm.  Timer will not fire.";
   }
 
-  // Cancel any previous callbacks and stop watching |alarm_fd_|.
-  weak_factory_.InvalidateWeakPtrs();
-  alarm_fd_watcher_.reset();
-
-  // Ensure that the delay is not negative.
-  const base::TimeDelta delay =
-      std::max(base::TimeDelta(), base::Timer::GetCurrentDelay());
-
-  // Set up the pending task.
-  base::Timer::set_desired_run_time(
-      delay.is_zero() ? base::TimeTicks() : base::TimeTicks::Now() + delay);
-  pending_task_ = base::MakeUnique<base::PendingTask>(
-      base::Timer::posted_from(), base::Timer::user_task(),
-      base::Timer::desired_run_time(), true /* nestable */);
-
-  // Set |alarm_fd_| to be signaled when the delay expires. If the delay is
-  // zero, |alarm_fd_| will never be signaled. This overrides the previous
-  // delay, if any.
+  // Actually set the timer.  This will also clear the pre-existing timer, if
+  // any.
+#if defined(ANDROID)
+  itimerspec alarm_time;
+  memset(&alarm_time, 0, sizeof(alarm_time));
+#else
   itimerspec alarm_time = {};
+#endif  // defined(ANDROID)
   alarm_time.it_value.tv_sec = delay.InSeconds();
   alarm_time.it_value.tv_nsec =
       (delay.InMicroseconds() % base::Time::kMicrosecondsPerSecond) *
       base::Time::kNanosecondsPerMicrosecond;
   if (timerfd_settime(alarm_fd_, 0, &alarm_time, NULL) < 0)
     PLOG(ERROR) << "Error while setting alarm time.  Timer will not fire";
-
-  // The timer is running.
-  base::Timer::set_is_running(true);
-
-  // If the delay is zero, post the task now.
-  if (delay.is_zero()) {
-    origin_task_runner_->PostTask(
-        FROM_HERE,
-        base::Bind(&AlarmTimer::OnTimerFired, weak_factory_.GetWeakPtr()));
-  } else {
-    // Otherwise, if the delay is not zero, generate a tracing event to indicate
-    // that the task was posted and watch |alarm_fd_|.
-    base::debug::TaskAnnotator().DidQueueTask("AlarmTimer::Reset",
-                                              *pending_task_);
-    alarm_fd_watcher_ = base::FileDescriptorWatcher::WatchReadable(
-        alarm_fd_, base::Bind(&AlarmTimer::OnAlarmFdReadableWithoutBlocking,
-                              weak_factory_.GetWeakPtr()));
-  }
 }
 
-void AlarmTimer::OnAlarmFdReadableWithoutBlocking() {
+void AlarmTimer::Delegate::OnTimerFired(int reset_sequence_number) {
   DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
-  DCHECK(base::Timer::IsRunning());
 
-  // Read from |alarm_fd_| to ack the event.
-  char val[sizeof(uint64_t)];
-  if (!base::ReadFromFD(alarm_fd_, val, sizeof(uint64_t)))
-    PLOG(DFATAL) << "Unable to read from timer file descriptor.";
+  // If a test wants to be notified when this function is about to run, then
+  // re-queue this task in the MessageLoop and run the test's callback.
+  if (!on_timer_fired_callback_for_test_.is_null()) {
+    origin_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&Delegate::OnTimerFired, scoped_refptr<Delegate>(this),
+                   reset_sequence_number));
 
-  OnTimerFired();
+    on_timer_fired_callback_for_test_.Run();
+    on_timer_fired_callback_for_test_.Reset();
+    return;
+  }
+
+  // Check to make sure that the timer was not reset in the time between when
+  // this task was queued to run and now.  If it was reset, then don't do
+  // anything.
+  if (reset_sequence_number != origin_reset_sequence_number_)
+    return;
+
+  on_timer_fired_callback_.Run();
+}
+
+AlarmTimer::AlarmTimer(bool retain_user_task, bool is_repeating)
+    : base::Timer(retain_user_task, is_repeating),
+      can_wake_from_suspend_(false),
+      origin_message_loop_(NULL),
+      weak_factory_(this) {
+  Init();
+}
+
+AlarmTimer::AlarmTimer(const tracked_objects::Location& posted_from,
+                       base::TimeDelta delay,
+                       const base::Closure& user_task,
+                       bool is_repeating)
+    : base::Timer(posted_from, delay, user_task, is_repeating),
+      can_wake_from_suspend_(false),
+      origin_message_loop_(NULL),
+      weak_factory_(this) {
+  Init();
+}
+
+AlarmTimer::~AlarmTimer() {
+  Stop();
+}
+
+void AlarmTimer::SetTimerFiredCallbackForTest(base::Closure test_callback) {
+  delegate_->SetTimerFiredCallbackForTest(test_callback);
+}
+
+void AlarmTimer::Init() {
+  delegate_ = make_scoped_refptr(new AlarmTimer::Delegate(
+      base::Bind(&AlarmTimer::OnTimerFired, weak_factory_.GetWeakPtr())));
+  can_wake_from_suspend_ = delegate_->CanWakeFromSuspend();
+}
+
+void AlarmTimer::Stop() {
+  if (!base::Timer::is_running())
+    return;
+
+  if (!can_wake_from_suspend_) {
+    base::Timer::Stop();
+    return;
+  }
+
+  // Clear the running flag, stop the delegate, and delete the pending task.
+  base::Timer::set_is_running(false);
+  delegate_->Stop();
+  pending_task_.reset();
+
+  // Stop watching |origin_message_loop_|.
+  origin_message_loop_ = NULL;
+  message_loop_observer_.reset();
+
+  if (!base::Timer::retain_user_task())
+    base::Timer::set_user_task(base::Closure());
+}
+
+void AlarmTimer::Reset() {
+  if (!can_wake_from_suspend_) {
+    base::Timer::Reset();
+    return;
+  }
+
+  DCHECK(!base::Timer::user_task().is_null());
+  DCHECK(!origin_message_loop_ ||
+         origin_message_loop_->task_runner()->RunsTasksOnCurrentThread());
+
+  // Make sure that the timer will stop if the underlying message loop is
+  // destroyed.
+  if (!origin_message_loop_) {
+    origin_message_loop_ = base::MessageLoop::current();
+    message_loop_observer_.reset(new MessageLoopObserver(
+        origin_message_loop_,
+        base::Bind(&AlarmTimer::WillDestroyCurrentMessageLoop,
+                   weak_factory_.GetWeakPtr())));
+  }
+
+  // Set up the pending task.
+  if (base::Timer::GetCurrentDelay() > base::TimeDelta::FromMicroseconds(0)) {
+    base::Timer::set_desired_run_time(base::TimeTicks::Now() +
+                                      base::Timer::GetCurrentDelay());
+    pending_task_.reset(new base::PendingTask(
+        base::Timer::posted_from(), base::Timer::user_task(),
+        base::Timer::desired_run_time(), true /* nestable */));
+  } else {
+    base::Timer::set_desired_run_time(base::TimeTicks());
+    pending_task_.reset(new base::PendingTask(base::Timer::posted_from(),
+                                              base::Timer::user_task()));
+  }
+  base::MessageLoop::current()->task_annotator()->DidQueueTask(
+      "AlarmTimer::Reset", *pending_task_);
+
+  // Now start up the timer.
+  delegate_->Reset(base::Timer::GetCurrentDelay());
+  base::Timer::set_is_running(true);
+}
+
+void AlarmTimer::WillDestroyCurrentMessageLoop() {
+  Stop();
 }
 
 void AlarmTimer::OnTimerFired() {
-  DCHECK(origin_task_runner_->RunsTasksOnCurrentThread());
-  DCHECK(base::Timer::IsRunning());
+  if (!base::Timer::IsRunning())
+    return;
+
   DCHECK(pending_task_.get());
 
-  // Take ownership of the PendingTask to prevent it from being deleted if the
-  // AlarmTimer is deleted.
-  const auto pending_user_task = std::move(pending_task_);
+  // Take ownership of the pending user task, which is going to be cleared by
+  // the Stop() or Reset() functions below.
+  std::unique_ptr<base::PendingTask> pending_user_task(
+      std::move(pending_task_));
 
-  base::WeakPtr<AlarmTimer> weak_ptr = weak_factory_.GetWeakPtr();
+  // Re-schedule or stop the timer as requested.
+  if (base::Timer::is_repeating())
+    Reset();
+  else
+    Stop();
 
-  // Run the task.
   TRACE_TASK_EXECUTION("AlarmTimer::OnTimerFired", *pending_user_task);
-  base::debug::TaskAnnotator().RunTask("AlarmTimer::Reset",
-                                       pending_user_task.get());
 
-  // If the timer wasn't deleted, stopped or reset by the callback, reset or
-  // stop it.
-  if (weak_ptr.get()) {
-    if (base::Timer::is_repeating())
-      Reset();
-    else
-      Stop();
-  }
-}
-
-bool AlarmTimer::CanWakeFromSuspend() const {
-  return alarm_fd_ != -1;
+  // Now run the user task.
+  base::MessageLoop::current()->task_annotator()->RunTask("AlarmTimer::Reset",
+                                                          *pending_user_task);
 }
 
 OneShotAlarmTimer::OneShotAlarmTimer() : AlarmTimer(false, false) {
@@ -157,12 +461,25 @@
 RepeatingAlarmTimer::RepeatingAlarmTimer() : AlarmTimer(true, true) {
 }
 
+RepeatingAlarmTimer::RepeatingAlarmTimer(
+    const tracked_objects::Location& posted_from,
+    base::TimeDelta delay,
+    const base::Closure& user_task)
+    : AlarmTimer(posted_from, delay, user_task, true) {
+}
+
 RepeatingAlarmTimer::~RepeatingAlarmTimer() {
 }
 
 SimpleAlarmTimer::SimpleAlarmTimer() : AlarmTimer(true, false) {
 }
 
+SimpleAlarmTimer::SimpleAlarmTimer(const tracked_objects::Location& posted_from,
+                                   base::TimeDelta delay,
+                                   const base::Closure& user_task)
+    : AlarmTimer(posted_from, delay, user_task, false) {
+}
+
 SimpleAlarmTimer::~SimpleAlarmTimer() {
 }
 
diff --git a/components/timers/alarm_timer_chromeos.h b/components/timers/alarm_timer_chromeos.h
index d861aee..313c9f9 100644
--- a/components/timers/alarm_timer_chromeos.h
+++ b/components/timers/alarm_timer_chromeos.h
@@ -7,69 +7,85 @@
 
 #include <memory>
 
-#include "base/files/file_descriptor_watcher_posix.h"
+#include "base/callback.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/weak_ptr.h"
-#include "base/threading/sequenced_task_runner_handle.h"
 #include "base/time/time.h"
 #include "base/timer/timer.h"
 
 namespace base {
+class MessageLoop;
 struct PendingTask;
 }
 
 namespace timers {
 // The class implements a timer that is capable of waking the system up from a
-// suspended state. For example, this is useful for running tasks that are
+// suspended state.  For example, this is useful for running tasks that are
 // needed for maintaining network connectivity, like sending heartbeat messages.
 // Currently, this feature is only available on Chrome OS systems running linux
-// version 3.11 or higher. On all other platforms, the AlarmTimer behaves
+// version 3.11 or higher.  On all other platforms, the AlarmTimer behaves
 // exactly the same way as a regular Timer.
-//
-// An AlarmTimer instance can only be used from the sequence on which it was
-// instantiated. Start() and Stop() must be called from a thread that supports
-// FileDescriptorWatcher.
 class AlarmTimer : public base::Timer {
  public:
   ~AlarmTimer() override;
 
+  bool can_wake_from_suspend() const { return can_wake_from_suspend_; }
+
+  // Sets a hook that will be called when the timer fires and a task has been
+  // queued on |origin_message_loop_|.  Used by tests to wait until a task is
+  // pending in the MessageLoop.
+  void SetTimerFiredCallbackForTest(base::Closure test_callback);
+
   // Timer overrides.
   void Stop() override;
   void Reset() override;
 
  protected:
+  // The constructors for this class are protected because consumers should
+  // instantiate one of the specialized sub-classes defined below instead.
   AlarmTimer(bool retain_user_task, bool is_repeating);
+  AlarmTimer(const tracked_objects::Location& posted_from,
+             base::TimeDelta delay,
+             const base::Closure& user_task,
+             bool is_repeating);
 
  private:
-  // Called when |alarm_fd_| is readable without blocking. Reads data from
-  // |alarm_fd_| and calls OnTimerFired().
-  void OnAlarmFdReadableWithoutBlocking();
+  // Common initialization that must be performed by both constructors.  This
+  // really should live in a delegated constructor but the way base::Timer's
+  // constructors are written makes it really hard to do so.
+  void Init();
 
-  // Called when the timer fires. Runs the callback.
+  // Will be called by the delegate to indicate that the timer has fired and
+  // that the user task should be run.
   void OnTimerFired();
 
-  // Tracks whether the timer has the ability to wake the system up from
-  // suspend. This is a runtime check because we won't know if the system
-  // supports being woken up from suspend until the constructor actually tries
-  // to set it up.
-  bool CanWakeFromSuspend() const;
+  // Called when |origin_message_loop_| will be destroyed.
+  void WillDestroyCurrentMessageLoop();
 
-  // Timer file descriptor.
-  const int alarm_fd_;
+  // Delegate that will manage actually setting the timer.
+  class Delegate;
+  scoped_refptr<Delegate> delegate_;
 
-  // Watches |alarm_fd_|.
-  std::unique_ptr<base::FileDescriptorWatcher::Controller> alarm_fd_watcher_;
-
-  // Posts tasks to the sequence on which this AlarmTimer was instantiated.
-  const scoped_refptr<base::SequencedTaskRunner> origin_task_runner_ =
-      base::SequencedTaskRunnerHandle::Get();
-
-  // Keeps track of the user task we want to run. A new one is constructed every
-  // time Reset() is called.
+  // Keeps track of the user task we want to run.  A new one is constructed
+  // every time Reset() is called.
   std::unique_ptr<base::PendingTask> pending_task_;
 
-  // Used to invalidate pending callbacks.
+  // Tracks whether the timer has the ability to wake the system up from
+  // suspend.  This is a runtime check because we won't know if the system
+  // supports being woken up from suspend until the delegate actually tries to
+  // set it up.
+  bool can_wake_from_suspend_;
+
+  // Pointer to the message loop that started the timer.  Used to track the
+  // destruction of that message loop.
+  base::MessageLoop* origin_message_loop_;
+
+  // Observes |origin_message_loop_| and informs this class if it will be
+  // destroyed.
+  class MessageLoopObserver;
+  std::unique_ptr<MessageLoopObserver> message_loop_observer_;
+
   base::WeakPtrFactory<AlarmTimer> weak_factory_;
 
   DISALLOW_COPY_AND_ASSIGN(AlarmTimer);
@@ -80,6 +96,8 @@
 // repeat.  Useful for fire-and-forget tasks.
 class OneShotAlarmTimer : public AlarmTimer {
  public:
+  // Constructs a basic OneShotAlarmTimer.  An AlarmTimer constructed this way
+  // requires that Start() is called before Reset() is called.
   OneShotAlarmTimer();
   ~OneShotAlarmTimer() override;
 };
@@ -90,7 +108,18 @@
 // after it fires.
 class RepeatingAlarmTimer : public AlarmTimer {
  public:
+  // Constructs a basic RepeatingAlarmTimer.  An AlarmTimer constructed this way
+  // requires that Start() is called before Reset() is called.
   RepeatingAlarmTimer();
+
+  // Constructs a RepeatingAlarmTimer with pre-populated parameters but does not
+  // start it.  Useful if |user_task| or |delay| are not going to change.
+  // Reset() can be called immediately after constructing an AlarmTimer in this
+  // way.
+  RepeatingAlarmTimer(const tracked_objects::Location& posted_from,
+                      base::TimeDelta delay,
+                      const base::Closure& user_task);
+
   ~RepeatingAlarmTimer() override;
 };
 
@@ -99,7 +128,18 @@
 // times but not at a regular interval.
 class SimpleAlarmTimer : public AlarmTimer {
  public:
+  // Constructs a basic SimpleAlarmTimer.  An AlarmTimer constructed this way
+  // requires that Start() is called before Reset() is called.
   SimpleAlarmTimer();
+
+  // Constructs a SimpleAlarmTimer with pre-populated parameters but does not
+  // start it.  Useful if |user_task| or |delay| are not going to change.
+  // Reset() can be called immediately after constructing an AlarmTimer in this
+  // way.
+  SimpleAlarmTimer(const tracked_objects::Location& posted_from,
+                   base::TimeDelta delay,
+                   const base::Closure& user_task);
+
   ~SimpleAlarmTimer() override;
 };
 
diff --git a/crypto/BUILD.gn b/crypto/BUILD.gn
index 6b45c9d..a912d93 100644
--- a/crypto/BUILD.gn
+++ b/crypto/BUILD.gn
@@ -13,11 +13,14 @@
     "apple_keychain.h",
     "apple_keychain_ios.mm",
     "apple_keychain_mac.mm",
+    "auto_cbb.h",
     "capi_util.cc",
     "capi_util.h",
     "crypto_export.h",
     "cssm_init.cc",
     "cssm_init.h",
+    "curve25519.cc",
+    "curve25519.h",
     "ec_private_key.cc",
     "ec_private_key.h",
     "ec_signature_creator.cc",
@@ -44,6 +47,8 @@
     "nss_util.cc",
     "nss_util.h",
     "nss_util_internal.h",
+    "openssl_bio_string.cc",
+    "openssl_bio_string.h",
     "openssl_util.cc",
     "openssl_util.h",
     "p224.cc",
@@ -79,10 +84,6 @@
     "//base/third_party/dynamic_annotations",
   ]
 
-  public_deps = [
-    "//third_party/boringssl",
-  ]
-
   if (!is_mac && !is_ios) {
     sources -= [
       "apple_keychain.h",
@@ -132,6 +133,7 @@
 test("crypto_unittests") {
   sources = [
     "aead_unittest.cc",
+    "curve25519_unittest.cc",
     "ec_private_key_unittest.cc",
     "ec_signature_creator_unittest.cc",
     "encryptor_unittest.cc",
@@ -139,6 +141,7 @@
     "hmac_unittest.cc",
     "nss_key_util_unittest.cc",
     "nss_util_unittest.cc",
+    "openssl_bio_string_unittest.cc",
     "p224_spake_unittest.cc",
     "p224_unittest.cc",
     "random_unittest.cc",
diff --git a/crypto/apple_keychain.h b/crypto/apple_keychain.h
index 1037b7e..1ea2473 100644
--- a/crypto/apple_keychain.h
+++ b/crypto/apple_keychain.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CRYPTO_APPLE_KEYCHAIN_H_
-#define CRYPTO_APPLE_KEYCHAIN_H_
+#ifndef CRYPTO_KEYCHAIN_MAC_H_
+#define CRYPTO_KEYCHAIN_MAC_H_
 
 #include <Security/Security.h>
 
@@ -106,4 +106,4 @@
 
 }  // namespace crypto
 
-#endif  // CRYPTO_APPLE_KEYCHAIN_H_
+#endif  // CRYPTO_KEYCHAIN_MAC_H_
diff --git a/crypto/auto_cbb.h b/crypto/auto_cbb.h
new file mode 100644
index 0000000..5206a21
--- /dev/null
+++ b/crypto/auto_cbb.h
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_AUTO_CBB_H_
+#define CRYPTO_AUTO_CBB_H_
+
+#include <openssl/bytestring.h>
+
+#include "base/macros.h"
+
+namespace crypto {
+
+// AutoCBB is a wrapper over OpenSSL's CBB type that automatically releases
+// resources when going out of scope.
+class AutoCBB {
+ public:
+  AutoCBB() { CBB_zero(&cbb_); }
+  ~AutoCBB() { CBB_cleanup(&cbb_); }
+
+  CBB* get() { return &cbb_; }
+
+  void Reset() {
+    CBB_cleanup(&cbb_);
+    CBB_zero(&cbb_);
+  }
+
+ private:
+  CBB cbb_;
+  DISALLOW_COPY_AND_ASSIGN(AutoCBB);
+};
+
+}  // namespace crypto
+
+#endif   // CRYPTO_AUTO_CBB_H_
diff --git a/crypto/crypto.gyp b/crypto/crypto.gyp
new file mode 100644
index 0000000..8ed2ab2
--- /dev/null
+++ b/crypto/crypto.gyp
@@ -0,0 +1,236 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'chromium_code': 1,
+  },
+  'includes': [
+    'crypto.gypi',
+  ],
+  'targets': [
+    {
+      'target_name': 'crypto',
+      'type': '<(component)',
+      'product_name': 'crcrypto',  # Avoid colliding with OpenSSL's libcrypto
+      'dependencies': [
+        '../base/base.gyp:base',
+        '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+        '../third_party/boringssl/boringssl.gyp:boringssl',
+      ],
+      'defines': [
+        'CRYPTO_IMPLEMENTATION',
+      ],
+      'conditions': [
+        [ 'os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
+          'dependencies': [
+            '../build/linux/system.gyp:nss',
+          ],
+          'export_dependent_settings': [
+            '../build/linux/system.gyp:nss',
+          ],
+          'conditions': [
+            [ 'chromeos==1', {
+                'sources/': [ ['include', '_chromeos\\.cc$'] ]
+              },
+            ],
+          ],
+        }],
+        [ 'OS != "mac" and OS != "ios"', {
+          'sources!': [
+            'apple_keychain.h',
+            'mock_apple_keychain.cc',
+            'mock_apple_keychain.h',
+          ],
+        }],
+        [ 'os_bsd==1', {
+          'link_settings': {
+            'libraries': [
+              '-L/usr/local/lib -lexecinfo',
+              ],
+            },
+          },
+        ],
+        [ 'OS == "mac"', {
+          'link_settings': {
+            'libraries': [
+              '$(SDKROOT)/System/Library/Frameworks/Security.framework',
+            ],
+          },
+        }, {  # OS != "mac"
+          'sources!': [
+            'cssm_init.cc',
+            'cssm_init.h',
+            'mac_security_services_lock.cc',
+            'mac_security_services_lock.h',
+          ],
+        }],
+        [ 'OS != "win"', {
+          'sources!': [
+            'capi_util.h',
+            'capi_util.cc',
+          ],
+        }],
+        [ 'OS == "win"', {
+          'msvs_disabled_warnings': [
+            4267,  # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+          ],
+        }],
+        [ 'use_nss_certs==0', {
+            # Some files are built when NSS is used for the platform certificate library.
+            'sources!': [
+              'nss_key_util.cc',
+              'nss_key_util.h',
+              'nss_util.cc',
+              'nss_util.h',
+              'nss_util_internal.h',
+            ],
+        },],
+      ],
+      'sources': [
+        '<@(crypto_sources)',
+      ],
+    },
+    {
+      'target_name': 'crypto_unittests',
+      'type': 'executable',
+      'sources': [
+        'aead_unittest.cc',
+        'curve25519_unittest.cc',
+        'ec_private_key_unittest.cc',
+        'ec_signature_creator_unittest.cc',
+        'encryptor_unittest.cc',
+        'hkdf_unittest.cc',
+        'hmac_unittest.cc',
+        'nss_key_util_unittest.cc',
+        'nss_util_unittest.cc',
+        'openssl_bio_string_unittest.cc',
+        'p224_unittest.cc',
+        'p224_spake_unittest.cc',
+        'random_unittest.cc',
+        'rsa_private_key_unittest.cc',
+        'secure_hash_unittest.cc',
+        'sha2_unittest.cc',
+        'signature_creator_unittest.cc',
+        'signature_verifier_unittest.cc',
+        'symmetric_key_unittest.cc',
+      ],
+      'dependencies': [
+        'crypto',
+        'crypto_test_support',
+        '../base/base.gyp:base',
+        '../base/base.gyp:run_all_unittests',
+        '../base/base.gyp:test_support_base',
+        '../testing/gmock.gyp:gmock',
+        '../testing/gtest.gyp:gtest',
+        '../third_party/boringssl/boringssl.gyp:boringssl',
+      ],
+      'conditions': [
+        [ 'use_nss_certs == 1', {
+          'dependencies': [
+            '../build/linux/system.gyp:nss',
+          ],
+        }],
+        [ 'use_nss_certs == 0', {
+          # Some files are built when NSS is used for the platform certificate library.
+          'sources!': [
+            'nss_key_util_unittest.cc',
+            'nss_util_unittest.cc',
+          ],
+        }],
+        [ 'OS == "win"', {
+          # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+          'msvs_disabled_warnings': [4267, ],
+        }],
+      ],
+    },
+  ],
+  'conditions': [
+    ['OS == "win" and target_arch=="ia32"', {
+      'targets': [
+        {
+          'target_name': 'crypto_nacl_win64',
+          # We use the native APIs for the helper.
+          'type': '<(component)',
+          'dependencies': [
+            '../base/base.gyp:base_win64',
+            '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
+            '../third_party/boringssl/boringssl.gyp:boringssl_nacl_win64',
+          ],
+          'sources': [
+            '<@(nacl_win64_sources)',
+          ],
+          'defines': [
+           'CRYPTO_IMPLEMENTATION',
+           '<@(nacl_win64_defines)',
+          ],
+          'configurations': {
+            'Common_Base': {
+              'msvs_target_platform': 'x64',
+            },
+          },
+        },
+      ],
+    }],
+    ['use_nss_certs==1', {
+      'targets': [
+        {
+          'target_name': 'crypto_test_support',
+          'type': 'static_library',
+          'dependencies': [
+            '../base/base.gyp:base',
+            'crypto',
+          ],
+          'sources': [
+            'scoped_test_nss_db.cc',
+            'scoped_test_nss_db.h',
+            'scoped_test_nss_chromeos_user.cc',
+            'scoped_test_nss_chromeos_user.h',
+            'scoped_test_system_nss_key_slot.cc',
+            'scoped_test_system_nss_key_slot.h',
+          ],
+          'conditions': [
+            ['use_nss_certs==0', {
+              'sources!': [
+                'scoped_test_nss_db.cc',
+                'scoped_test_nss_db.h',
+              ],
+            }],
+            [ 'chromeos==0', {
+              'sources!': [
+                'scoped_test_nss_chromeos_user.cc',
+                'scoped_test_nss_chromeos_user.h',
+                'scoped_test_system_nss_key_slot.cc',
+                'scoped_test_system_nss_key_slot.h',
+              ],
+            }],
+          ],
+        }
+      ]}, {  # use_nss_certs==0
+      'targets': [
+        {
+          'target_name': 'crypto_test_support',
+          'type': 'none',
+          'sources': [],
+        }
+    ]}],
+    ['test_isolation_mode != "noop"', {
+      'targets': [
+        {
+          'target_name': 'crypto_unittests_run',
+          'type': 'none',
+          'dependencies': [
+            'crypto_unittests',
+          ],
+          'includes': [
+            '../build/isolate.gypi',
+                      ],
+          'sources': [
+            'crypto_unittests.isolate',
+          ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/crypto/crypto.gypi b/crypto/crypto.gypi
new file mode 100644
index 0000000..dadc0ea
--- /dev/null
+++ b/crypto/crypto.gypi
@@ -0,0 +1,88 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    # Put all transitive dependencies for Windows HMAC here.
+    # This is required so that we can build them for nacl win64.
+    'variables': {
+      'hmac_win64_related_sources': [
+        'crypto_export.h',
+        'hmac.cc',
+        'hmac.h',
+        'openssl_util.cc',
+        'openssl_util.h',
+        'secure_util.cc',
+        'secure_util.h',
+        'symmetric_key.cc',
+        'symmetric_key.h',
+      ],
+    },
+    'crypto_sources': [
+      # NOTE: all transitive dependencies of HMAC on windows need
+      #     to be placed in the source list above.
+      '<@(hmac_win64_related_sources)',
+      'aead.cc',
+      'aead.h',
+      'apple_keychain.h',
+      'apple_keychain_ios.mm',
+      'apple_keychain_mac.mm',
+      'auto_cbb.h',
+      'capi_util.cc',
+      'capi_util.h',
+      'cssm_init.cc',
+      'cssm_init.h',
+      'curve25519.cc',
+      'curve25519.h',
+      'ec_private_key.cc',
+      'ec_private_key.h',
+      'ec_signature_creator.cc',
+      'ec_signature_creator.h',
+      'ec_signature_creator_impl.cc',
+      'ec_signature_creator_impl.h',
+      'encryptor.cc',
+      'encryptor.h',
+      'hkdf.cc',
+      'hkdf.h',
+      'mac_security_services_lock.cc',
+      'mac_security_services_lock.h',
+      'mock_apple_keychain.cc',
+      'mock_apple_keychain.h',
+      'mock_apple_keychain_ios.cc',
+      'mock_apple_keychain_mac.cc',
+      'p224_spake.cc',
+      'p224_spake.h',
+      'nss_crypto_module_delegate.h',
+      'nss_key_util.cc',
+      'nss_key_util.h',
+      'nss_util.cc',
+      'nss_util.h',
+      'nss_util_internal.h',
+      'openssl_bio_string.cc',
+      'openssl_bio_string.h',
+      'p224.cc',
+      'p224.h',
+      'random.h',
+      'random.cc',
+      'rsa_private_key.cc',
+      'rsa_private_key.h',
+      'scoped_capi_types.h',
+      'scoped_nss_types.h',
+      'secure_hash.cc',
+      'secure_hash.h',
+      'sha2.cc',
+      'sha2.h',
+      'signature_creator.cc',
+      'signature_creator.h',
+      'signature_verifier.cc',
+      'signature_verifier.h',
+      'wincrypt_shim.h',
+    ],
+    'nacl_win64_sources': [
+      '<@(hmac_win64_related_sources)',
+      'random.cc',
+      'random.h',
+    ],
+  }
+}
diff --git a/crypto/crypto_nacl.gyp b/crypto/crypto_nacl.gyp
new file mode 100644
index 0000000..c7c01a8
--- /dev/null
+++ b/crypto/crypto_nacl.gyp
@@ -0,0 +1,44 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'chromium_code': 1,
+  },
+  'includes': [
+    '../native_client/build/untrusted.gypi',
+    'crypto.gypi',
+  ],
+  'targets': [
+    {
+      'target_name': 'crypto_nacl',
+      'type': 'none',
+      'variables': {
+        'nacl_untrusted_build': 1,
+        'nlib_target': 'libcrypto_nacl.a',
+        'build_glibc': 0,
+        'build_newlib': 0,
+        'build_pnacl_newlib': 1,
+      },
+      'dependencies': [
+        '../third_party/boringssl/boringssl_nacl.gyp:boringssl_nacl',
+        '../native_client_sdk/native_client_sdk_untrusted.gyp:nacl_io_untrusted',
+      ],
+      'defines': [
+        'CRYPTO_IMPLEMENTATION',
+      ],
+      'sources': [
+        '<@(crypto_sources)',
+      ],
+      'sources/': [
+        ['exclude', '_nss\.(cc|h)$'],
+        ['exclude', '^(mock_)?apple_'],
+        ['exclude', '^capi_'],
+        ['exclude', '^cssm_'],
+        ['exclude', '^nss_'],
+        ['exclude', '^mac_'],
+      ],
+    },
+  ],
+}
diff --git a/crypto/crypto_unittests.isolate b/crypto/crypto_unittests.isolate
new file mode 100644
index 0000000..de13aa2
--- /dev/null
+++ b/crypto/crypto_unittests.isolate
@@ -0,0 +1,42 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+  'variables': {
+    'command': [
+      '../testing/test_env.py',
+      '<(PRODUCT_DIR)/crypto_unittests<(EXECUTABLE_SUFFIX)',
+      '--brave-new-test-launcher',
+      '--test-launcher-bot-mode',
+      '--asan=<(asan)',
+      '--msan=<(msan)',
+      '--tsan=<(tsan)',
+    ],
+  },
+  'conditions': [
+    ['OS=="linux" or OS=="mac" or OS=="win"', {
+      'variables': {
+        'files': [
+          '../testing/test_env.py',
+        ],
+      },
+    }],
+    ['OS=="mac" and asan==1 and fastbuild==0', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/crypto_unittests.dSYM/',
+        ],
+      },
+    }],
+    ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/crypto_unittests.exe.pdb',
+        ],
+      },
+    }],
+  ],
+  'includes': [
+    '../base/base.isolate',
+  ],
+}
diff --git a/crypto/ec_private_key.h b/crypto/ec_private_key.h
index 432019b..a24219b 100644
--- a/crypto/ec_private_key.h
+++ b/crypto/ec_private_key.h
@@ -15,7 +15,17 @@
 #include "base/macros.h"
 #include "build/build_config.h"
 #include "crypto/crypto_export.h"
-#include "third_party/boringssl/src/include/openssl/base.h"
+
+#if defined(USE_OPENSSL)
+// Forward declaration for openssl/*.h
+typedef struct evp_pkey_st EVP_PKEY;
+#else
+// Forward declaration.
+typedef struct CERTSubjectPublicKeyInfoStr CERTSubjectPublicKeyInfo;
+typedef struct PK11SlotInfoStr PK11SlotInfo;
+typedef struct SECKEYPrivateKeyStr SECKEYPrivateKey;
+typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
+#endif
 
 namespace crypto {
 
@@ -41,30 +51,57 @@
 
   // Creates a new instance by importing an existing key pair.
   // The key pair is given as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
-  // block with empty password and an X.509 SubjectPublicKeyInfo block.
+  // block and an X.509 SubjectPublicKeyInfo block.
   // Returns nullptr if initialization fails.
   //
   // This function is deprecated. Use CreateFromPrivateKeyInfo for new code.
   // See https://crbug.com/603319.
   static std::unique_ptr<ECPrivateKey> CreateFromEncryptedPrivateKeyInfo(
+      const std::string& password,
       const std::vector<uint8_t>& encrypted_private_key_info,
       const std::vector<uint8_t>& subject_public_key_info);
 
+#if !defined(USE_OPENSSL)
+  // Imports the key pair into |slot| and returns in |public_key| and |key|.
+  // Shortcut for code that needs to keep a reference directly to NSS types
+  // without having to create a ECPrivateKey object and make a copy of them.
+  // TODO(mattm): move this function to some NSS util file.
+  static bool ImportFromEncryptedPrivateKeyInfo(
+      PK11SlotInfo* slot,
+      const std::string& password,
+      const uint8_t* encrypted_private_key_info,
+      size_t encrypted_private_key_info_len,
+      CERTSubjectPublicKeyInfo* decoded_spki,
+      bool permanent,
+      bool sensitive,
+      SECKEYPrivateKey** key,
+      SECKEYPublicKey** public_key);
+#endif
+
   // Returns a copy of the object.
   std::unique_ptr<ECPrivateKey> Copy() const;
 
-  EVP_PKEY* key() { return key_.get(); }
+#if defined(USE_OPENSSL)
+  EVP_PKEY* key() { return key_; }
+#else
+  SECKEYPrivateKey* key() { return key_; }
+  SECKEYPublicKey* public_key() { return public_key_; }
+#endif
 
   // Exports the private key to a PKCS #8 PrivateKeyInfo block.
   bool ExportPrivateKey(std::vector<uint8_t>* output) const;
 
   // Exports the private key as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
-  // block wth empty password. This was historically used as a workaround for
-  // NSS API deficiencies and does not provide security.
+  // block and the public key as an X.509 SubjectPublicKeyInfo block.
+  // The |password| and |iterations| are used as inputs to the key derivation
+  // function for generating the encryption key.  PKCS #5 recommends a minimum
+  // of 1000 iterations, on modern systems a larger value may be preferrable.
   //
   // This function is deprecated. Use ExportPrivateKey for new code. See
   // https://crbug.com/603319.
-  bool ExportEncryptedPrivateKey(std::vector<uint8_t>* output) const;
+  bool ExportEncryptedPrivateKey(const std::string& password,
+                                 int iterations,
+                                 std::vector<uint8_t>* output) const;
 
   // Exports the public key to an X.509 SubjectPublicKeyInfo block.
   bool ExportPublicKey(std::vector<uint8_t>* output) const;
@@ -76,7 +113,12 @@
   // Constructor is private. Use one of the Create*() methods above instead.
   ECPrivateKey();
 
-  bssl::UniquePtr<EVP_PKEY> key_;
+#if defined(USE_OPENSSL)
+  EVP_PKEY* key_;
+#else
+  SECKEYPrivateKey* key_;
+  SECKEYPublicKey* public_key_;
+#endif
 
   DISALLOW_COPY_AND_ASSIGN(ECPrivateKey);
 };
diff --git a/crypto/ec_signature_creator_impl.h b/crypto/ec_signature_creator_impl.h
index bd06e25..21614f8 100644
--- a/crypto/ec_signature_creator_impl.h
+++ b/crypto/ec_signature_creator_impl.h
@@ -7,8 +7,6 @@
 
 #include <stdint.h>
 
-#include <vector>
-
 #include "base/compiler_specific.h"
 #include "base/macros.h"
 #include "crypto/ec_signature_creator.h"
diff --git a/crypto/hmac.cc b/crypto/hmac.cc
index bf89e18..af5580b 100644
--- a/crypto/hmac.cc
+++ b/crypto/hmac.cc
@@ -7,26 +7,20 @@
 #include <stddef.h>
 
 #include <algorithm>
-#include <string>
 
 #include "base/logging.h"
-#include "base/stl_util.h"
-#include "crypto/openssl_util.h"
 #include "crypto/secure_util.h"
 #include "crypto/symmetric_key.h"
-#include "third_party/boringssl/src/include/openssl/hmac.h"
 
 namespace crypto {
 
-HMAC::HMAC(HashAlgorithm hash_alg) : hash_alg_(hash_alg), initialized_(false) {
-  // Only SHA-1 and SHA-256 hash algorithms are supported now.
-  DCHECK(hash_alg_ == SHA1 || hash_alg_ == SHA256);
-}
-
-HMAC::~HMAC() {
-  // Zero out key copy.
-  key_.assign(key_.size(), 0);
-  base::STLClearObject(&key_);
+bool HMAC::Init(SymmetricKey* key) {
+  std::string raw_key;
+  bool result = key->GetRawKey(&raw_key) && Init(raw_key);
+  // Zero out key copy.  This might get optimized away, but one can hope.
+  // Using std::string to store key info at all is a larger problem.
+  std::fill(raw_key.begin(), raw_key.end(), 0);
+  return result;
 }
 
 size_t HMAC::DigestLength() const {
@@ -41,35 +35,6 @@
   }
 }
 
-bool HMAC::Init(const unsigned char* key, size_t key_length) {
-  // Init must not be called more than once on the same HMAC object.
-  DCHECK(!initialized_);
-  initialized_ = true;
-  key_.assign(key, key + key_length);
-  return true;
-}
-
-bool HMAC::Init(SymmetricKey* key) {
-  std::string raw_key;
-  bool result = key->GetRawKey(&raw_key) && Init(raw_key);
-  // Zero out key copy.  This might get optimized away, but one can hope.
-  // Using std::string to store key info at all is a larger problem.
-  std::fill(raw_key.begin(), raw_key.end(), 0);
-  return result;
-}
-
-bool HMAC::Sign(const base::StringPiece& data,
-                unsigned char* digest,
-                size_t digest_length) const {
-  DCHECK(initialized_);
-
-  ScopedOpenSSLSafeSizeBuffer<EVP_MAX_MD_SIZE> result(digest, digest_length);
-  return !!::HMAC(hash_alg_ == SHA1 ? EVP_sha1() : EVP_sha256(), key_.data(),
-                  key_.size(),
-                  reinterpret_cast<const unsigned char*>(data.data()),
-                  data.size(), result.safe_buffer(), nullptr);
-}
-
 bool HMAC::Verify(const base::StringPiece& data,
                   const base::StringPiece& digest) const {
   if (digest.size() != DigestLength())
diff --git a/crypto/hmac.h b/crypto/hmac.h
index 2421333..ec32ed7 100644
--- a/crypto/hmac.h
+++ b/crypto/hmac.h
@@ -11,7 +11,6 @@
 #include <stddef.h>
 
 #include <memory>
-#include <vector>
 
 #include "base/compiler_specific.h"
 #include "base/macros.h"
@@ -21,6 +20,7 @@
 namespace crypto {
 
 // Simplify the interface and reduce includes by abstracting out the internals.
+struct HMACPlatformData;
 class SymmetricKey;
 
 class CRYPTO_EXPORT HMAC {
@@ -86,8 +86,7 @@
 
  private:
   HashAlgorithm hash_alg_;
-  bool initialized_;
-  std::vector<unsigned char> key_;
+  std::unique_ptr<HMACPlatformData> plat_;
 
   DISALLOW_COPY_AND_ASSIGN(HMAC);
 };
diff --git a/crypto/hmac_nss.cc b/crypto/hmac_nss.cc
new file mode 100644
index 0000000..9d759b5
--- /dev/null
+++ b/crypto/hmac_nss.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/hmac.h"
+
+#include <nss.h>
+#include <pk11pub.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "crypto/nss_util.h"
+#include "crypto/scoped_nss_types.h"
+
+namespace crypto {
+
+struct HMACPlatformData {
+  CK_MECHANISM_TYPE mechanism_;
+  ScopedPK11Slot slot_;
+  ScopedPK11SymKey sym_key_;
+};
+
+HMAC::HMAC(HashAlgorithm hash_alg)
+    : hash_alg_(hash_alg), plat_(new HMACPlatformData()) {
+  // Only SHA-1 and SHA-256 hash algorithms are supported.
+  switch (hash_alg_) {
+    case SHA1:
+      plat_->mechanism_ = CKM_SHA_1_HMAC;
+      break;
+    case SHA256:
+      plat_->mechanism_ = CKM_SHA256_HMAC;
+      break;
+    default:
+      NOTREACHED() << "Unsupported hash algorithm";
+      break;
+  }
+}
+
+HMAC::~HMAC() {
+}
+
+bool HMAC::Init(const unsigned char *key, size_t key_length) {
+  EnsureNSSInit();
+
+  if (plat_->slot_.get()) {
+    // Init must not be called more than twice on the same HMAC object.
+    NOTREACHED();
+    return false;
+  }
+
+  plat_->slot_.reset(PK11_GetInternalSlot());
+  if (!plat_->slot_.get()) {
+    NOTREACHED();
+    return false;
+  }
+
+  SECItem key_item;
+  key_item.type = siBuffer;
+  key_item.data = const_cast<unsigned char*>(key);  // NSS API isn't const.
+  key_item.len = key_length;
+
+  plat_->sym_key_.reset(PK11_ImportSymKey(plat_->slot_.get(),
+                                          plat_->mechanism_,
+                                          PK11_OriginUnwrap,
+                                          CKA_SIGN,
+                                          &key_item,
+                                          NULL));
+  if (!plat_->sym_key_.get()) {
+    NOTREACHED();
+    return false;
+  }
+
+  return true;
+}
+
+bool HMAC::Sign(const base::StringPiece& data,
+                unsigned char* digest,
+                size_t digest_length) const {
+  if (!plat_->sym_key_.get()) {
+    // Init has not been called before Sign.
+    NOTREACHED();
+    return false;
+  }
+
+  SECItem param = { siBuffer, NULL, 0 };
+  ScopedPK11Context context(PK11_CreateContextBySymKey(plat_->mechanism_,
+                                                       CKA_SIGN,
+                                                       plat_->sym_key_.get(),
+                                                       &param));
+  if (!context.get()) {
+    NOTREACHED();
+    return false;
+  }
+
+  if (PK11_DigestBegin(context.get()) != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  if (PK11_DigestOp(context.get(),
+                    reinterpret_cast<const unsigned char*>(data.data()),
+                    data.length()) != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  unsigned int len = 0;
+  if (PK11_DigestFinal(context.get(),
+                       digest, &len, digest_length) != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace crypto
diff --git a/crypto/nss_crypto_module_delegate.h b/crypto/nss_crypto_module_delegate.h
index cf08f28..6c1da68 100644
--- a/crypto/nss_crypto_module_delegate.h
+++ b/crypto/nss_crypto_module_delegate.h
@@ -35,6 +35,7 @@
   // user entered.
   virtual std::string RequestPassword(const std::string& slot_name, bool retry,
                                       bool* cancelled) = 0;
+
 };
 
 // Extends CryptoModuleBlockingPasswordDelegate with the ability to return a
diff --git a/crypto/nss_util.cc b/crypto/nss_util.cc
index 5ed2fa0..96ee060 100644
--- a/crypto/nss_util.cc
+++ b/crypto/nss_util.cc
@@ -15,9 +15,6 @@
 #include <memory>
 #include <utility>
 
-#include "base/location.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
 #include "crypto/nss_util_internal.h"
 
 #if defined(OS_OPENBSD)
@@ -32,7 +29,6 @@
 #include <map>
 #include <vector>
 
-#include "base/base_paths.h"
 #include "base/bind.h"
 #include "base/cpu.h"
 #include "base/debug/alias.h"
@@ -42,16 +38,27 @@
 #include "base/files/file_util.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
 #include "base/native_library.h"
-#include "base/path_service.h"
+#include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
-#include "base/synchronization/lock.h"
 #include "base/threading/thread_checker.h"
 #include "base/threading/thread_restrictions.h"
 #include "base/threading/worker_pool.h"
 #include "build/build_config.h"
+
+#if !defined(OS_CHROMEOS)
+#include "base/base_paths.h"
+#include "base/path_service.h"
+#endif
+
+// USE_NSS_CERTS means NSS is used for certificates and platform integration.
+// This requires additional support to manage the platform certificate and key
+// stores.
+#if defined(USE_NSS_CERTS)
+#include "base/synchronization/lock.h"
 #include "crypto/nss_crypto_module_delegate.h"
+#endif  // defined(USE_NSS_CERTS)
 
 namespace crypto {
 
@@ -81,6 +88,7 @@
   return result;
 }
 
+#if defined(USE_NSS_CERTS)
 #if !defined(OS_CHROMEOS)
 base::FilePath GetDefaultConfigDirectory() {
   base::FilePath dir;
@@ -126,13 +134,13 @@
                                                      retry != PR_FALSE,
                                                      &cancelled);
     if (cancelled)
-      return nullptr;
+      return NULL;
     char* result = PORT_Strdup(password.c_str());
     password.replace(0, password.size(), password.size(), 0);
     return result;
   }
-  DLOG(ERROR) << "PK11 password requested with nullptr arg";
-  return nullptr;
+  DLOG(ERROR) << "PK11 password requested with NULL arg";
+  return NULL;
 }
 
 // NSS creates a local cache of the sqlite database if it detects that the
@@ -142,6 +150,10 @@
 // the NSS environment variable NSS_SDB_USE_CACHE to "yes" to override NSS's
 // detection when database_dir is on NFS.  See http://crbug.com/48585.
 //
+// TODO(wtc): port this function to other USE_NSS_CERTS platforms.  It is
+// defined only for OS_LINUX and OS_OPENBSD simply because the statfs structure
+// is OS-specific.
+//
 // Because this function sets an environment variable it must be run before we
 // go multi-threaded.
 void UseLocalCacheOfNSSDatabaseIfNFS(const base::FilePath& database_dir) {
@@ -166,13 +178,15 @@
   }
 }
 
+#endif  // defined(USE_NSS_CERTS)
+
 // A singleton to initialize/deinitialize NSPR.
 // Separate from the NSS singleton because we initialize NSPR on the UI thread.
 // Now that we're leaking the singleton, we could merge back with the NSS
 // singleton.
 class NSPRInitSingleton {
  private:
-  friend struct base::LazyInstanceTraitsBase<NSPRInitSingleton>;
+  friend struct base::DefaultLazyInstanceTraits<NSPRInitSingleton>;
 
   NSPRInitSingleton() {
     PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
@@ -218,8 +232,8 @@
   }
 
   ScopedPK11Slot GetPublicSlot() {
-    return ScopedPK11Slot(public_slot_ ? PK11_ReferenceSlot(public_slot_.get())
-                                       : nullptr);
+    return ScopedPK11Slot(
+        public_slot_ ? PK11_ReferenceSlot(public_slot_.get()) : NULL);
   }
 
   ScopedPK11Slot GetPrivateSlot(
@@ -264,13 +278,13 @@
 };
 
 class ScopedChapsLoadFixup {
- public:
-  ScopedChapsLoadFixup();
-  ~ScopedChapsLoadFixup();
+  public:
+    ScopedChapsLoadFixup();
+    ~ScopedChapsLoadFixup();
 
- private:
+  private:
 #if defined(COMPONENT_BUILD)
-  void* chaps_handle_;
+    void *chaps_handle_;
 #endif
 };
 
@@ -346,17 +360,17 @@
     DCHECK(!initializing_tpm_token_);
     // If EnableTPMTokenForNSS hasn't been called, return false.
     if (!tpm_token_enabled_for_nss_) {
-      base::ThreadTaskRunnerHandle::Get()->PostTask(
-          FROM_HERE, base::Bind(callback, false));
+      base::MessageLoop::current()->PostTask(FROM_HERE,
+                                             base::Bind(callback, false));
       return;
     }
 
     // If everything is already initialized, then return true.
     // Note that only |tpm_slot_| is checked, since |chaps_module_| could be
-    // nullptr in tests while |tpm_slot_| has been set to the test DB.
+    // NULL in tests while |tpm_slot_| has been set to the test DB.
     if (tpm_slot_) {
-      base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
-                                                    base::Bind(callback, true));
+      base::MessageLoop::current()->PostTask(FROM_HERE,
+                                             base::Bind(callback, true));
       return;
     }
 
@@ -368,15 +382,18 @@
     if (base::WorkerPool::PostTaskAndReply(
             FROM_HERE,
             base::Bind(&NSSInitSingleton::InitializeTPMTokenOnWorkerThread,
-                       system_slot_id, tpm_args_ptr),
+                       system_slot_id,
+                       tpm_args_ptr),
             base::Bind(&NSSInitSingleton::OnInitializedTPMTokenAndSystemSlot,
                        base::Unretained(this),  // NSSInitSingleton is leaky
-                       callback, base::Passed(&tpm_args)),
-            true /* task_is_slow */)) {
+                       callback,
+                       base::Passed(&tpm_args)),
+            true /* task_is_slow */
+            )) {
       initializing_tpm_token_ = true;
     } else {
-      base::ThreadTaskRunnerHandle::Get()->PostTask(
-          FROM_HERE, base::Bind(callback, false));
+      base::MessageLoop::current()->PostTask(FROM_HERE,
+                                             base::Bind(callback, false));
     }
   }
 
@@ -490,7 +507,7 @@
         "%s %s", kUserNSSDatabaseName, username_hash.c_str());
     ScopedPK11Slot public_slot(OpenPersistentNSSDBForPath(db_name, path));
     chromeos_user_map_[username_hash] =
-        base::MakeUnique<ChromeOSUserData>(std::move(public_slot));
+        new ChromeOSUserData(std::move(public_slot));
     return true;
   }
 
@@ -527,12 +544,15 @@
     TPMModuleAndSlot* tpm_args_ptr = tpm_args.get();
     base::WorkerPool::PostTaskAndReply(
         FROM_HERE,
-        base::Bind(&NSSInitSingleton::InitializeTPMTokenOnWorkerThread, slot_id,
+        base::Bind(&NSSInitSingleton::InitializeTPMTokenOnWorkerThread,
+                   slot_id,
                    tpm_args_ptr),
         base::Bind(&NSSInitSingleton::OnInitializedTPMForChromeOSUser,
                    base::Unretained(this),  // NSSInitSingleton is leaky
-                   username_hash, base::Passed(&tpm_args)),
-        true /* task_is_slow */);
+                   username_hash,
+                   base::Passed(&tpm_args)),
+        true /* task_is_slow */
+        );
   }
 
   void OnInitializedTPMForChromeOSUser(
@@ -581,7 +601,7 @@
     if (username_hash.empty()) {
       DVLOG(2) << "empty username_hash";
       if (!callback.is_null()) {
-        base::ThreadTaskRunnerHandle::Get()->PostTask(
+        base::MessageLoop::current()->PostTask(
             FROM_HERE, base::Bind(callback, base::Passed(ScopedPK11Slot())));
       }
       return ScopedPK11Slot();
@@ -594,14 +614,15 @@
 
   void CloseChromeOSUserForTesting(const std::string& username_hash) {
     DCHECK(thread_checker_.CalledOnValidThread());
-    auto i = chromeos_user_map_.find(username_hash);
+    ChromeOSUserMap::iterator i = chromeos_user_map_.find(username_hash);
     DCHECK(i != chromeos_user_map_.end());
+    delete i->second;
     chromeos_user_map_.erase(i);
   }
 
   void SetSystemKeySlotForTesting(ScopedPK11Slot slot) {
     // Ensure that a previous value of test_system_slot_ is not overwritten.
-    // Unsetting, i.e. setting a nullptr, however is allowed.
+    // Unsetting, i.e. setting a NULL, however is allowed.
     DCHECK(!slot || !test_system_slot_);
     test_system_slot_ = std::move(slot);
     if (test_system_slot_) {
@@ -637,7 +658,7 @@
     // TODO(mattm): chromeos::TPMTokenloader always calls
     // InitializeTPMTokenAndSystemSlot with slot 0.  If the system slot is
     // disabled, tpm_slot_ will be the first user's slot instead. Can that be
-    // detected and return nullptr instead?
+    // detected and return NULL instead?
 
     base::Closure wrapped_callback;
     if (!callback.is_null()) {
@@ -652,18 +673,20 @@
   }
 #endif
 
+#if defined(USE_NSS_CERTS)
   base::Lock* write_lock() {
     return &write_lock_;
   }
+#endif  // defined(USE_NSS_CERTS)
 
  private:
-  friend struct base::LazyInstanceTraitsBase<NSSInitSingleton>;
+  friend struct base::DefaultLazyInstanceTraits<NSSInitSingleton>;
 
   NSSInitSingleton()
       : tpm_token_enabled_for_nss_(false),
         initializing_tpm_token_(false),
-        chaps_module_(nullptr),
-        root_(nullptr) {
+        chaps_module_(NULL),
+        root_(NULL) {
     // It's safe to construct on any thread, since LazyInstance will prevent any
     // other threads from accessing until the constructor is done.
     thread_checker_.DetachFromThread();
@@ -686,53 +709,73 @@
     }
 
     SECStatus status = SECFailure;
-    base::FilePath database_dir = GetInitialConfigDirectory();
-    if (!database_dir.empty()) {
-      // This duplicates the work which should have been done in
-      // EarlySetupForNSSInit. However, this function is idempotent so
-      // there's no harm done.
-      UseLocalCacheOfNSSDatabaseIfNFS(database_dir);
+    bool nodb_init = false;
 
-      // Initialize with a persistent database (likely, ~/.pki/nssdb).
-      // Use "sql:" which can be shared by multiple processes safely.
-      std::string nss_config_dir =
-          base::StringPrintf("sql:%s", database_dir.value().c_str());
-#if defined(OS_CHROMEOS)
-      status = NSS_Init(nss_config_dir.c_str());
-#else
-      status = NSS_InitReadWrite(nss_config_dir.c_str());
+#if !defined(USE_NSS_CERTS)
+    // Use the system certificate store, so initialize NSS without database.
+    nodb_init = true;
 #endif
-      if (status != SECSuccess) {
-        LOG(ERROR) << "Error initializing NSS with a persistent "
-                      "database (" << nss_config_dir
-                   << "): " << GetNSSErrorMessage();
-      }
-    }
-    if (status != SECSuccess) {
-      VLOG(1) << "Initializing NSS without a persistent database.";
-      status = NSS_NoDB_Init(nullptr);
+
+    if (nodb_init) {
+      status = NSS_NoDB_Init(NULL);
       if (status != SECSuccess) {
         CrashOnNSSInitFailure();
         return;
       }
+#if defined(OS_IOS)
+      root_ = InitDefaultRootCerts();
+#endif  // defined(OS_IOS)
+    } else {
+#if defined(USE_NSS_CERTS)
+      base::FilePath database_dir = GetInitialConfigDirectory();
+      if (!database_dir.empty()) {
+        // This duplicates the work which should have been done in
+        // EarlySetupForNSSInit. However, this function is idempotent so
+        // there's no harm done.
+        UseLocalCacheOfNSSDatabaseIfNFS(database_dir);
+
+        // Initialize with a persistent database (likely, ~/.pki/nssdb).
+        // Use "sql:" which can be shared by multiple processes safely.
+        std::string nss_config_dir =
+            base::StringPrintf("sql:%s", database_dir.value().c_str());
+#if defined(OS_CHROMEOS)
+        status = NSS_Init(nss_config_dir.c_str());
+#else
+        status = NSS_InitReadWrite(nss_config_dir.c_str());
+#endif
+        if (status != SECSuccess) {
+          LOG(ERROR) << "Error initializing NSS with a persistent "
+                        "database (" << nss_config_dir
+                     << "): " << GetNSSErrorMessage();
+        }
+      }
+      if (status != SECSuccess) {
+        VLOG(1) << "Initializing NSS without a persistent database.";
+        status = NSS_NoDB_Init(NULL);
+        if (status != SECSuccess) {
+          CrashOnNSSInitFailure();
+          return;
+        }
+      }
+
+      PK11_SetPasswordFunc(PKCS11PasswordFunc);
+
+      // If we haven't initialized the password for the NSS databases,
+      // initialize an empty-string password so that we don't need to
+      // log in.
+      PK11SlotInfo* slot = PK11_GetInternalKeySlot();
+      if (slot) {
+        // PK11_InitPin may write to the keyDB, but no other thread can use NSS
+        // yet, so we don't need to lock.
+        if (PK11_NeedUserInit(slot))
+          PK11_InitPin(slot, NULL, NULL);
+        PK11_FreeSlot(slot);
+      }
+
+      root_ = InitDefaultRootCerts();
+#endif  // defined(USE_NSS_CERTS)
     }
 
-    PK11_SetPasswordFunc(PKCS11PasswordFunc);
-
-    // If we haven't initialized the password for the NSS databases,
-    // initialize an empty-string password so that we don't need to
-    // log in.
-    PK11SlotInfo* slot = PK11_GetInternalKeySlot();
-    if (slot) {
-      // PK11_InitPin may write to the keyDB, but no other thread can use NSS
-      // yet, so we don't need to lock.
-      if (PK11_NeedUserInit(slot))
-        PK11_InitPin(slot, nullptr, nullptr);
-      PK11_FreeSlot(slot);
-    }
-
-    root_ = InitDefaultRootCerts();
-
     // Disable MD5 certificate signatures. (They are disabled by default in
     // NSS 3.14.)
     NSS_SetAlgorithmPolicy(SEC_OID_MD5, 0, NSS_USE_ALG_IN_CERT_SIGNATURE);
@@ -745,18 +788,18 @@
   // down.
   ~NSSInitSingleton() {
 #if defined(OS_CHROMEOS)
-    chromeos_user_map_.clear();
+    STLDeleteValues(&chromeos_user_map_);
 #endif
     tpm_slot_.reset();
     if (root_) {
       SECMOD_UnloadUserModule(root_);
       SECMOD_DestroyModule(root_);
-      root_ = nullptr;
+      root_ = NULL;
     }
     if (chaps_module_) {
       SECMOD_UnloadUserModule(chaps_module_);
       SECMOD_DestroyModule(chaps_module_);
-      chaps_module_ = nullptr;
+      chaps_module_ = NULL;
     }
 
     SECStatus status = NSS_Shutdown();
@@ -769,14 +812,14 @@
 
   // Load nss's built-in root certs.
   SECMODModule* InitDefaultRootCerts() {
-    SECMODModule* root = LoadModule("Root Certs", "libnssckbi.so", nullptr);
+    SECMODModule* root = LoadModule("Root Certs", "libnssckbi.so", NULL);
     if (root)
       return root;
 
     // Aw, snap.  Can't find/load root cert shared library.
     // This will make it hard to talk to anybody via https.
     // TODO(mattm): Re-add the NOTREACHED here when crbug.com/310972 is fixed.
-    return nullptr;
+    return NULL;
   }
 
   // Load the given module for this NSS session.
@@ -792,17 +835,17 @@
     // https://bugzilla.mozilla.org/show_bug.cgi?id=642546 was filed
     // on NSS codebase to address this.
     SECMODModule* module = SECMOD_LoadUserModule(
-        const_cast<char*>(modparams.c_str()), nullptr, PR_FALSE);
+        const_cast<char*>(modparams.c_str()), NULL, PR_FALSE);
     if (!module) {
       LOG(ERROR) << "Error loading " << name << " module into NSS: "
                  << GetNSSErrorMessage();
-      return nullptr;
+      return NULL;
     }
     if (!module->loaded) {
       LOG(ERROR) << "After loading " << name << ", loaded==false: "
                  << GetNSSErrorMessage();
       SECMOD_DestroyModule(module);
-      return nullptr;
+      return NULL;
     }
     return module;
   }
@@ -815,12 +858,15 @@
   crypto::ScopedPK11Slot tpm_slot_;
   SECMODModule* root_;
 #if defined(OS_CHROMEOS)
-  std::map<std::string, std::unique_ptr<ChromeOSUserData>> chromeos_user_map_;
+  typedef std::map<std::string, ChromeOSUserData*> ChromeOSUserMap;
+  ChromeOSUserMap chromeos_user_map_;
   ScopedPK11Slot test_system_slot_;
 #endif
+#if defined(USE_NSS_CERTS)
   // TODO(davidben): When https://bugzilla.mozilla.org/show_bug.cgi?id=564011
   // is fixed, we will no longer need the lock.
   base::Lock write_lock_;
+#endif  // defined(USE_NSS_CERTS)
 
   base::ThreadChecker thread_checker_;
 };
@@ -829,6 +875,7 @@
     g_nss_singleton = LAZY_INSTANCE_INITIALIZER;
 }  // namespace
 
+#if defined(USE_NSS_CERTS)
 ScopedPK11Slot OpenSoftwareNSSDB(const base::FilePath& path,
                                  const std::string& description) {
   const std::string modspec =
@@ -838,7 +885,7 @@
   PK11SlotInfo* db_slot = SECMOD_OpenUserDB(modspec.c_str());
   if (db_slot) {
     if (PK11_NeedUserInit(db_slot))
-      PK11_InitPin(db_slot, nullptr, nullptr);
+      PK11_InitPin(db_slot, NULL, NULL);
   } else {
     LOG(ERROR) << "Error opening persistent database (" << modspec
                << "): " << GetNSSErrorMessage();
@@ -851,6 +898,7 @@
   if (!database_dir.empty())
     UseLocalCacheOfNSSDatabaseIfNFS(database_dir);
 }
+#endif
 
 void EnsureNSPRInit() {
   g_nspr_singleton.Get();
@@ -868,12 +916,13 @@
   return !!NSS_VersionCheck(version);
 }
 
+#if defined(USE_NSS_CERTS)
 base::Lock* GetNSSWriteLock() {
   return g_nss_singleton.Get().write_lock();
 }
 
 AutoNSSWriteLock::AutoNSSWriteLock() : lock_(GetNSSWriteLock()) {
-  // May be nullptr if the lock is not needed in our version of NSS.
+  // May be NULL if the lock is not needed in our version of NSS.
   if (lock_)
     lock_->Acquire();
 }
@@ -893,6 +942,7 @@
 AutoSECMODListReadLock::~AutoSECMODListReadLock() {
   SECMOD_ReleaseReadLock(lock_);
 }
+#endif  // defined(USE_NSS_CERTS)
 
 #if defined(OS_CHROMEOS)
 ScopedPK11Slot GetSystemNSSKeySlot(
diff --git a/crypto/nss_util.h b/crypto/nss_util.h
index 5c34fc8..a8b57ff 100644
--- a/crypto/nss_util.h
+++ b/crypto/nss_util.h
@@ -14,6 +14,7 @@
 #include "crypto/crypto_export.h"
 
 namespace base {
+class FilePath;
 class Lock;
 class Time;
 }  // namespace base
diff --git a/crypto/nss_util_internal.h b/crypto/nss_util_internal.h
index 080ac10..697e376 100644
--- a/crypto/nss_util_internal.h
+++ b/crypto/nss_util_internal.h
@@ -7,8 +7,6 @@
 
 #include <secmodt.h>
 
-#include <string>
-
 #include "base/callback.h"
 #include "base/compiler_specific.h"
 #include "base/macros.h"
diff --git a/crypto/openssl_bio_string.cc b/crypto/openssl_bio_string.cc
new file mode 100644
index 0000000..4880500
--- /dev/null
+++ b/crypto/openssl_bio_string.cc
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/openssl_bio_string.h"
+
+#include <openssl/bio.h>
+#include <string.h>
+
+namespace crypto {
+
+namespace {
+
+int bio_string_write(BIO* bio, const char* data, int len) {
+  reinterpret_cast<std::string*>(bio->ptr)->append(data, len);
+  return len;
+}
+
+int bio_string_puts(BIO* bio, const char* data) {
+  // Note: unlike puts(), BIO_puts does not add a newline.
+  return bio_string_write(bio, data, strlen(data));
+}
+
+long bio_string_ctrl(BIO* bio, int cmd, long num, void* ptr) {
+  std::string* str = reinterpret_cast<std::string*>(bio->ptr);
+  switch (cmd) {
+    case BIO_CTRL_RESET:
+      str->clear();
+      return 1;
+    case BIO_C_FILE_SEEK:
+      return -1;
+    case BIO_C_FILE_TELL:
+      return str->size();
+    case BIO_CTRL_FLUSH:
+      return 1;
+    default:
+      return 0;
+  }
+}
+
+int bio_string_new(BIO* bio) {
+  bio->ptr = NULL;
+  bio->init = 0;
+  return 1;
+}
+
+int bio_string_free(BIO* bio) {
+  // The string is owned by the caller, so there's nothing to do here.
+  return bio != NULL;
+}
+
+BIO_METHOD bio_string_methods = {
+    // TODO(mattm): Should add some type number too? (bio.h uses 1-24)
+    BIO_TYPE_SOURCE_SINK,
+    "bio_string",
+    bio_string_write,
+    NULL, /* read */
+    bio_string_puts,
+    NULL, /* gets */
+    bio_string_ctrl,
+    bio_string_new,
+    bio_string_free,
+    NULL, /* callback_ctrl */
+};
+
+}  // namespace
+
+BIO* BIO_new_string(std::string* out) {
+  BIO* bio = BIO_new(&bio_string_methods);
+  if (!bio)
+    return bio;
+  bio->ptr = out;
+  bio->init = 1;
+  return bio;
+}
+
+}  // namespace crypto
diff --git a/crypto/openssl_bio_string.h b/crypto/openssl_bio_string.h
new file mode 100644
index 0000000..ca46c12
--- /dev/null
+++ b/crypto/openssl_bio_string.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_OPENSSL_BIO_STRING_H_
+#define CRYPTO_OPENSSL_BIO_STRING_H_
+
+#include <string>
+
+#include "crypto/crypto_export.h"
+
+// From <openssl/bio.h>
+typedef struct bio_st BIO;
+
+namespace crypto {
+
+// Creates a new BIO that can be used with OpenSSL's various output functions,
+// and which will write all output directly into |out|. This is primarily
+// intended as a utility to reduce the amount of copying and separate
+// allocations when performing extensive string modifications or streaming
+// within OpenSSL.
+//
+// Note: |out| must remain valid for the duration of the BIO.
+CRYPTO_EXPORT BIO* BIO_new_string(std::string* out);
+
+}  // namespace crypto
+
+#endif  // CRYPTO_OPENSSL_BIO_STRING_H_
+
diff --git a/crypto/openssl_bio_string_unittest.cc b/crypto/openssl_bio_string_unittest.cc
new file mode 100644
index 0000000..9dfa0e7
--- /dev/null
+++ b/crypto/openssl_bio_string_unittest.cc
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/openssl_bio_string.h"
+
+#include <openssl/bio.h>
+
+#include "crypto/scoped_openssl_types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace crypto {
+
+TEST(OpenSSLBIOString, TestWrite) {
+  std::string s;
+  const std::string expected1("a one\nb 2\n");
+  const std::string expected2("c d e f");
+  const std::string expected3("g h i");
+  {
+    ScopedBIO bio(BIO_new_string(&s));
+    ASSERT_TRUE(bio.get());
+
+    EXPECT_EQ(static_cast<int>(expected1.size()),
+              BIO_printf(bio.get(), "a %s\nb %i\n", "one", 2));
+    EXPECT_EQ(expected1, s);
+
+    EXPECT_EQ(1, BIO_flush(bio.get()));
+    EXPECT_EQ(expected1, s);
+
+    EXPECT_EQ(static_cast<int>(expected2.size()),
+              BIO_write(bio.get(), expected2.data(), expected2.size()));
+    EXPECT_EQ(expected1 + expected2, s);
+
+    EXPECT_EQ(static_cast<int>(expected3.size()),
+              BIO_puts(bio.get(), expected3.c_str()));
+    EXPECT_EQ(expected1 + expected2 + expected3, s);
+  }
+  EXPECT_EQ(expected1 + expected2 + expected3, s);
+}
+
+TEST(OpenSSLBIOString, TestReset) {
+  std::string s;
+  const std::string expected1("a b c\n");
+  const std::string expected2("d e f g\n");
+  {
+    ScopedBIO bio(BIO_new_string(&s));
+    ASSERT_TRUE(bio.get());
+
+    EXPECT_EQ(static_cast<int>(expected1.size()),
+              BIO_write(bio.get(), expected1.data(), expected1.size()));
+    EXPECT_EQ(expected1, s);
+
+    EXPECT_EQ(1, BIO_reset(bio.get()));
+    EXPECT_EQ(std::string(), s);
+
+    EXPECT_EQ(static_cast<int>(expected2.size()),
+              BIO_write(bio.get(), expected2.data(), expected2.size()));
+    EXPECT_EQ(expected2, s);
+  }
+  EXPECT_EQ(expected2, s);
+}
+
+}  // namespace crypto
diff --git a/crypto/openssl_util.cc b/crypto/openssl_util.cc
index 2349d42..78c6cbb 100644
--- a/crypto/openssl_util.cc
+++ b/crypto/openssl_util.cc
@@ -14,8 +14,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <string>
-
 #include "base/logging.h"
 #include "base/strings/string_piece.h"
 
@@ -49,7 +47,7 @@
 }
 
 void ClearOpenSSLERRStack(const tracked_objects::Location& location) {
-  if (DCHECK_IS_ON() && VLOG_IS_ON(1)) {
+  if (logging::DEBUG_MODE && VLOG_IS_ON(1)) {
     uint32_t error_num = ERR_peek_error();
     if (error_num == 0)
       return;
diff --git a/crypto/openssl_util.h b/crypto/openssl_util.h
index 54f06d3..d608cde 100644
--- a/crypto/openssl_util.h
+++ b/crypto/openssl_util.h
@@ -15,7 +15,7 @@
 
 // Provides a buffer of at least MIN_SIZE bytes, for use when calling OpenSSL's
 // SHA256, HMAC, etc functions, adapting the buffer sizing rules to meet those
-// of our base wrapper APIs.
+// of the our base wrapper APIs.
 // This allows the library to write directly to the caller's buffer if it is of
 // sufficient size, but if not it will write to temporary |min_sized_buffer_|
 // of required size and then its content is automatically copied out on
diff --git a/crypto/p224_spake.cc b/crypto/p224_spake.cc
index 7275a45..1574105 100644
--- a/crypto/p224_spake.cc
+++ b/crypto/p224_spake.cc
@@ -5,14 +5,14 @@
 // This code implements SPAKE2, a variant of EKE:
 //  http://www.di.ens.fr/~pointche/pub.php?reference=AbPo04
 
-#include "crypto/p224_spake.h"
+#include <crypto/p224_spake.h>
 
 #include <algorithm>
 
-#include "base/logging.h"
-#include "crypto/p224.h"
-#include "crypto/random.h"
-#include "crypto/secure_util.h"
+#include <base/logging.h>
+#include <crypto/p224.h>
+#include <crypto/random.h>
+#include <crypto/secure_util.h>
 
 namespace {
 
@@ -27,9 +27,6 @@
 // #include <openssl/obj_mac.h>
 // #include <openssl/sha.h>
 //
-// // Silence a presubmit.
-// #define PRINTF printf
-//
 // static const char kSeed1[] = "P224 point generation seed (M)";
 // static const char kSeed2[] = "P224 point generation seed (N)";
 //
@@ -55,7 +52,7 @@
 //       EC_POINT_get_affine_coordinates_GFp(p224, p, &x, &y, NULL);
 //       char* x_str = BN_bn2hex(&x);
 //       char* y_str = BN_bn2hex(&y);
-//       PRINTF("Found after %u iterations:\n%s\n%s\n", i, x_str, y_str);
+//       printf("Found after %u iterations:\n%s\n%s\n", i, x_str, y_str);
 //       OPENSSL_free(x_str);
 //       OPENSSL_free(y_str);
 //       BN_free(&x);
diff --git a/crypto/p224_spake.h b/crypto/p224_spake.h
index b5cc70a..f9a44e7 100644
--- a/crypto/p224_spake.h
+++ b/crypto/p224_spake.h
@@ -5,14 +5,12 @@
 #ifndef CRYPTO_P224_SPAKE_H_
 #define CRYPTO_P224_SPAKE_H_
 
+#include <crypto/p224.h>
+#include <crypto/sha2.h>
 #include <stdint.h>
 
-#include <string>
-
 #include "base/gtest_prod_util.h"
 #include "base/strings/string_piece.h"
-#include "crypto/p224.h"
-#include "crypto/sha2.h"
 
 namespace crypto {
 
diff --git a/crypto/p224_spake_unittest.cc b/crypto/p224_spake_unittest.cc
index 5ecb6fd..3bca430 100644
--- a/crypto/p224_spake_unittest.cc
+++ b/crypto/p224_spake_unittest.cc
@@ -127,7 +127,7 @@
 
     // We'll only be testing small values of i, but we don't want that to bias
     // the test coverage. So we disperse the value of i by multiplying by the
-    // FNV, 32-bit prime, producing a simplistic PRNG.
+    // FNV, 32-bit prime, producing a poor-man's PRNG.
     const uint32_t rand = i * 16777619;
 
     for (unsigned round = 0;; round++) {
diff --git a/crypto/p224_unittest.cc b/crypto/p224_unittest.cc
index 8cfe6e7..faa08eb 100644
--- a/crypto/p224_unittest.cc
+++ b/crypto/p224_unittest.cc
@@ -778,8 +778,8 @@
   const std::string external = point.ToString();
 
   ASSERT_EQ(external.size(), 56u);
-  EXPECT_EQ(0, memcmp(external.data(), kBasePointExternal,
-                      sizeof(kBasePointExternal)));
+  EXPECT_TRUE(memcmp(external.data(), kBasePointExternal,
+                     sizeof(kBasePointExternal)) == 0);
 }
 
 TEST(P224, ScalarBaseMult) {
@@ -789,8 +789,8 @@
     p224::ScalarBaseMult(kNISTTestVectors[i].scalar, &point);
     const std::string external = point.ToString();
     ASSERT_EQ(external.size(), 56u);
-    EXPECT_EQ(0, memcmp(external.data(), kNISTTestVectors[i].affine,
-                        external.size()));
+    EXPECT_TRUE(memcmp(external.data(), kNISTTestVectors[i].affine,
+                       external.size()) == 0);
   }
 }
 
@@ -804,9 +804,9 @@
 
   p224::Negate(b, &minus_b);
   p224::Add(a, b, &sum);
-  EXPECT_NE(0, memcmp(&sum, &a, sizeof(sum)));
+  EXPECT_TRUE(memcmp(&sum, &a, sizeof(sum)) != 0);
   p224::Add(minus_b, sum, &a_again);
-  EXPECT_EQ(a_again.ToString(), a.ToString());
+  EXPECT_TRUE(a_again.ToString() == a.ToString());
 }
 
 TEST(P224, Infinity) {
@@ -816,7 +816,7 @@
   // Test that x^0 = ∞.
   Point a;
   p224::ScalarBaseMult(reinterpret_cast<const uint8_t*>(zeros), &a);
-  EXPECT_EQ(0, memcmp(zeros, a.ToString().data(), sizeof(zeros)));
+  EXPECT_TRUE(memcmp(zeros, a.ToString().data(), sizeof(zeros)) == 0);
 
   // We shouldn't allow ∞ to be imported.
   EXPECT_FALSE(a.SetFromString(std::string(zeros, sizeof(zeros))));
diff --git a/crypto/random.h b/crypto/random.h
index 61cde80..002616b 100644
--- a/crypto/random.h
+++ b/crypto/random.h
@@ -18,4 +18,4 @@
 
 }
 
-#endif  // CRYPTO_RANDOM_H_
+#endif
diff --git a/crypto/random_unittest.cc b/crypto/random_unittest.cc
index dfdcfd5..caee512 100644
--- a/crypto/random_unittest.cc
+++ b/crypto/random_unittest.cc
@@ -6,8 +6,6 @@
 
 #include <stddef.h>
 
-#include <string>
-
 #include "base/strings/string_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
diff --git a/crypto/rsa_private_key.cc b/crypto/rsa_private_key.cc
index 075f5e4..c546c91 100644
--- a/crypto/rsa_private_key.cc
+++ b/crypto/rsa_private_key.cc
@@ -4,110 +4,385 @@
 
 #include "crypto/rsa_private_key.h"
 
+#include <stddef.h>
 #include <stdint.h>
 
-#include <memory>
-#include <utility>
+#include <algorithm>
 
 #include "base/logging.h"
-#include "crypto/openssl_util.h"
-#include "third_party/boringssl/src/include/openssl/bn.h"
-#include "third_party/boringssl/src/include/openssl/bytestring.h"
-#include "third_party/boringssl/src/include/openssl/evp.h"
-#include "third_party/boringssl/src/include/openssl/mem.h"
-#include "third_party/boringssl/src/include/openssl/rsa.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_util.h"
+
+// This file manually encodes and decodes RSA private keys using PrivateKeyInfo
+// from PKCS #8 and RSAPrivateKey from PKCS #1. These structures are:
+//
+// PrivateKeyInfo ::= SEQUENCE {
+//   version Version,
+//   privateKeyAlgorithm PrivateKeyAlgorithmIdentifier,
+//   privateKey PrivateKey,
+//   attributes [0] IMPLICIT Attributes OPTIONAL
+// }
+//
+// RSAPrivateKey ::= SEQUENCE {
+//   version Version,
+//   modulus INTEGER,
+//   publicExponent INTEGER,
+//   privateExponent INTEGER,
+//   prime1 INTEGER,
+//   prime2 INTEGER,
+//   exponent1 INTEGER,
+//   exponent2 INTEGER,
+//   coefficient INTEGER
+// }
+
+namespace {
+// Helper for error handling during key import.
+#define READ_ASSERT(truth) \
+  if (!(truth)) { \
+    NOTREACHED(); \
+    return false; \
+  }
+}  // namespace
 
 namespace crypto {
 
-// static
-std::unique_ptr<RSAPrivateKey> RSAPrivateKey::Create(uint16_t num_bits) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
+const uint8_t PrivateKeyInfoCodec::kRsaAlgorithmIdentifier[] = {
+    0x30, 0x0D, 0x06, 0x09, 0x2A, 0x86, 0x48, 0x86,
+    0xF7, 0x0D, 0x01, 0x01, 0x01, 0x05, 0x00};
 
-  bssl::UniquePtr<RSA> rsa_key(RSA_new());
-  bssl::UniquePtr<BIGNUM> bn(BN_new());
-  if (!rsa_key.get() || !bn.get() || !BN_set_word(bn.get(), 65537L))
-    return nullptr;
+PrivateKeyInfoCodec::PrivateKeyInfoCodec(bool big_endian)
+    : big_endian_(big_endian) {}
 
-  if (!RSA_generate_key_ex(rsa_key.get(), num_bits, bn.get(), nullptr))
-    return nullptr;
+PrivateKeyInfoCodec::~PrivateKeyInfoCodec() {}
 
-  std::unique_ptr<RSAPrivateKey> result(new RSAPrivateKey);
-  result->key_.reset(EVP_PKEY_new());
-  if (!result->key_ || !EVP_PKEY_set1_RSA(result->key_.get(), rsa_key.get()))
-    return nullptr;
+bool PrivateKeyInfoCodec::Export(std::vector<uint8_t>* output) {
+  std::list<uint8_t> content;
 
-  return result;
-}
+  // Version (always zero)
+  uint8_t version = 0;
 
-// static
-std::unique_ptr<RSAPrivateKey> RSAPrivateKey::CreateFromPrivateKeyInfo(
-    const std::vector<uint8_t>& input) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
+  PrependInteger(coefficient_, &content);
+  PrependInteger(exponent2_, &content);
+  PrependInteger(exponent1_, &content);
+  PrependInteger(prime2_, &content);
+  PrependInteger(prime1_, &content);
+  PrependInteger(private_exponent_, &content);
+  PrependInteger(public_exponent_, &content);
+  PrependInteger(modulus_, &content);
+  PrependInteger(&version, 1, &content);
+  PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
+  PrependTypeHeaderAndLength(kOctetStringTag, content.size(), &content);
 
-  CBS cbs;
-  CBS_init(&cbs, input.data(), input.size());
-  bssl::UniquePtr<EVP_PKEY> pkey(EVP_parse_private_key(&cbs));
-  if (!pkey || CBS_len(&cbs) != 0 || EVP_PKEY_id(pkey.get()) != EVP_PKEY_RSA)
-    return nullptr;
+  // RSA algorithm OID
+  for (size_t i = sizeof(kRsaAlgorithmIdentifier); i > 0; --i)
+    content.push_front(kRsaAlgorithmIdentifier[i - 1]);
 
-  std::unique_ptr<RSAPrivateKey> result(new RSAPrivateKey);
-  result->key_ = std::move(pkey);
-  return result;
-}
+  PrependInteger(&version, 1, &content);
+  PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
 
-// static
-std::unique_ptr<RSAPrivateKey> RSAPrivateKey::CreateFromKey(EVP_PKEY* key) {
-  DCHECK(key);
-  if (EVP_PKEY_type(key->type) != EVP_PKEY_RSA)
-    return nullptr;
-  std::unique_ptr<RSAPrivateKey> copy(new RSAPrivateKey);
-  EVP_PKEY_up_ref(key);
-  copy->key_.reset(key);
-  return copy;
-}
+  // Copy everying into the output.
+  output->reserve(content.size());
+  output->assign(content.begin(), content.end());
 
-RSAPrivateKey::RSAPrivateKey() {}
-
-RSAPrivateKey::~RSAPrivateKey() {}
-
-std::unique_ptr<RSAPrivateKey> RSAPrivateKey::Copy() const {
-  std::unique_ptr<RSAPrivateKey> copy(new RSAPrivateKey);
-  bssl::UniquePtr<RSA> rsa(EVP_PKEY_get1_RSA(key_.get()));
-  if (!rsa)
-    return nullptr;
-  copy->key_.reset(EVP_PKEY_new());
-  if (!EVP_PKEY_set1_RSA(copy->key_.get(), rsa.get()))
-    return nullptr;
-  return copy;
-}
-
-bool RSAPrivateKey::ExportPrivateKey(std::vector<uint8_t>* output) const {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  uint8_t *der;
-  size_t der_len;
-  bssl::ScopedCBB cbb;
-  if (!CBB_init(cbb.get(), 0) ||
-      !EVP_marshal_private_key(cbb.get(), key_.get()) ||
-      !CBB_finish(cbb.get(), &der, &der_len)) {
-    return false;
-  }
-  output->assign(der, der + der_len);
-  OPENSSL_free(der);
   return true;
 }
 
-bool RSAPrivateKey::ExportPublicKey(std::vector<uint8_t>* output) const {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  uint8_t *der;
-  size_t der_len;
-  bssl::ScopedCBB cbb;
-  if (!CBB_init(cbb.get(), 0) ||
-      !EVP_marshal_public_key(cbb.get(), key_.get()) ||
-      !CBB_finish(cbb.get(), &der, &der_len)) {
+bool PrivateKeyInfoCodec::ExportPublicKeyInfo(std::vector<uint8_t>* output) {
+  // Create a sequence with the modulus (n) and public exponent (e).
+  std::vector<uint8_t> bit_string;
+  if (!ExportPublicKey(&bit_string))
+    return false;
+
+  // Add the sequence as the contents of a bit string.
+  std::list<uint8_t> content;
+  PrependBitString(&bit_string[0], static_cast<int>(bit_string.size()),
+                   &content);
+
+  // Add the RSA algorithm OID.
+  for (size_t i = sizeof(kRsaAlgorithmIdentifier); i > 0; --i)
+    content.push_front(kRsaAlgorithmIdentifier[i - 1]);
+
+  // Finally, wrap everything in a sequence.
+  PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
+
+  // Copy everything into the output.
+  output->reserve(content.size());
+  output->assign(content.begin(), content.end());
+
+  return true;
+}
+
+bool PrivateKeyInfoCodec::ExportPublicKey(std::vector<uint8_t>* output) {
+  // Create a sequence with the modulus (n) and public exponent (e).
+  std::list<uint8_t> content;
+  PrependInteger(&public_exponent_[0],
+                 static_cast<int>(public_exponent_.size()),
+                 &content);
+  PrependInteger(&modulus_[0],  static_cast<int>(modulus_.size()), &content);
+  PrependTypeHeaderAndLength(kSequenceTag, content.size(), &content);
+
+  // Copy everything into the output.
+  output->reserve(content.size());
+  output->assign(content.begin(), content.end());
+
+  return true;
+}
+
+bool PrivateKeyInfoCodec::Import(const std::vector<uint8_t>& input) {
+  if (input.empty()) {
     return false;
   }
-  output->assign(der, der + der_len);
-  OPENSSL_free(der);
+
+  // Parse the private key info up to the public key values, ignoring
+  // the subsequent private key values.
+  uint8_t* src = const_cast<uint8_t*>(&input.front());
+  uint8_t* end = src + input.size();
+  if (!ReadSequence(&src, end) ||
+      !ReadVersion(&src, end) ||
+      !ReadAlgorithmIdentifier(&src, end) ||
+      !ReadTypeHeaderAndLength(&src, end, kOctetStringTag, NULL) ||
+      !ReadSequence(&src, end) ||
+      !ReadVersion(&src, end) ||
+      !ReadInteger(&src, end, &modulus_))
+    return false;
+
+  int mod_size = modulus_.size();
+  READ_ASSERT(mod_size % 2 == 0);
+  int primes_size = mod_size / 2;
+
+  if (!ReadIntegerWithExpectedSize(&src, end, 4, &public_exponent_) ||
+      !ReadIntegerWithExpectedSize(&src, end, mod_size, &private_exponent_) ||
+      !ReadIntegerWithExpectedSize(&src, end, primes_size, &prime1_) ||
+      !ReadIntegerWithExpectedSize(&src, end, primes_size, &prime2_) ||
+      !ReadIntegerWithExpectedSize(&src, end, primes_size, &exponent1_) ||
+      !ReadIntegerWithExpectedSize(&src, end, primes_size, &exponent2_) ||
+      !ReadIntegerWithExpectedSize(&src, end, primes_size, &coefficient_))
+    return false;
+
+  READ_ASSERT(src == end);
+
+
+  return true;
+}
+
+void PrivateKeyInfoCodec::PrependInteger(const std::vector<uint8_t>& in,
+                                         std::list<uint8_t>* out) {
+  uint8_t* ptr = const_cast<uint8_t*>(&in.front());
+  PrependIntegerImpl(ptr, in.size(), out, big_endian_);
+}
+
+// Helper to prepend an ASN.1 integer.
+void PrivateKeyInfoCodec::PrependInteger(uint8_t* val,
+                                         int num_bytes,
+                                         std::list<uint8_t>* data) {
+  PrependIntegerImpl(val, num_bytes, data, big_endian_);
+}
+
+void PrivateKeyInfoCodec::PrependIntegerImpl(uint8_t* val,
+                                             int num_bytes,
+                                             std::list<uint8_t>* data,
+                                             bool big_endian) {
+ // Reverse input if little-endian.
+ std::vector<uint8_t> tmp;
+ if (!big_endian) {
+   tmp.assign(val, val + num_bytes);
+   std::reverse(tmp.begin(), tmp.end());
+   val = &tmp.front();
+ }
+
+  // ASN.1 integers are unpadded byte arrays, so skip any null padding bytes
+  // from the most-significant end of the integer.
+  int start = 0;
+  while (start < (num_bytes - 1) && val[start] == 0x00) {
+    start++;
+    num_bytes--;
+  }
+  PrependBytes(val, start, num_bytes, data);
+
+  // ASN.1 integers are signed. To encode a positive integer whose sign bit
+  // (the most significant bit) would otherwise be set and make the number
+  // negative, ASN.1 requires a leading null byte to force the integer to be
+  // positive.
+  uint8_t front = data->front();
+  if ((front & 0x80) != 0) {
+    data->push_front(0x00);
+    num_bytes++;
+  }
+
+  PrependTypeHeaderAndLength(kIntegerTag, num_bytes, data);
+}
+
+bool PrivateKeyInfoCodec::ReadInteger(uint8_t** pos,
+                                      uint8_t* end,
+                                      std::vector<uint8_t>* out) {
+  return ReadIntegerImpl(pos, end, out, big_endian_);
+}
+
+bool PrivateKeyInfoCodec::ReadIntegerWithExpectedSize(
+    uint8_t** pos,
+    uint8_t* end,
+    size_t expected_size,
+    std::vector<uint8_t>* out) {
+  std::vector<uint8_t> temp;
+  if (!ReadIntegerImpl(pos, end, &temp, true))  // Big-Endian
+    return false;
+
+  int pad = expected_size - temp.size();
+  int index = 0;
+  if (out->size() == expected_size + 1) {
+    READ_ASSERT(out->front() == 0x00);
+    pad++;
+    index++;
+  } else {
+    READ_ASSERT(out->size() <= expected_size);
+  }
+
+  out->insert(out->end(), pad, 0x00);
+  out->insert(out->end(), temp.begin(), temp.end());
+
+  // Reverse output if little-endian.
+  if (!big_endian_)
+    std::reverse(out->begin(), out->end());
+  return true;
+}
+
+bool PrivateKeyInfoCodec::ReadIntegerImpl(uint8_t** pos,
+                                          uint8_t* end,
+                                          std::vector<uint8_t>* out,
+                                          bool big_endian) {
+  uint32_t length = 0;
+  if (!ReadTypeHeaderAndLength(pos, end, kIntegerTag, &length) || !length)
+    return false;
+
+  // The first byte can be zero to force positiveness. We can ignore this.
+  if (**pos == 0x00) {
+    ++(*pos);
+    --length;
+  }
+
+  if (length)
+    out->insert(out->end(), *pos, (*pos) + length);
+
+  (*pos) += length;
+
+  // Reverse output if little-endian.
+  if (!big_endian)
+    std::reverse(out->begin(), out->end());
+  return true;
+}
+
+void PrivateKeyInfoCodec::PrependBytes(uint8_t* val,
+                                       int start,
+                                       int num_bytes,
+                                       std::list<uint8_t>* data) {
+  while (num_bytes > 0) {
+    --num_bytes;
+    data->push_front(val[start + num_bytes]);
+  }
+}
+
+void PrivateKeyInfoCodec::PrependLength(size_t size, std::list<uint8_t>* data) {
+  // The high bit is used to indicate whether additional octets are needed to
+  // represent the length.
+  if (size < 0x80) {
+    data->push_front(static_cast<uint8_t>(size));
+  } else {
+    uint8_t num_bytes = 0;
+    while (size > 0) {
+      data->push_front(static_cast<uint8_t>(size & 0xFF));
+      size >>= 8;
+      num_bytes++;
+    }
+    CHECK_LE(num_bytes, 4);
+    data->push_front(0x80 | num_bytes);
+  }
+}
+
+void PrivateKeyInfoCodec::PrependTypeHeaderAndLength(
+    uint8_t type,
+    uint32_t length,
+    std::list<uint8_t>* output) {
+  PrependLength(length, output);
+  output->push_front(type);
+}
+
+void PrivateKeyInfoCodec::PrependBitString(uint8_t* val,
+                                           int num_bytes,
+                                           std::list<uint8_t>* output) {
+  // Start with the data.
+  PrependBytes(val, 0, num_bytes, output);
+  // Zero unused bits.
+  output->push_front(0);
+  // Add the length.
+  PrependLength(num_bytes + 1, output);
+  // Finally, add the bit string tag.
+  output->push_front((uint8_t)kBitStringTag);
+}
+
+bool PrivateKeyInfoCodec::ReadLength(uint8_t** pos,
+                                     uint8_t* end,
+                                     uint32_t* result) {
+  READ_ASSERT(*pos < end);
+  int length = 0;
+
+  // If the MSB is not set, the length is just the byte itself.
+  if (!(**pos & 0x80)) {
+    length = **pos;
+    (*pos)++;
+  } else {
+    // Otherwise, the lower 7 indicate the length of the length.
+    int length_of_length = **pos & 0x7F;
+    READ_ASSERT(length_of_length <= 4);
+    (*pos)++;
+    READ_ASSERT(*pos + length_of_length < end);
+
+    length = 0;
+    for (int i = 0; i < length_of_length; ++i) {
+      length <<= 8;
+      length |= **pos;
+      (*pos)++;
+    }
+  }
+
+  READ_ASSERT(*pos + length <= end);
+  if (result) *result = length;
+  return true;
+}
+
+bool PrivateKeyInfoCodec::ReadTypeHeaderAndLength(uint8_t** pos,
+                                                  uint8_t* end,
+                                                  uint8_t expected_tag,
+                                                  uint32_t* length) {
+  READ_ASSERT(*pos < end);
+  READ_ASSERT(**pos == expected_tag);
+  (*pos)++;
+
+  return ReadLength(pos, end, length);
+}
+
+bool PrivateKeyInfoCodec::ReadSequence(uint8_t** pos, uint8_t* end) {
+  return ReadTypeHeaderAndLength(pos, end, kSequenceTag, NULL);
+}
+
+bool PrivateKeyInfoCodec::ReadAlgorithmIdentifier(uint8_t** pos, uint8_t* end) {
+  READ_ASSERT(*pos + sizeof(kRsaAlgorithmIdentifier) < end);
+  READ_ASSERT(memcmp(*pos, kRsaAlgorithmIdentifier,
+                     sizeof(kRsaAlgorithmIdentifier)) == 0);
+  (*pos) += sizeof(kRsaAlgorithmIdentifier);
+  return true;
+}
+
+bool PrivateKeyInfoCodec::ReadVersion(uint8_t** pos, uint8_t* end) {
+  uint32_t length = 0;
+  if (!ReadTypeHeaderAndLength(pos, end, kIntegerTag, &length))
+    return false;
+
+  // The version should be zero.
+  for (uint32_t i = 0; i < length; ++i) {
+    READ_ASSERT(**pos == 0x00);
+    (*pos)++;
+  }
+
   return true;
 }
 
diff --git a/crypto/rsa_private_key.h b/crypto/rsa_private_key.h
index fc4c80c..d4808f5 100644
--- a/crypto/rsa_private_key.h
+++ b/crypto/rsa_private_key.h
@@ -7,17 +7,162 @@
 
 #include <stddef.h>
 #include <stdint.h>
-#include <openssl/base.h>
 
-#include <memory>
+#include <list>
 #include <vector>
 
 #include "base/macros.h"
 #include "build/build_config.h"
 #include "crypto/crypto_export.h"
 
+#if defined(USE_OPENSSL)
+// Forward declaration for openssl/*.h
+typedef struct evp_pkey_st EVP_PKEY;
+#else
+// Forward declaration.
+typedef struct PK11SlotInfoStr PK11SlotInfo;
+typedef struct SECKEYPrivateKeyStr SECKEYPrivateKey;
+typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
+#endif
+
+
 namespace crypto {
 
+// Used internally by RSAPrivateKey for serializing and deserializing
+// PKCS #8 PrivateKeyInfo and PublicKeyInfo.
+class PrivateKeyInfoCodec {
+ public:
+  // ASN.1 encoding of the AlgorithmIdentifier from PKCS #8.
+  static const uint8_t kRsaAlgorithmIdentifier[];
+
+  // ASN.1 tags for some types we use.
+  static const uint8_t kBitStringTag = 0x03;
+  static const uint8_t kIntegerTag = 0x02;
+  static const uint8_t kNullTag = 0x05;
+  static const uint8_t kOctetStringTag = 0x04;
+  static const uint8_t kSequenceTag = 0x30;
+
+  // |big_endian| here specifies the byte-significance of the integer components
+  // that will be parsed & serialized (modulus(), etc...) during Import(),
+  // Export() and ExportPublicKeyInfo() -- not the ASN.1 DER encoding of the
+  // PrivateKeyInfo/PublicKeyInfo (which is always big-endian).
+  explicit PrivateKeyInfoCodec(bool big_endian);
+
+  ~PrivateKeyInfoCodec();
+
+  // Exports the contents of the integer components to the ASN.1 DER encoding
+  // of the PrivateKeyInfo structure to |output|.
+  bool Export(std::vector<uint8_t>* output);
+
+  // Exports the contents of the integer components to the ASN.1 DER encoding
+  // of the PublicKeyInfo structure to |output|.
+  bool ExportPublicKeyInfo(std::vector<uint8_t>* output);
+
+  // Exports the contents of the integer components to the ASN.1 DER encoding
+  // of the RSAPublicKey structure to |output|.
+  bool ExportPublicKey(std::vector<uint8_t>* output);
+
+  // Parses the ASN.1 DER encoding of the PrivateKeyInfo structure in |input|
+  // and populates the integer components with |big_endian_| byte-significance.
+  // IMPORTANT NOTE: This is currently *not* security-approved for importing
+  // keys from unstrusted sources.
+  bool Import(const std::vector<uint8_t>& input);
+
+  // Accessors to the contents of the integer components of the PrivateKeyInfo
+  // structure.
+  std::vector<uint8_t>* modulus() { return &modulus_; }
+  std::vector<uint8_t>* public_exponent() { return &public_exponent_; }
+  std::vector<uint8_t>* private_exponent() { return &private_exponent_; }
+  std::vector<uint8_t>* prime1() { return &prime1_; }
+  std::vector<uint8_t>* prime2() { return &prime2_; }
+  std::vector<uint8_t>* exponent1() { return &exponent1_; }
+  std::vector<uint8_t>* exponent2() { return &exponent2_; }
+  std::vector<uint8_t>* coefficient() { return &coefficient_; }
+
+ private:
+  // Utility wrappers for PrependIntegerImpl that use the class's |big_endian_|
+  // value.
+  void PrependInteger(const std::vector<uint8_t>& in, std::list<uint8_t>* out);
+  void PrependInteger(uint8_t* val, int num_bytes, std::list<uint8_t>* data);
+
+  // Prepends the integer stored in |val| - |val + num_bytes| with |big_endian|
+  // byte-significance into |data| as an ASN.1 integer.
+  void PrependIntegerImpl(uint8_t* val,
+                          int num_bytes,
+                          std::list<uint8_t>* data,
+                          bool big_endian);
+
+  // Utility wrappers for ReadIntegerImpl that use the class's |big_endian_|
+  // value.
+  bool ReadInteger(uint8_t** pos, uint8_t* end, std::vector<uint8_t>* out);
+  bool ReadIntegerWithExpectedSize(uint8_t** pos,
+                                   uint8_t* end,
+                                   size_t expected_size,
+                                   std::vector<uint8_t>* out);
+
+  // Reads an ASN.1 integer from |pos|, and stores the result into |out| with
+  // |big_endian| byte-significance.
+  bool ReadIntegerImpl(uint8_t** pos,
+                       uint8_t* end,
+                       std::vector<uint8_t>* out,
+                       bool big_endian);
+
+  // Prepends the integer stored in |val|, starting a index |start|, for
+  // |num_bytes| bytes onto |data|.
+  void PrependBytes(uint8_t* val,
+                    int start,
+                    int num_bytes,
+                    std::list<uint8_t>* data);
+
+  // Helper to prepend an ASN.1 length field.
+  void PrependLength(size_t size, std::list<uint8_t>* data);
+
+  // Helper to prepend an ASN.1 type header.
+  void PrependTypeHeaderAndLength(uint8_t type,
+                                  uint32_t length,
+                                  std::list<uint8_t>* output);
+
+  // Helper to prepend an ASN.1 bit string
+  void PrependBitString(uint8_t* val,
+                        int num_bytes,
+                        std::list<uint8_t>* output);
+
+  // Read an ASN.1 length field. This also checks that the length does not
+  // extend beyond |end|.
+  bool ReadLength(uint8_t** pos, uint8_t* end, uint32_t* result);
+
+  // Read an ASN.1 type header and its length.
+  bool ReadTypeHeaderAndLength(uint8_t** pos,
+                               uint8_t* end,
+                               uint8_t expected_tag,
+                               uint32_t* length);
+
+  // Read an ASN.1 sequence declaration. This consumes the type header and
+  // length field, but not the contents of the sequence.
+  bool ReadSequence(uint8_t** pos, uint8_t* end);
+
+  // Read the RSA AlgorithmIdentifier.
+  bool ReadAlgorithmIdentifier(uint8_t** pos, uint8_t* end);
+
+  // Read one of the two version fields in PrivateKeyInfo.
+  bool ReadVersion(uint8_t** pos, uint8_t* end);
+
+  // The byte-significance of the stored components (modulus, etc..).
+  bool big_endian_;
+
+  // Component integers of the PrivateKeyInfo
+  std::vector<uint8_t> modulus_;
+  std::vector<uint8_t> public_exponent_;
+  std::vector<uint8_t> private_exponent_;
+  std::vector<uint8_t> prime1_;
+  std::vector<uint8_t> prime2_;
+  std::vector<uint8_t> exponent1_;
+  std::vector<uint8_t> exponent2_;
+  std::vector<uint8_t> coefficient_;
+
+  DISALLOW_COPY_AND_ASSIGN(PrivateKeyInfoCodec);
+};
+
 // Encapsulates an RSA private key. Can be used to generate new keys, export
 // keys to other formats, or to extract a public key.
 // TODO(hclam): This class should be ref-counted so it can be reused easily.
@@ -26,23 +171,34 @@
   ~RSAPrivateKey();
 
   // Create a new random instance. Can return NULL if initialization fails.
-  static std::unique_ptr<RSAPrivateKey> Create(uint16_t num_bits);
+  static RSAPrivateKey* Create(uint16_t num_bits);
 
   // Create a new instance by importing an existing private key. The format is
   // an ASN.1-encoded PrivateKeyInfo block from PKCS #8. This can return NULL if
   // initialization fails.
-  static std::unique_ptr<RSAPrivateKey> CreateFromPrivateKeyInfo(
+  static RSAPrivateKey* CreateFromPrivateKeyInfo(
       const std::vector<uint8_t>& input);
 
+#if defined(USE_OPENSSL)
   // Create a new instance from an existing EVP_PKEY, taking a
   // reference to it. |key| must be an RSA key. Returns NULL on
   // failure.
-  static std::unique_ptr<RSAPrivateKey> CreateFromKey(EVP_PKEY* key);
+  static RSAPrivateKey* CreateFromKey(EVP_PKEY* key);
+#else
+  // Create a new instance by referencing an existing private key
+  // structure.  Does not import the key.
+  static RSAPrivateKey* CreateFromKey(SECKEYPrivateKey* key);
+#endif
 
-  EVP_PKEY* key() { return key_.get(); }
+#if defined(USE_OPENSSL)
+  EVP_PKEY* key() { return key_; }
+#else
+  SECKEYPrivateKey* key() { return key_; }
+  SECKEYPublicKey* public_key() { return public_key_; }
+#endif
 
   // Creates a copy of the object.
-  std::unique_ptr<RSAPrivateKey> Copy() const;
+  RSAPrivateKey* Copy() const;
 
   // Exports the private key to a PKCS #8 PrivateKeyInfo block.
   bool ExportPrivateKey(std::vector<uint8_t>* output) const;
@@ -54,7 +210,12 @@
   // Constructor is private. Use one of the Create*() methods above instead.
   RSAPrivateKey();
 
-  bssl::UniquePtr<EVP_PKEY> key_;
+#if defined(USE_OPENSSL)
+  EVP_PKEY* key_;
+#else
+  SECKEYPrivateKey* key_;
+  SECKEYPublicKey* public_key_;
+#endif
 
   DISALLOW_COPY_AND_ASSIGN(RSAPrivateKey);
 };
diff --git a/crypto/rsa_private_key_nss.cc b/crypto/rsa_private_key_nss.cc
new file mode 100644
index 0000000..b1026c1
--- /dev/null
+++ b/crypto/rsa_private_key_nss.cc
@@ -0,0 +1,151 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/rsa_private_key.h"
+
+#include <cryptohi.h>
+#include <keyhi.h>
+#include <pk11pub.h>
+#include <stdint.h>
+
+#include <list>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_util.h"
+#include "crypto/nss_key_util.h"
+#include "crypto/nss_util.h"
+#include "crypto/scoped_nss_types.h"
+
+// TODO(rafaelw): Consider using NSS's ASN.1 encoder.
+namespace {
+
+static bool ReadAttribute(SECKEYPrivateKey* key,
+                          CK_ATTRIBUTE_TYPE type,
+                          std::vector<uint8_t>* output) {
+  SECItem item;
+  SECStatus rv;
+  rv = PK11_ReadRawAttribute(PK11_TypePrivKey, key, type, &item);
+  if (rv != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  output->assign(item.data, item.data + item.len);
+  SECITEM_FreeItem(&item, PR_FALSE);
+  return true;
+}
+
+}  // namespace
+
+namespace crypto {
+
+RSAPrivateKey::~RSAPrivateKey() {
+  if (key_)
+    SECKEY_DestroyPrivateKey(key_);
+  if (public_key_)
+    SECKEY_DestroyPublicKey(public_key_);
+}
+
+// static
+RSAPrivateKey* RSAPrivateKey::Create(uint16_t num_bits) {
+  EnsureNSSInit();
+
+  ScopedPK11Slot slot(PK11_GetInternalSlot());
+  if (!slot) {
+    NOTREACHED();
+    return nullptr;
+  }
+
+  ScopedSECKEYPublicKey public_key;
+  ScopedSECKEYPrivateKey private_key;
+  if (!GenerateRSAKeyPairNSS(slot.get(), num_bits, false /* not permanent */,
+                             &public_key, &private_key)) {
+    return nullptr;
+  }
+
+  RSAPrivateKey* rsa_key = new RSAPrivateKey;
+  rsa_key->public_key_ = public_key.release();
+  rsa_key->key_ = private_key.release();
+  return rsa_key;
+}
+
+// static
+RSAPrivateKey* RSAPrivateKey::CreateFromPrivateKeyInfo(
+    const std::vector<uint8_t>& input) {
+  EnsureNSSInit();
+
+  ScopedPK11Slot slot(PK11_GetInternalSlot());
+  if (!slot) {
+    NOTREACHED();
+    return nullptr;
+  }
+  ScopedSECKEYPrivateKey key(ImportNSSKeyFromPrivateKeyInfo(
+      slot.get(), input, false /* not permanent */));
+  if (!key || SECKEY_GetPrivateKeyType(key.get()) != rsaKey)
+    return nullptr;
+  return RSAPrivateKey::CreateFromKey(key.get());
+}
+
+// static
+RSAPrivateKey* RSAPrivateKey::CreateFromKey(SECKEYPrivateKey* key) {
+  DCHECK(key);
+  if (SECKEY_GetPrivateKeyType(key) != rsaKey)
+    return NULL;
+  RSAPrivateKey* copy = new RSAPrivateKey();
+  copy->key_ = SECKEY_CopyPrivateKey(key);
+  copy->public_key_ = SECKEY_ConvertToPublicKey(key);
+  if (!copy->key_ || !copy->public_key_) {
+    NOTREACHED();
+    delete copy;
+    return NULL;
+  }
+  return copy;
+}
+
+RSAPrivateKey* RSAPrivateKey::Copy() const {
+  RSAPrivateKey* copy = new RSAPrivateKey();
+  copy->key_ = SECKEY_CopyPrivateKey(key_);
+  copy->public_key_ = SECKEY_CopyPublicKey(public_key_);
+  return copy;
+}
+
+bool RSAPrivateKey::ExportPrivateKey(std::vector<uint8_t>* output) const {
+  PrivateKeyInfoCodec private_key_info(true);
+
+  // Manually read the component attributes of the private key and build up
+  // the PrivateKeyInfo.
+  if (!ReadAttribute(key_, CKA_MODULUS, private_key_info.modulus()) ||
+      !ReadAttribute(key_, CKA_PUBLIC_EXPONENT,
+          private_key_info.public_exponent()) ||
+      !ReadAttribute(key_, CKA_PRIVATE_EXPONENT,
+          private_key_info.private_exponent()) ||
+      !ReadAttribute(key_, CKA_PRIME_1, private_key_info.prime1()) ||
+      !ReadAttribute(key_, CKA_PRIME_2, private_key_info.prime2()) ||
+      !ReadAttribute(key_, CKA_EXPONENT_1, private_key_info.exponent1()) ||
+      !ReadAttribute(key_, CKA_EXPONENT_2, private_key_info.exponent2()) ||
+      !ReadAttribute(key_, CKA_COEFFICIENT, private_key_info.coefficient())) {
+    NOTREACHED();
+    return false;
+  }
+
+  return private_key_info.Export(output);
+}
+
+bool RSAPrivateKey::ExportPublicKey(std::vector<uint8_t>* output) const {
+  ScopedSECItem der_pubkey(SECKEY_EncodeDERSubjectPublicKeyInfo(public_key_));
+  if (!der_pubkey.get()) {
+    NOTREACHED();
+    return false;
+  }
+
+  output->assign(der_pubkey->data, der_pubkey->data + der_pubkey->len);
+  return true;
+}
+
+RSAPrivateKey::RSAPrivateKey() : key_(NULL), public_key_(NULL) {
+  EnsureNSSInit();
+}
+
+}  // namespace crypto
diff --git a/crypto/rsa_private_key_unittest.cc b/crypto/rsa_private_key_unittest.cc
index f9549f3..393a24c 100644
--- a/crypto/rsa_private_key_unittest.cc
+++ b/crypto/rsa_private_key_unittest.cc
@@ -103,8 +103,10 @@
 
   ASSERT_EQ(privkey1.size(), privkey3.size());
   ASSERT_EQ(privkey2.size(), privkey4.size());
-  ASSERT_EQ(0, memcmp(&privkey1.front(), &privkey3.front(), privkey1.size()));
-  ASSERT_EQ(0, memcmp(&privkey2.front(), &privkey4.front(), privkey2.size()));
+  ASSERT_TRUE(0 == memcmp(&privkey1.front(), &privkey3.front(),
+                          privkey1.size()));
+  ASSERT_TRUE(0 == memcmp(&privkey2.front(), &privkey4.front(),
+                          privkey2.size()));
 }
 
 // Test Copy() method.
@@ -193,8 +195,8 @@
   std::vector<uint8_t> output;
   ASSERT_TRUE(key->ExportPublicKey(&output));
 
-  ASSERT_EQ(0,
-            memcmp(expected_public_key_info, &output.front(), output.size()));
+  ASSERT_TRUE(
+      memcmp(expected_public_key_info, &output.front(), output.size()) == 0);
 }
 
 // These two test keys each contain an integer that has 0x00 for its most
@@ -347,8 +349,10 @@
 
   ASSERT_EQ(input1.size(), output1.size());
   ASSERT_EQ(input2.size(), output2.size());
-  ASSERT_EQ(0, memcmp(&output1.front(), &input1.front(), input1.size()));
-  ASSERT_EQ(0, memcmp(&output2.front(), &input2.front(), input2.size()));
+  ASSERT_TRUE(0 == memcmp(&output1.front(), &input1.front(),
+                          input1.size()));
+  ASSERT_TRUE(0 == memcmp(&output2.front(), &input2.front(),
+                          input2.size()));
 }
 
 TEST(RSAPrivateKeyUnitTest, CreateFromKeyTest) {
diff --git a/crypto/scoped_openssl_types.h b/crypto/scoped_openssl_types.h
new file mode 100644
index 0000000..622fed2
--- /dev/null
+++ b/crypto/scoped_openssl_types.h
@@ -0,0 +1,62 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_SCOPED_OPENSSL_TYPES_H_
+#define CRYPTO_SCOPED_OPENSSL_TYPES_H_
+
+#include <openssl/bio.h>
+#include <openssl/bn.h>
+#include <openssl/dsa.h>
+#include <openssl/ec.h>
+#include <openssl/ecdsa.h>
+#include <openssl/evp.h>
+#ifdef OPENSSL_IS_BORINGSSL
+#include <openssl/mem.h>
+#endif
+#include <openssl/rsa.h>
+#include <stdint.h>
+
+#include <memory>
+
+namespace crypto {
+
+// Simplistic helper that wraps a call to a deleter function. In a C++11 world,
+// this would be std::function<>. An alternative would be to re-use
+// base::internal::RunnableAdapter<>, but that's far too heavy weight.
+template <typename Type, void (*Destroyer)(Type*)>
+struct OpenSSLDestroyer {
+  void operator()(Type* ptr) const { Destroyer(ptr); }
+};
+
+template <typename PointerType, void (*Destroyer)(PointerType*)>
+using ScopedOpenSSL =
+    std::unique_ptr<PointerType, OpenSSLDestroyer<PointerType, Destroyer>>;
+
+struct OpenSSLFree {
+  void operator()(uint8_t* ptr) const { OPENSSL_free(ptr); }
+};
+
+// Several typedefs are provided for crypto-specific primitives, for
+// short-hand and prevalence. Note that OpenSSL types related to X.509 are
+// intentionally not included, as crypto/ does not generally deal with
+// certificates or PKI.
+using ScopedBIGNUM = ScopedOpenSSL<BIGNUM, BN_free>;
+using ScopedEC_Key = ScopedOpenSSL<EC_KEY, EC_KEY_free>;
+using ScopedBIO = ScopedOpenSSL<BIO, BIO_free_all>;
+using ScopedDSA = ScopedOpenSSL<DSA, DSA_free>;
+using ScopedECDSA_SIG = ScopedOpenSSL<ECDSA_SIG, ECDSA_SIG_free>;
+using ScopedEC_GROUP = ScopedOpenSSL<EC_GROUP, EC_GROUP_free>;
+using ScopedEC_KEY = ScopedOpenSSL<EC_KEY, EC_KEY_free>;
+using ScopedEC_POINT = ScopedOpenSSL<EC_POINT, EC_POINT_free>;
+using ScopedEVP_MD_CTX = ScopedOpenSSL<EVP_MD_CTX, EVP_MD_CTX_destroy>;
+using ScopedEVP_PKEY = ScopedOpenSSL<EVP_PKEY, EVP_PKEY_free>;
+using ScopedEVP_PKEY_CTX = ScopedOpenSSL<EVP_PKEY_CTX, EVP_PKEY_CTX_free>;
+using ScopedRSA = ScopedOpenSSL<RSA, RSA_free>;
+
+// The bytes must have been allocated with OPENSSL_malloc.
+using ScopedOpenSSLBytes = std::unique_ptr<uint8_t, OpenSSLFree>;
+
+}  // namespace crypto
+
+#endif  // CRYPTO_SCOPED_OPENSSL_TYPES_H_
diff --git a/crypto/scoped_test_nss_chromeos_user.cc b/crypto/scoped_test_nss_chromeos_user.cc
index 49b92d4..aec25d8 100644
--- a/crypto/scoped_test_nss_chromeos_user.cc
+++ b/crypto/scoped_test_nss_chromeos_user.cc
@@ -18,7 +18,7 @@
   // This opens a software DB in the given folder. In production code that is in
   // the home folder, but for testing the temp folder is used.
   constructed_successfully_ =
-      InitializeNSSForChromeOSUser(username_hash, temp_dir_.GetPath());
+      InitializeNSSForChromeOSUser(username_hash, temp_dir_.path());
 }
 
 ScopedTestNSSChromeOSUser::~ScopedTestNSSChromeOSUser() {
diff --git a/crypto/scoped_test_nss_db.cc b/crypto/scoped_test_nss_db.cc
index b334109..dc58031 100644
--- a/crypto/scoped_test_nss_db.cc
+++ b/crypto/scoped_test_nss_db.cc
@@ -24,7 +24,7 @@
     return;
 
   const char kTestDescription[] = "Test DB";
-  slot_ = OpenSoftwareNSSDB(temp_dir_.GetPath(), kTestDescription);
+  slot_ = OpenSoftwareNSSDB(temp_dir_.path(), kTestDescription);
 }
 
 ScopedTestNSSDB::~ScopedTestNSSDB() {
diff --git a/crypto/scoped_test_system_nss_key_slot.h b/crypto/scoped_test_system_nss_key_slot.h
index ae9b2cd..eb8fbc9 100644
--- a/crypto/scoped_test_system_nss_key_slot.h
+++ b/crypto/scoped_test_system_nss_key_slot.h
@@ -27,7 +27,7 @@
 // At most one instance of this helper must be used at a time.
 class CRYPTO_EXPORT ScopedTestSystemNSSKeySlot {
  public:
-  ScopedTestSystemNSSKeySlot();
+  explicit ScopedTestSystemNSSKeySlot();
   ~ScopedTestSystemNSSKeySlot();
 
   bool ConstructedSuccessfully() const;
diff --git a/crypto/secure_hash.cc b/crypto/secure_hash.cc
index d47f783..9003b9c 100644
--- a/crypto/secure_hash.cc
+++ b/crypto/secure_hash.cc
@@ -27,7 +27,7 @@
     SHA256_Init(&ctx_);
   }
 
-  SecureHashSHA256(const SecureHashSHA256& other) {
+  SecureHashSHA256(const SecureHashSHA256& other) : SecureHash() {
     memcpy(&ctx_, &other.ctx_, sizeof(ctx_));
   }
 
diff --git a/crypto/sha2.cc b/crypto/sha2.cc
index 1b302b3..e97b8f4 100644
--- a/crypto/sha2.cc
+++ b/crypto/sha2.cc
@@ -21,7 +21,7 @@
 
 std::string SHA256HashString(const base::StringPiece& str) {
   std::string output(kSHA256Length, 0);
-  SHA256HashString(str, base::string_as_array(&output), output.size());
+  SHA256HashString(str, string_as_array(&output), output.size());
   return output;
 }
 
diff --git a/crypto/signature_creator.h b/crypto/signature_creator.h
index 674bd4c..1e8e856 100644
--- a/crypto/signature_creator.h
+++ b/crypto/signature_creator.h
@@ -14,8 +14,13 @@
 #include "build/build_config.h"
 #include "crypto/crypto_export.h"
 
+#if defined(USE_OPENSSL)
 // Forward declaration for openssl/*.h
 typedef struct env_md_ctx_st EVP_MD_CTX;
+#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
+// Forward declaration.
+struct SGNContextStr;
+#endif
 
 namespace crypto {
 
@@ -57,7 +62,11 @@
   // Private constructor. Use the Create() method instead.
   SignatureCreator();
 
+#if defined(USE_OPENSSL)
   EVP_MD_CTX* sign_context_;
+#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
+  SGNContextStr* sign_context_;
+#endif
 
   DISALLOW_COPY_AND_ASSIGN(SignatureCreator);
 };
diff --git a/crypto/signature_creator_nss.cc b/crypto/signature_creator_nss.cc
new file mode 100644
index 0000000..bf20413
--- /dev/null
+++ b/crypto/signature_creator_nss.cc
@@ -0,0 +1,119 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/signature_creator.h"
+
+#include <cryptohi.h>
+#include <keyhi.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "crypto/nss_util.h"
+#include "crypto/rsa_private_key.h"
+
+namespace crypto {
+
+namespace {
+
+SECOidTag ToNSSSigOid(SignatureCreator::HashAlgorithm hash_alg) {
+  switch (hash_alg) {
+    case SignatureCreator::SHA1:
+      return SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION;
+    case SignatureCreator::SHA256:
+      return SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
+  }
+  return SEC_OID_UNKNOWN;
+}
+
+SECOidTag ToNSSHashOid(SignatureCreator::HashAlgorithm hash_alg) {
+  switch (hash_alg) {
+    case SignatureCreator::SHA1:
+      return SEC_OID_SHA1;
+    case SignatureCreator::SHA256:
+      return SEC_OID_SHA256;
+  }
+  return SEC_OID_UNKNOWN;
+}
+
+}  // namespace
+
+SignatureCreator::~SignatureCreator() {
+  if (sign_context_) {
+    SGN_DestroyContext(sign_context_, PR_TRUE);
+    sign_context_ = NULL;
+  }
+}
+
+// static
+SignatureCreator* SignatureCreator::Create(RSAPrivateKey* key,
+                                           HashAlgorithm hash_alg) {
+  scoped_ptr<SignatureCreator> result(new SignatureCreator);
+  result->sign_context_ = SGN_NewContext(ToNSSSigOid(hash_alg), key->key());
+  if (!result->sign_context_) {
+    NOTREACHED();
+    return NULL;
+  }
+
+  SECStatus rv = SGN_Begin(result->sign_context_);
+  if (rv != SECSuccess) {
+    NOTREACHED();
+    return NULL;
+  }
+
+  return result.release();
+}
+
+// static
+bool SignatureCreator::Sign(RSAPrivateKey* key,
+                            HashAlgorithm hash_alg,
+                            const uint8_t* data,
+                            int data_len,
+                            std::vector<uint8_t>* signature) {
+  SECItem data_item;
+  data_item.type = siBuffer;
+  data_item.data = const_cast<unsigned char*>(data);
+  data_item.len = data_len;
+
+  SECItem signature_item;
+  SECStatus rv = SGN_Digest(key->key(), ToNSSHashOid(hash_alg), &signature_item,
+                            &data_item);
+  if (rv != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+  signature->assign(signature_item.data,
+                    signature_item.data + signature_item.len);
+  SECITEM_FreeItem(&signature_item, PR_FALSE);
+  return true;
+}
+
+bool SignatureCreator::Update(const uint8_t* data_part, int data_part_len) {
+  SECStatus rv = SGN_Update(sign_context_, data_part, data_part_len);
+  if (rv != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+
+  return true;
+}
+
+bool SignatureCreator::Final(std::vector<uint8_t>* signature) {
+  SECItem signature_item;
+  SECStatus rv = SGN_End(sign_context_, &signature_item);
+  if (rv != SECSuccess) {
+    return false;
+  }
+  signature->assign(signature_item.data,
+                    signature_item.data + signature_item.len);
+  SECITEM_FreeItem(&signature_item, PR_FALSE);
+  return true;
+}
+
+SignatureCreator::SignatureCreator() : sign_context_(NULL) {
+  EnsureNSSInit();
+}
+
+}  // namespace crypto
diff --git a/crypto/signature_creator_unittest.cc b/crypto/signature_creator_unittest.cc
index 2f135cc..819e663 100644
--- a/crypto/signature_creator_unittest.cc
+++ b/crypto/signature_creator_unittest.cc
@@ -7,7 +7,6 @@
 #include <stdint.h>
 
 #include <memory>
-#include <string>
 #include <vector>
 
 #include "base/sha1.h"
diff --git a/crypto/signature_verifier.h b/crypto/signature_verifier.h
index f1ea580..5b7369f 100644
--- a/crypto/signature_verifier.h
+++ b/crypto/signature_verifier.h
@@ -7,14 +7,19 @@
 
 #include <stdint.h>
 
-#include <memory>
 #include <vector>
 
 #include "build/build_config.h"
 #include "crypto/crypto_export.h"
 
+#if defined(USE_OPENSSL)
 typedef struct env_md_st EVP_MD;
 typedef struct evp_pkey_ctx_st EVP_PKEY_CTX;
+#else
+typedef struct HASHContextStr HASHContext;
+typedef struct SECKEYPublicKeyStr SECKEYPublicKey;
+typedef struct VFYContextStr VFYContext;
+#endif
 
 namespace crypto {
 
@@ -91,6 +96,7 @@
   bool VerifyFinal();
 
  private:
+#if defined(USE_OPENSSL)
   bool CommonInit(int pkey_type,
                   const EVP_MD* digest,
                   const uint8_t* signature,
@@ -98,13 +104,29 @@
                   const uint8_t* public_key_info,
                   int public_key_info_len,
                   EVP_PKEY_CTX** pkey_ctx);
+#else
+  static SECKEYPublicKey* DecodePublicKeyInfo(const uint8_t* public_key_info,
+                                              int public_key_info_len);
+#endif
 
   void Reset();
 
   std::vector<uint8_t> signature_;
 
+#if defined(USE_OPENSSL)
   struct VerifyContext;
-  std::unique_ptr<VerifyContext> verify_context_;
+  VerifyContext* verify_context_;
+#else
+  // Used for all signature types except RSA-PSS.
+  VFYContext* vfy_context_;
+
+  // Used for RSA-PSS signatures.
+  HashAlgorithm hash_alg_;
+  HashAlgorithm mask_hash_alg_;
+  unsigned int salt_len_;
+  SECKEYPublicKey* public_key_;
+  HASHContext* hash_context_;
+#endif
 };
 
 }  // namespace crypto
diff --git a/crypto/signature_verifier_nss.cc b/crypto/signature_verifier_nss.cc
new file mode 100644
index 0000000..edbd3f6
--- /dev/null
+++ b/crypto/signature_verifier_nss.cc
@@ -0,0 +1,213 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/signature_verifier.h"
+
+#include <cryptohi.h>
+#include <keyhi.h>
+#include <pk11pub.h>
+#include <secerr.h>
+#include <sechash.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "base/logging.h"
+#include "crypto/nss_util.h"
+#include "crypto/third_party/nss/chromium-nss.h"
+
+namespace crypto {
+
+namespace {
+
+HASH_HashType ToNSSHashType(SignatureVerifier::HashAlgorithm hash_alg) {
+  switch (hash_alg) {
+    case SignatureVerifier::SHA1:
+      return HASH_AlgSHA1;
+    case SignatureVerifier::SHA256:
+      return HASH_AlgSHA256;
+  }
+  return HASH_AlgNULL;
+}
+
+SECOidTag ToNSSSignatureType(SignatureVerifier::SignatureAlgorithm sig_alg) {
+  switch (sig_alg) {
+    case SignatureVerifier::RSA_PKCS1_SHA1:
+      return SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION;
+    case SignatureVerifier::RSA_PKCS1_SHA256:
+      return SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
+    case SignatureVerifier::ECDSA_SHA256:
+      return SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE;
+  }
+  return SEC_OID_UNKNOWN;
+}
+
+SECStatus VerifyRSAPSS_End(SECKEYPublicKey* public_key,
+                           HASHContext* hash_context,
+                           HASH_HashType mask_hash_alg,
+                           unsigned int salt_len,
+                           const unsigned char* signature,
+                           unsigned int signature_len) {
+  unsigned int hash_len = HASH_ResultLenContext(hash_context);
+  std::vector<unsigned char> hash(hash_len);
+  HASH_End(hash_context, &hash[0], &hash_len, hash.size());
+
+  unsigned int modulus_len = SECKEY_PublicKeyStrength(public_key);
+  if (signature_len != modulus_len) {
+    PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+    return SECFailure;
+  }
+  std::vector<unsigned char> enc(signature_len);
+  SECStatus rv = PK11_PubEncryptRaw(public_key, &enc[0],
+                                    const_cast<unsigned char*>(signature),
+                                    signature_len, NULL);
+  if (rv != SECSuccess) {
+    LOG(WARNING) << "PK11_PubEncryptRaw failed";
+    return rv;
+  }
+  return emsa_pss_verify(&hash[0], &enc[0], enc.size(),
+                         HASH_GetType(hash_context), mask_hash_alg,
+                         salt_len);
+}
+
+}  // namespace
+
+SignatureVerifier::SignatureVerifier()
+    : vfy_context_(NULL),
+      hash_alg_(SHA1),
+      mask_hash_alg_(SHA1),
+      salt_len_(0),
+      public_key_(NULL),
+      hash_context_(NULL) {
+  EnsureNSSInit();
+}
+
+SignatureVerifier::~SignatureVerifier() {
+  Reset();
+}
+
+bool SignatureVerifier::VerifyInit(SignatureAlgorithm signature_algorithm,
+                                   const uint8_t* signature,
+                                   int signature_len,
+                                   const uint8_t* public_key_info,
+                                   int public_key_info_len) {
+  if (vfy_context_ || hash_context_)
+    return false;
+
+  signature_.assign(signature, signature + signature_len);
+
+  SECKEYPublicKey* public_key = DecodePublicKeyInfo(public_key_info,
+                                                    public_key_info_len);
+  if (!public_key)
+    return false;
+
+  SECItem sig;
+  sig.type = siBuffer;
+  sig.data = const_cast<uint8_t*>(signature);
+  sig.len = signature_len;
+  vfy_context_ = VFY_CreateContext(
+      public_key, &sig, ToNSSSignatureType(signature_algorithm), nullptr);
+  SECKEY_DestroyPublicKey(public_key);  // Done with public_key.
+  if (!vfy_context_) {
+    // A corrupted RSA signature could be detected without the data, so
+    // VFY_CreateContextWithAlgorithmID may fail with SEC_ERROR_BAD_SIGNATURE
+    // (-8182).
+    return false;
+  }
+
+  if (VFY_Begin(vfy_context_) != SECSuccess) {
+    NOTREACHED();
+    return false;
+  }
+  return true;
+}
+
+bool SignatureVerifier::VerifyInitRSAPSS(HashAlgorithm hash_alg,
+                                         HashAlgorithm mask_hash_alg,
+                                         int salt_len,
+                                         const uint8_t* signature,
+                                         int signature_len,
+                                         const uint8_t* public_key_info,
+                                         int public_key_info_len) {
+  if (vfy_context_ || hash_context_)
+    return false;
+
+  signature_.assign(signature, signature + signature_len);
+
+  SECKEYPublicKey* public_key = DecodePublicKeyInfo(public_key_info,
+                                                    public_key_info_len);
+  if (!public_key)
+    return false;
+
+  public_key_ = public_key;
+  hash_alg_ = hash_alg;
+  mask_hash_alg_ = mask_hash_alg;
+  salt_len_ = salt_len;
+  hash_context_ = HASH_Create(ToNSSHashType(hash_alg_));
+  if (!hash_context_)
+    return false;
+  HASH_Begin(hash_context_);
+  return true;
+}
+
+void SignatureVerifier::VerifyUpdate(const uint8_t* data_part,
+                                     int data_part_len) {
+  if (vfy_context_) {
+    SECStatus rv = VFY_Update(vfy_context_, data_part, data_part_len);
+    DCHECK_EQ(SECSuccess, rv);
+  } else {
+    HASH_Update(hash_context_, data_part, data_part_len);
+  }
+}
+
+bool SignatureVerifier::VerifyFinal() {
+  SECStatus rv;
+  if (vfy_context_) {
+    rv = VFY_End(vfy_context_);
+  } else {
+    rv = VerifyRSAPSS_End(public_key_, hash_context_,
+                          ToNSSHashType(mask_hash_alg_), salt_len_,
+                          signature_.data(),
+                          signature_.size());
+  }
+  Reset();
+
+  // If signature verification fails, the error code is
+  // SEC_ERROR_BAD_SIGNATURE (-8182).
+  return (rv == SECSuccess);
+}
+
+// static
+SECKEYPublicKey* SignatureVerifier::DecodePublicKeyInfo(
+    const uint8_t* public_key_info,
+    int public_key_info_len) {
+  CERTSubjectPublicKeyInfo* spki = NULL;
+  SECItem spki_der;
+  spki_der.type = siBuffer;
+  spki_der.data = const_cast<uint8_t*>(public_key_info);
+  spki_der.len = public_key_info_len;
+  spki = SECKEY_DecodeDERSubjectPublicKeyInfo(&spki_der);
+  if (!spki)
+    return NULL;
+  SECKEYPublicKey* public_key = SECKEY_ExtractPublicKey(spki);
+  SECKEY_DestroySubjectPublicKeyInfo(spki);  // Done with spki.
+  return public_key;
+}
+
+void SignatureVerifier::Reset() {
+  if (vfy_context_) {
+    VFY_DestroyContext(vfy_context_, PR_TRUE);
+    vfy_context_ = NULL;
+  }
+  if (hash_context_) {
+    HASH_Destroy(hash_context_);
+    hash_context_ = NULL;
+  }
+  if (public_key_) {
+    SECKEY_DestroyPublicKey(public_key_);
+    public_key_ = NULL;
+  }
+  signature_.clear();
+}
+
+}  // namespace crypto
diff --git a/crypto/signature_verifier_unittest.cc b/crypto/signature_verifier_unittest.cc
index 2cda459..d71ea82 100644
--- a/crypto/signature_verifier_unittest.cc
+++ b/crypto/signature_verifier_unittest.cc
@@ -7,7 +7,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include "base/logging.h"
 #include "base/macros.h"
 #include "base/numerics/safe_conversions.h"
 #include "testing/gtest/include/gtest/gtest.h"
diff --git a/crypto/symmetric_key.cc b/crypto/symmetric_key.cc
index 6a19f84..e3ecf62 100644
--- a/crypto/symmetric_key.cc
+++ b/crypto/symmetric_key.cc
@@ -4,6 +4,8 @@
 
 #include "crypto/symmetric_key.h"
 
+#include <openssl/evp.h>
+#include <openssl/rand.h>
 #include <stddef.h>
 #include <stdint.h>
 
@@ -13,8 +15,6 @@
 #include "base/logging.h"
 #include "base/strings/string_util.h"
 #include "crypto/openssl_util.h"
-#include "third_party/boringssl/src/include/openssl/evp.h"
-#include "third_party/boringssl/src/include/openssl/rand.h"
 
 namespace crypto {
 
diff --git a/crypto/symmetric_key.h b/crypto/symmetric_key.h
index 7494634..8862708 100644
--- a/crypto/symmetric_key.h
+++ b/crypto/symmetric_key.h
@@ -14,6 +14,15 @@
 #include "build/build_config.h"
 #include "crypto/crypto_export.h"
 
+#if defined(NACL_WIN64)
+// See comments for crypto_nacl_win64 in crypto.gyp.
+// Must test for NACL_WIN64 before OS_WIN since former is a subset of latter.
+#include "crypto/scoped_capi_types.h"
+#elif defined(USE_NSS_CERTS) || \
+    (!defined(USE_OPENSSL) && (defined(OS_WIN) || defined(OS_MACOSX)))
+#include "crypto/scoped_nss_types.h"
+#endif
+
 namespace crypto {
 
 // Wraps a platform-specific symmetric key and allows it to be held in a
@@ -54,8 +63,13 @@
   // size for use with |algorithm|. The caller owns the returned SymmetricKey.
   static std::unique_ptr<SymmetricKey> Import(Algorithm algorithm,
                                               const std::string& raw_key);
-
+#if defined(NACL_WIN64)
+  HCRYPTKEY key() const { return key_.get(); }
+#elif defined(USE_OPENSSL)
   const std::string& key() { return key_; }
+#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
+  PK11SymKey* key() const { return key_.get(); }
+#endif
 
   // Extracts the raw key from the platform specific data.
   // Warning: |raw_key| holds the raw key as bytes and thus must be handled
@@ -63,9 +77,27 @@
   bool GetRawKey(std::string* raw_key);
 
  private:
-  SymmetricKey();
+#if defined(NACL_WIN64)
+  SymmetricKey(HCRYPTPROV provider, HCRYPTKEY key,
+               const void* key_data, size_t key_size_in_bytes);
 
+  ScopedHCRYPTPROV provider_;
+  ScopedHCRYPTKEY key_;
+
+  // Contains the raw key, if it is known during initialization and when it
+  // is likely that the associated |provider_| will be unable to export the
+  // |key_|. This is the case of HMAC keys when the key size exceeds 16 bytes
+  // when using the default RSA provider.
+  // TODO(rsleevi): See if KP_EFFECTIVE_KEYLEN is the reason why CryptExportKey
+  // fails with NTE_BAD_KEY/NTE_BAD_LEN
+  std::string raw_key_;
+#elif defined(USE_OPENSSL)
+  SymmetricKey() {}
   std::string key_;
+#elif defined(USE_NSS_CERTS) || defined(OS_WIN) || defined(OS_MACOSX)
+  explicit SymmetricKey(PK11SymKey* key);
+  ScopedPK11SymKey key_;
+#endif
 
   DISALLOW_COPY_AND_ASSIGN(SymmetricKey);
 };
diff --git a/crypto/symmetric_key_nss.cc b/crypto/symmetric_key_nss.cc
new file mode 100644
index 0000000..e3aacc7
--- /dev/null
+++ b/crypto/symmetric_key_nss.cc
@@ -0,0 +1,151 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/symmetric_key.h"
+
+#include <nss.h>
+#include <pk11pub.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "crypto/nss_util.h"
+#include "crypto/scoped_nss_types.h"
+
+namespace crypto {
+
+SymmetricKey::~SymmetricKey() {}
+
+// static
+SymmetricKey* SymmetricKey::GenerateRandomKey(Algorithm algorithm,
+                                              size_t key_size_in_bits) {
+  DCHECK_EQ(AES, algorithm);
+
+  EnsureNSSInit();
+
+  // Whitelist supported key sizes to avoid accidentally relying on
+  // algorithms available in NSS but not BoringSSL and vice
+  // versa. Note that BoringSSL does not support AES-192.
+  if (key_size_in_bits != 128 && key_size_in_bits != 256)
+    return NULL;
+
+  ScopedPK11Slot slot(PK11_GetInternalSlot());
+  if (!slot.get())
+    return NULL;
+
+  PK11SymKey* sym_key = PK11_KeyGen(slot.get(), CKM_AES_KEY_GEN, NULL,
+                                    key_size_in_bits / 8, NULL);
+  if (!sym_key)
+    return NULL;
+
+  return new SymmetricKey(sym_key);
+}
+
+// static
+SymmetricKey* SymmetricKey::DeriveKeyFromPassword(Algorithm algorithm,
+                                                  const std::string& password,
+                                                  const std::string& salt,
+                                                  size_t iterations,
+                                                  size_t key_size_in_bits) {
+  EnsureNSSInit();
+  if (salt.empty() || iterations == 0 || key_size_in_bits == 0)
+    return NULL;
+
+  if (algorithm == AES) {
+    // Whitelist supported key sizes to avoid accidentally relying on
+    // algorithms available in NSS but not BoringSSL and vice
+    // versa. Note that BoringSSL does not support AES-192.
+    if (key_size_in_bits != 128 && key_size_in_bits != 256)
+      return NULL;
+  }
+
+  SECItem password_item;
+  password_item.type = siBuffer;
+  password_item.data = reinterpret_cast<unsigned char*>(
+      const_cast<char *>(password.data()));
+  password_item.len = password.size();
+
+  SECItem salt_item;
+  salt_item.type = siBuffer;
+  salt_item.data = reinterpret_cast<unsigned char*>(
+      const_cast<char *>(salt.data()));
+  salt_item.len = salt.size();
+
+  SECOidTag cipher_algorithm =
+      algorithm == AES ? SEC_OID_AES_256_CBC : SEC_OID_HMAC_SHA1;
+  ScopedSECAlgorithmID alg_id(PK11_CreatePBEV2AlgorithmID(SEC_OID_PKCS5_PBKDF2,
+                                                          cipher_algorithm,
+                                                          SEC_OID_HMAC_SHA1,
+                                                          key_size_in_bits / 8,
+                                                          iterations,
+                                                          &salt_item));
+  if (!alg_id.get())
+    return NULL;
+
+  ScopedPK11Slot slot(PK11_GetInternalSlot());
+  if (!slot.get())
+    return NULL;
+
+  PK11SymKey* sym_key = PK11_PBEKeyGen(slot.get(), alg_id.get(), &password_item,
+                                       PR_FALSE, NULL);
+  if (!sym_key)
+    return NULL;
+
+  return new SymmetricKey(sym_key);
+}
+
+// static
+SymmetricKey* SymmetricKey::Import(Algorithm algorithm,
+                                   const std::string& raw_key) {
+  EnsureNSSInit();
+
+  if (algorithm == AES) {
+    // Whitelist supported key sizes to avoid accidentally relying on
+    // algorithms available in NSS but not BoringSSL and vice
+    // versa. Note that BoringSSL does not support AES-192.
+    if (raw_key.size() != 128/8 && raw_key.size() != 256/8)
+      return NULL;
+  }
+
+  CK_MECHANISM_TYPE cipher =
+      algorithm == AES ? CKM_AES_CBC : CKM_SHA_1_HMAC;
+
+  SECItem key_item;
+  key_item.type = siBuffer;
+  key_item.data = reinterpret_cast<unsigned char*>(
+      const_cast<char *>(raw_key.data()));
+  key_item.len = raw_key.size();
+
+  ScopedPK11Slot slot(PK11_GetInternalSlot());
+  if (!slot.get())
+    return NULL;
+
+  // The exact value of the |origin| argument doesn't matter to NSS as long as
+  // it's not PK11_OriginFortezzaHack, so we pass PK11_OriginUnwrap as a
+  // placeholder.
+  PK11SymKey* sym_key = PK11_ImportSymKey(slot.get(), cipher, PK11_OriginUnwrap,
+                                          CKA_ENCRYPT, &key_item, NULL);
+  if (!sym_key)
+    return NULL;
+
+  return new SymmetricKey(sym_key);
+}
+
+bool SymmetricKey::GetRawKey(std::string* raw_key) {
+  SECStatus rv = PK11_ExtractKeyValue(key_.get());
+  if (SECSuccess != rv)
+    return false;
+
+  SECItem* key_item = PK11_GetKeyData(key_.get());
+  if (!key_item)
+    return false;
+
+  raw_key->assign(reinterpret_cast<char*>(key_item->data), key_item->len);
+  return true;
+}
+
+SymmetricKey::SymmetricKey(PK11SymKey* key) : key_(key) {
+  DCHECK(key);
+}
+
+}  // namespace crypto
diff --git a/crypto/third_party/nss/chromium-blapi.h b/crypto/third_party/nss/chromium-blapi.h
new file mode 100644
index 0000000..2ca772e
--- /dev/null
+++ b/crypto/third_party/nss/chromium-blapi.h
@@ -0,0 +1,101 @@
+/*
+ * crypto.h - public data structures and prototypes for the crypto library
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1994-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Dr Vipul Gupta <vipul.gupta@sun.com>, Sun Microsystems Laboratories
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+/* $Id: blapi.h,v 1.27 2007/11/09 18:49:32 wtc%google.com Exp $ */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_
+
+#include "crypto/third_party/nss/chromium-blapit.h"
+
+/******************************************/
+
+extern SHA256Context *SHA256_NewContext(void);
+extern void SHA256_DestroyContext(SHA256Context *cx, PRBool freeit);
+extern void SHA256_Begin(SHA256Context *cx);
+extern void SHA256_Update(SHA256Context *cx, const unsigned char *input,
+			unsigned int inputLen);
+extern void SHA256_End(SHA256Context *cx, unsigned char *digest,
+		     unsigned int *digestLen, unsigned int maxDigestLen);
+extern SECStatus SHA256_HashBuf(unsigned char *dest, const unsigned char *src,
+			      unsigned int src_length);
+extern SECStatus SHA256_Hash(unsigned char *dest, const char *src);
+extern void SHA256_TraceState(SHA256Context *cx);
+extern unsigned int SHA256_FlattenSize(SHA256Context *cx);
+extern SECStatus SHA256_Flatten(SHA256Context *cx,unsigned char *space);
+extern SHA256Context * SHA256_Resurrect(unsigned char *space, void *arg);
+extern void SHA256_Clone(SHA256Context *dest, SHA256Context *src);
+
+/******************************************/
+
+extern SHA512Context *SHA512_NewContext(void);
+extern void SHA512_DestroyContext(SHA512Context *cx, PRBool freeit);
+extern void SHA512_Begin(SHA512Context *cx);
+extern void SHA512_Update(SHA512Context *cx, const unsigned char *input,
+			unsigned int inputLen);
+extern void SHA512_End(SHA512Context *cx, unsigned char *digest,
+		     unsigned int *digestLen, unsigned int maxDigestLen);
+extern SECStatus SHA512_HashBuf(unsigned char *dest, const unsigned char *src,
+			      unsigned int src_length);
+extern SECStatus SHA512_Hash(unsigned char *dest, const char *src);
+extern void SHA512_TraceState(SHA512Context *cx);
+extern unsigned int SHA512_FlattenSize(SHA512Context *cx);
+extern SECStatus SHA512_Flatten(SHA512Context *cx,unsigned char *space);
+extern SHA512Context * SHA512_Resurrect(unsigned char *space, void *arg);
+extern void SHA512_Clone(SHA512Context *dest, SHA512Context *src);
+
+/******************************************/
+
+extern SHA384Context *SHA384_NewContext(void);
+extern void SHA384_DestroyContext(SHA384Context *cx, PRBool freeit);
+extern void SHA384_Begin(SHA384Context *cx);
+extern void SHA384_Update(SHA384Context *cx, const unsigned char *input,
+			unsigned int inputLen);
+extern void SHA384_End(SHA384Context *cx, unsigned char *digest,
+		     unsigned int *digestLen, unsigned int maxDigestLen);
+extern SECStatus SHA384_HashBuf(unsigned char *dest, const unsigned char *src,
+			      unsigned int src_length);
+extern SECStatus SHA384_Hash(unsigned char *dest, const char *src);
+extern void SHA384_TraceState(SHA384Context *cx);
+extern unsigned int SHA384_FlattenSize(SHA384Context *cx);
+extern SECStatus SHA384_Flatten(SHA384Context *cx,unsigned char *space);
+extern SHA384Context * SHA384_Resurrect(unsigned char *space, void *arg);
+extern void SHA384_Clone(SHA384Context *dest, SHA384Context *src);
+
+#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPI_H_ */
diff --git a/crypto/third_party/nss/chromium-blapit.h b/crypto/third_party/nss/chromium-blapit.h
new file mode 100644
index 0000000..938547a
--- /dev/null
+++ b/crypto/third_party/nss/chromium-blapit.h
@@ -0,0 +1,91 @@
+/*
+ * blapit.h - public data structures for the crypto library
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1994-2000
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Dr Vipul Gupta <vipul.gupta@sun.com> and
+ *   Douglas Stebila <douglas@stebila.ca>, Sun Microsystems Laboratories
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+/* $Id: blapit.h,v 1.20 2007/02/28 19:47:37 rrelyea%redhat.com Exp $ */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_
+
+#include "crypto/third_party/nss/chromium-prtypes.h"
+
+/*
+** A status code. Status's are used by procedures that return status
+** values. Again the motivation is so that a compiler can generate
+** warnings when return values are wrong. Correct testing of status codes:
+**
+**      SECStatus rv;
+**      rv = some_function (some_argument);
+**      if (rv != SECSuccess)
+**              do_an_error_thing();
+**
+*/
+typedef enum _SECStatus {
+    SECWouldBlock = -2,
+    SECFailure = -1,
+    SECSuccess = 0
+} SECStatus;
+
+#define SHA256_LENGTH 		32 	/* bytes */
+#define SHA384_LENGTH 		48 	/* bytes */
+#define SHA512_LENGTH 		64 	/* bytes */
+#define HASH_LENGTH_MAX         SHA512_LENGTH
+
+/*
+ * Input block size for each hash algorithm.
+ */
+
+#define SHA256_BLOCK_LENGTH      64     /* bytes */
+#define SHA384_BLOCK_LENGTH     128     /* bytes */
+#define SHA512_BLOCK_LENGTH     128     /* bytes */
+#define HASH_BLOCK_LENGTH_MAX   SHA512_BLOCK_LENGTH
+
+/***************************************************************************
+** Opaque objects
+*/
+
+struct SHA256ContextStr     ;
+struct SHA512ContextStr     ;
+
+typedef struct SHA256ContextStr     SHA256Context;
+typedef struct SHA512ContextStr     SHA512Context;
+/* SHA384Context is really a SHA512ContextStr.  This is not a mistake. */
+typedef struct SHA512ContextStr     SHA384Context;
+
+#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_BLAPIT_H_ */
diff --git a/crypto/third_party/nss/chromium-nss.h b/crypto/third_party/nss/chromium-nss.h
new file mode 100644
index 0000000..437e6bd
--- /dev/null
+++ b/crypto/third_party/nss/chromium-nss.h
@@ -0,0 +1,79 @@
+ /* ***** BEGIN LICENSE BLOCK *****
+  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+  *
+  * The contents of this file are subject to the Mozilla Public License Version
+  * 1.1 (the "License"); you may not use this file except in compliance with
+  * the License. You may obtain a copy of the License at
+  * http://www.mozilla.org/MPL/
+  *
+  * Software distributed under the License is distributed on an "AS IS" basis,
+  * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+  * for the specific language governing rights and limitations under the
+  * License.
+  *
+  * The Original Code is the Netscape security libraries.
+  *
+  * The Initial Developer of the Original Code is
+  * Netscape Communications Corporation.
+  * Portions created by the Initial Developer are Copyright (C) 1994-2000
+  * the Initial Developer. All Rights Reserved.
+  *
+  * Contributor(s):
+  *
+  * Alternatively, the contents of this file may be used under the terms of
+  * either the GNU General Public License Version 2 or later (the "GPL"), or
+  * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+  * in which case the provisions of the GPL or the LGPL are applicable instead
+  * of those above. If you wish to allow use of your version of this file only
+  * under the terms of either the GPL or the LGPL, and not to allow others to
+  * use your version of this file under the terms of the MPL, indicate your
+  * decision by deleting the provisions above and replace them with the notice
+  * and other provisions required by the GPL or the LGPL. If you do not delete
+  * the provisions above, a recipient may use your version of this file under
+  * the terms of any one of the MPL, the GPL or the LGPL.
+  *
+  * ***** END LICENSE BLOCK ***** */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
+
+// This file contains some functions we borrowed from NSS.
+
+#include <prtypes.h>
+#include <hasht.h>
+#include <keyhi.h>
+#include <secmod.h>
+
+#include "crypto/crypto_export.h"
+
+extern "C" SECStatus emsa_pss_verify(const unsigned char *mHash,
+                                     const unsigned char *em,
+                                     unsigned int emLen,
+                                     HASH_HashType hashAlg,
+                                     HASH_HashType maskHashAlg,
+                                     unsigned int sLen);
+
+// Like PK11_ImportEncryptedPrivateKeyInfo, but hardcoded for EC, and returns
+// the SECKEYPrivateKey.
+// See https://bugzilla.mozilla.org/show_bug.cgi?id=211546
+// When we use NSS 3.13.2 or later,
+// PK11_ImportEncryptedPrivateKeyInfoAndReturnKey can be used instead.
+SECStatus ImportEncryptedECPrivateKeyInfoAndReturnKey(
+    PK11SlotInfo* slot,
+    SECKEYEncryptedPrivateKeyInfo* epki,
+    SECItem* password,
+    SECItem* nickname,
+    SECItem* public_value,
+    PRBool permanent,
+    PRBool sensitive,
+    SECKEYPrivateKey** private_key,
+    void* wincx);
+
+// Like SEC_DerSignData.
+CRYPTO_EXPORT SECStatus DerSignData(PLArenaPool *arena,
+                                    SECItem *result,
+                                    SECItem *input,
+                                    SECKEYPrivateKey *key,
+                                    SECOidTag algo_id);
+
+#endif  // CRYPTO_THIRD_PARTY_NSS_CHROMIUM_NSS_H_
diff --git a/crypto/third_party/nss/chromium-prtypes.h b/crypto/third_party/nss/chromium-prtypes.h
new file mode 100644
index 0000000..d5ea8a9
--- /dev/null
+++ b/crypto/third_party/nss/chromium-prtypes.h
@@ -0,0 +1,77 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 2002
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* Emulates the real prtypes.h. Defines the types and macros that sha512.cc
+ * needs. */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "build/build_config.h"
+
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+#define IS_LITTLE_ENDIAN 1
+#else
+#define IS_BIG_ENDIAN 1
+#endif
+
+/*
+ * The C language requires that 'long' be at least 32 bits. 2147483647 is the
+ * largest signed 32-bit integer.
+ */
+#if LONG_MAX > 2147483647L
+#define PR_BYTES_PER_LONG 8
+#else
+#define PR_BYTES_PER_LONG 4
+#endif
+
+#define HAVE_LONG_LONG
+
+#if defined(__linux__)
+#define LINUX
+#endif
+
+typedef uint8_t PRUint8;
+typedef uint32_t PRUint32;
+
+typedef int PRBool;
+
+#define PR_MIN(x,y) ((x)<(y)?(x):(y))
+
+#endif  /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_PRTYPES_H_ */
diff --git a/crypto/third_party/nss/chromium-sha256.h b/crypto/third_party/nss/chromium-sha256.h
new file mode 100644
index 0000000..52815ca
--- /dev/null
+++ b/crypto/third_party/nss/chromium-sha256.h
@@ -0,0 +1,51 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 2002
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_
+#define CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_
+
+#include "crypto/third_party/nss/chromium-prtypes.h"
+
+struct SHA256ContextStr {
+    union {
+	PRUint32 w[64];	    /* message schedule, input buffer, plus 48 words */
+	PRUint8  b[256];
+    } u;
+    PRUint32 h[8];		/* 8 state variables */
+    PRUint32 sizeHi,sizeLo;	/* 64-bit count of hashed bytes. */
+};
+
+#endif /* CRYPTO_THIRD_PARTY_NSS_CHROMIUM_SHA_256_H_ */
diff --git a/crypto/third_party/nss/rsawrapr.c b/crypto/third_party/nss/rsawrapr.c
new file mode 100644
index 0000000..73e498f
--- /dev/null
+++ b/crypto/third_party/nss/rsawrapr.c
@@ -0,0 +1,160 @@
+/*
+ * PKCS#1 encoding and decoding functions.
+ * This file is believed to contain no code licensed from other parties.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "seccomon.h"
+#include "secerr.h"
+#include "sechash.h"
+
+/* Needed for RSA-PSS functions */
+static const unsigned char eightZeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+/*
+ * Mask generation function MGF1 as defined in PKCS #1 v2.1 / RFC 3447.
+ */
+static SECStatus
+MGF1(HASH_HashType hashAlg, unsigned char *mask, unsigned int maskLen,
+     const unsigned char *mgfSeed, unsigned int mgfSeedLen)
+{
+    unsigned int digestLen;
+    PRUint32 counter, rounds;
+    unsigned char *tempHash, *temp;
+    const SECHashObject *hash;
+    void *hashContext;
+    unsigned char C[4];
+
+    hash = HASH_GetHashObject(hashAlg);
+    if (hash == NULL)
+        return SECFailure;
+
+    hashContext = (*hash->create)();
+    rounds = (maskLen + hash->length - 1) / hash->length;
+    for (counter = 0; counter < rounds; counter++) {
+        C[0] = (unsigned char)((counter >> 24) & 0xff);
+        C[1] = (unsigned char)((counter >> 16) & 0xff);
+        C[2] = (unsigned char)((counter >> 8) & 0xff);
+        C[3] = (unsigned char)(counter & 0xff);
+
+        /* This could be optimized when the clone functions in
+         * rawhash.c are implemented. */
+        (*hash->begin)(hashContext);
+        (*hash->update)(hashContext, mgfSeed, mgfSeedLen); 
+        (*hash->update)(hashContext, C, sizeof C);
+
+        tempHash = mask + counter * hash->length;
+        if (counter != (rounds-1)) {
+            (*hash->end)(hashContext, tempHash, &digestLen, hash->length);
+        } else { /* we're in the last round and need to cut the hash */
+            temp = (unsigned char *)PORT_Alloc(hash->length);
+            (*hash->end)(hashContext, temp, &digestLen, hash->length);
+            PORT_Memcpy(tempHash, temp, maskLen - counter * hash->length);
+            PORT_Free(temp);
+        }
+    }
+    (*hash->destroy)(hashContext, PR_TRUE);
+
+    return SECSuccess;
+}
+
+/*
+ * Verify a RSA-PSS signature.
+ * Described in RFC 3447, section 9.1.2.
+ * We use mHash instead of M as input.
+ * emBits from the RFC is just modBits - 1, see section 8.1.2.
+ * We only support MGF1 as the MGF.
+ *
+ * NOTE: this code assumes modBits is a multiple of 8.
+ */
+SECStatus
+emsa_pss_verify(const unsigned char *mHash,
+                const unsigned char *em, unsigned int emLen,
+                HASH_HashType hashAlg, HASH_HashType maskHashAlg,
+                unsigned int sLen)
+{
+    const SECHashObject *hash;
+    void *hash_context;
+    unsigned char *db;
+    unsigned char *H_;  /* H' from the RFC */
+    unsigned int i, dbMaskLen;
+    SECStatus rv;
+
+    hash = HASH_GetHashObject(hashAlg);
+    dbMaskLen = emLen - hash->length - 1;
+
+    /* Step 3 + 4 + 6 */
+    if ((emLen < (hash->length + sLen + 2)) ||
+	(em[emLen - 1] != 0xbc) ||
+	((em[0] & 0x80) != 0)) {
+	PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+	return SECFailure;
+    }
+
+    /* Step 7 */
+    db = (unsigned char *)PORT_Alloc(dbMaskLen);
+    if (db == NULL) {
+	PORT_SetError(SEC_ERROR_NO_MEMORY);
+	return SECFailure;
+    }
+    /* &em[dbMaskLen] points to H, used as mgfSeed */
+    MGF1(maskHashAlg, db, dbMaskLen, &em[dbMaskLen], hash->length);
+
+    /* Step 8 */
+    for (i = 0; i < dbMaskLen; i++) {
+	db[i] ^= em[i];
+    }
+
+    /* Step 9 */
+    db[0] &= 0x7f;
+
+    /* Step 10 */
+    for (i = 0; i < (dbMaskLen - sLen - 1); i++) {
+	if (db[i] != 0) {
+	    PORT_Free(db);
+	    PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+	    return SECFailure;
+	}
+    }
+    if (db[dbMaskLen - sLen - 1] != 0x01) {
+	PORT_Free(db);
+	PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+	return SECFailure;
+    }
+
+    /* Step 12 + 13 */
+    H_ = (unsigned char *)PORT_Alloc(hash->length);
+    if (H_ == NULL) {
+	PORT_Free(db);
+	PORT_SetError(SEC_ERROR_NO_MEMORY);
+	return SECFailure;
+    }
+    hash_context = (*hash->create)();
+    if (hash_context == NULL) {
+	PORT_Free(db);
+	PORT_Free(H_);
+	PORT_SetError(SEC_ERROR_NO_MEMORY);
+	return SECFailure;
+    }
+    (*hash->begin)(hash_context);
+    (*hash->update)(hash_context, eightZeros, 8);
+    (*hash->update)(hash_context, mHash, hash->length);
+    (*hash->update)(hash_context, &db[dbMaskLen - sLen], sLen);
+    (*hash->end)(hash_context, H_, &i, hash->length);
+    (*hash->destroy)(hash_context, PR_TRUE);
+
+    PORT_Free(db);
+
+    /* Step 14 */
+    if (PORT_Memcmp(H_, &em[dbMaskLen], hash->length) != 0) {
+	PORT_SetError(SEC_ERROR_BAD_SIGNATURE);
+	rv = SECFailure;
+    } else {
+	rv = SECSuccess;
+    }
+
+    PORT_Free(H_);
+    return rv;
+}
diff --git a/crypto/third_party/nss/sha512.cc b/crypto/third_party/nss/sha512.cc
new file mode 100644
index 0000000..78950cb
--- /dev/null
+++ b/crypto/third_party/nss/sha512.cc
@@ -0,0 +1,1390 @@
+/*
+ * sha512.c - implementation of SHA256, SHA384 and SHA512
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is the Netscape security libraries.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 2002
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+/* $Id: sha512.c,v 1.9 2006/10/13 16:54:04 wtchang%redhat.com Exp $ */
+
+// Prevent manual unrolling in the sha256 code, which reduces the binary code
+// size from ~10k to ~1k.  The performance should be reasonable for our use.
+#define NOUNROLL256 1
+
+#include "crypto/third_party/nss/chromium-prtypes.h"  /* for PRUintXX */
+#if defined(_X86_) || defined(SHA_NO_LONG_LONG)
+#define NOUNROLL512 1
+#undef HAVE_LONG_LONG
+#endif
+#include "crypto/third_party/nss/chromium-blapi.h"
+#include "crypto/third_party/nss/chromium-sha256.h"    /* for struct SHA256ContextStr */
+
+#include <stdlib.h>
+#include <string.h>
+#define PORT_New(type) static_cast<type*>(malloc(sizeof(type)))
+#define PORT_ZFree(ptr, len) do { memset(ptr, 0, len); free(ptr); } while (0)
+#define PORT_Strlen(s) static_cast<unsigned int>(strlen(s))
+#define PORT_Memcpy memcpy
+
+/* ============= Common constants and defines ======================= */
+
+#define W ctx->u.w
+#define B ctx->u.b
+#define H ctx->h
+
+#define SHR(x,n) (x >> n)
+#define SHL(x,n) (x << n)
+#define Ch(x,y,z)  ((x & y) ^ (~x & z))
+#define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z))
+
+/* Padding used with all flavors of SHA */
+static const PRUint8 pad[240] = {
+0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+   0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+   /* compiler will fill the rest in with zeros */
+};
+
+/* ============= SHA256 implemenmtation ================================== */
+
+/* SHA-256 constants, K256. */
+static const PRUint32 K256[64] = {
+    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+    0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+    0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+    0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+    0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+    0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+/* SHA-256 initial hash values */
+static const PRUint32 H256[8] = {
+    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
+    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
+};
+
+#if defined(_MSC_VER) && defined(_X86_)
+#ifndef FORCEINLINE
+#if (_MSC_VER >= 1200)
+#define FORCEINLINE __forceinline
+#else
+#define FORCEINLINE __inline
+#endif
+#endif
+#define FASTCALL __fastcall
+
+static FORCEINLINE PRUint32 FASTCALL
+swap4b(PRUint32 dwd)
+{
+    __asm {
+    	mov   eax,dwd
+	bswap eax
+    }
+}
+
+#define SHA_HTONL(x) swap4b(x)
+#define BYTESWAP4(x)  x = SHA_HTONL(x)
+
+#elif defined(LINUX) && defined(_X86_)
+#undef  __OPTIMIZE__
+#define __OPTIMIZE__ 1
+#undef  __pentium__
+#define __pentium__ 1
+#include <byteswap.h>
+#define SHA_HTONL(x) bswap_32(x)
+#define BYTESWAP4(x)  x = SHA_HTONL(x)
+
+#else /* neither windows nor Linux PC */
+#define SWAP4MASK  0x00FF00FF
+#define SHA_HTONL(x) (t1 = (x), t1 = (t1 << 16) | (t1 >> 16), \
+                      ((t1 & SWAP4MASK) << 8) | ((t1 >> 8) & SWAP4MASK))
+#define BYTESWAP4(x)  x = SHA_HTONL(x)
+#endif
+
+#if defined(_MSC_VER) && defined(_X86_)
+#pragma intrinsic (_lrotr, _lrotl)
+#define ROTR32(x,n) _lrotr(x,n)
+#define ROTL32(x,n) _lrotl(x,n)
+#else
+#define ROTR32(x,n) ((x >> n) | (x << ((8 * sizeof x) - n)))
+#define ROTL32(x,n) ((x << n) | (x >> ((8 * sizeof x) - n)))
+#endif
+
+/* Capitol Sigma and lower case sigma functions */
+#define S0(x) (ROTR32(x, 2) ^ ROTR32(x,13) ^ ROTR32(x,22))
+#define S1(x) (ROTR32(x, 6) ^ ROTR32(x,11) ^ ROTR32(x,25))
+#define s0(x) (t1 = x, ROTR32(t1, 7) ^ ROTR32(t1,18) ^ SHR(t1, 3))
+#define s1(x) (t2 = x, ROTR32(t2,17) ^ ROTR32(t2,19) ^ SHR(t2,10))
+
+SHA256Context *
+SHA256_NewContext(void)
+{
+    SHA256Context *ctx = PORT_New(SHA256Context);
+    return ctx;
+}
+
+void
+SHA256_DestroyContext(SHA256Context *ctx, PRBool freeit)
+{
+    if (freeit) {
+        PORT_ZFree(ctx, sizeof *ctx);
+    }
+}
+
+void
+SHA256_Begin(SHA256Context *ctx)
+{
+    memset(ctx, 0, sizeof *ctx);
+    memcpy(H, H256, sizeof H256);
+}
+
+static void
+SHA256_Compress(SHA256Context *ctx)
+{
+  {
+    register PRUint32 t1, t2;
+
+#if defined(IS_LITTLE_ENDIAN)
+    BYTESWAP4(W[0]);
+    BYTESWAP4(W[1]);
+    BYTESWAP4(W[2]);
+    BYTESWAP4(W[3]);
+    BYTESWAP4(W[4]);
+    BYTESWAP4(W[5]);
+    BYTESWAP4(W[6]);
+    BYTESWAP4(W[7]);
+    BYTESWAP4(W[8]);
+    BYTESWAP4(W[9]);
+    BYTESWAP4(W[10]);
+    BYTESWAP4(W[11]);
+    BYTESWAP4(W[12]);
+    BYTESWAP4(W[13]);
+    BYTESWAP4(W[14]);
+    BYTESWAP4(W[15]);
+#endif
+
+#define INITW(t) W[t] = (s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16])
+
+    /* prepare the "message schedule"   */
+#ifdef NOUNROLL256
+    {
+	int t;
+	for (t = 16; t < 64; ++t) {
+	    INITW(t);
+	}
+    }
+#else
+    INITW(16);
+    INITW(17);
+    INITW(18);
+    INITW(19);
+
+    INITW(20);
+    INITW(21);
+    INITW(22);
+    INITW(23);
+    INITW(24);
+    INITW(25);
+    INITW(26);
+    INITW(27);
+    INITW(28);
+    INITW(29);
+
+    INITW(30);
+    INITW(31);
+    INITW(32);
+    INITW(33);
+    INITW(34);
+    INITW(35);
+    INITW(36);
+    INITW(37);
+    INITW(38);
+    INITW(39);
+
+    INITW(40);
+    INITW(41);
+    INITW(42);
+    INITW(43);
+    INITW(44);
+    INITW(45);
+    INITW(46);
+    INITW(47);
+    INITW(48);
+    INITW(49);
+
+    INITW(50);
+    INITW(51);
+    INITW(52);
+    INITW(53);
+    INITW(54);
+    INITW(55);
+    INITW(56);
+    INITW(57);
+    INITW(58);
+    INITW(59);
+
+    INITW(60);
+    INITW(61);
+    INITW(62);
+    INITW(63);
+
+#endif
+#undef INITW
+  }
+  {
+    PRUint32 a, b, c, d, e, f, g, h;
+
+    a = H[0];
+    b = H[1];
+    c = H[2];
+    d = H[3];
+    e = H[4];
+    f = H[5];
+    g = H[6];
+    h = H[7];
+
+#define ROUND(n,a,b,c,d,e,f,g,h) \
+    h += S1(e) + Ch(e,f,g) + K256[n] + W[n]; \
+    d += h; \
+    h += S0(a) + Maj(a,b,c);
+
+#ifdef NOUNROLL256
+    {
+	int t;
+	for (t = 0; t < 64; t+= 8) {
+	    ROUND(t+0,a,b,c,d,e,f,g,h)
+	    ROUND(t+1,h,a,b,c,d,e,f,g)
+	    ROUND(t+2,g,h,a,b,c,d,e,f)
+	    ROUND(t+3,f,g,h,a,b,c,d,e)
+	    ROUND(t+4,e,f,g,h,a,b,c,d)
+	    ROUND(t+5,d,e,f,g,h,a,b,c)
+	    ROUND(t+6,c,d,e,f,g,h,a,b)
+	    ROUND(t+7,b,c,d,e,f,g,h,a)
+	}
+    }
+#else
+    ROUND( 0,a,b,c,d,e,f,g,h)
+    ROUND( 1,h,a,b,c,d,e,f,g)
+    ROUND( 2,g,h,a,b,c,d,e,f)
+    ROUND( 3,f,g,h,a,b,c,d,e)
+    ROUND( 4,e,f,g,h,a,b,c,d)
+    ROUND( 5,d,e,f,g,h,a,b,c)
+    ROUND( 6,c,d,e,f,g,h,a,b)
+    ROUND( 7,b,c,d,e,f,g,h,a)
+
+    ROUND( 8,a,b,c,d,e,f,g,h)
+    ROUND( 9,h,a,b,c,d,e,f,g)
+    ROUND(10,g,h,a,b,c,d,e,f)
+    ROUND(11,f,g,h,a,b,c,d,e)
+    ROUND(12,e,f,g,h,a,b,c,d)
+    ROUND(13,d,e,f,g,h,a,b,c)
+    ROUND(14,c,d,e,f,g,h,a,b)
+    ROUND(15,b,c,d,e,f,g,h,a)
+
+    ROUND(16,a,b,c,d,e,f,g,h)
+    ROUND(17,h,a,b,c,d,e,f,g)
+    ROUND(18,g,h,a,b,c,d,e,f)
+    ROUND(19,f,g,h,a,b,c,d,e)
+    ROUND(20,e,f,g,h,a,b,c,d)
+    ROUND(21,d,e,f,g,h,a,b,c)
+    ROUND(22,c,d,e,f,g,h,a,b)
+    ROUND(23,b,c,d,e,f,g,h,a)
+
+    ROUND(24,a,b,c,d,e,f,g,h)
+    ROUND(25,h,a,b,c,d,e,f,g)
+    ROUND(26,g,h,a,b,c,d,e,f)
+    ROUND(27,f,g,h,a,b,c,d,e)
+    ROUND(28,e,f,g,h,a,b,c,d)
+    ROUND(29,d,e,f,g,h,a,b,c)
+    ROUND(30,c,d,e,f,g,h,a,b)
+    ROUND(31,b,c,d,e,f,g,h,a)
+
+    ROUND(32,a,b,c,d,e,f,g,h)
+    ROUND(33,h,a,b,c,d,e,f,g)
+    ROUND(34,g,h,a,b,c,d,e,f)
+    ROUND(35,f,g,h,a,b,c,d,e)
+    ROUND(36,e,f,g,h,a,b,c,d)
+    ROUND(37,d,e,f,g,h,a,b,c)
+    ROUND(38,c,d,e,f,g,h,a,b)
+    ROUND(39,b,c,d,e,f,g,h,a)
+
+    ROUND(40,a,b,c,d,e,f,g,h)
+    ROUND(41,h,a,b,c,d,e,f,g)
+    ROUND(42,g,h,a,b,c,d,e,f)
+    ROUND(43,f,g,h,a,b,c,d,e)
+    ROUND(44,e,f,g,h,a,b,c,d)
+    ROUND(45,d,e,f,g,h,a,b,c)
+    ROUND(46,c,d,e,f,g,h,a,b)
+    ROUND(47,b,c,d,e,f,g,h,a)
+
+    ROUND(48,a,b,c,d,e,f,g,h)
+    ROUND(49,h,a,b,c,d,e,f,g)
+    ROUND(50,g,h,a,b,c,d,e,f)
+    ROUND(51,f,g,h,a,b,c,d,e)
+    ROUND(52,e,f,g,h,a,b,c,d)
+    ROUND(53,d,e,f,g,h,a,b,c)
+    ROUND(54,c,d,e,f,g,h,a,b)
+    ROUND(55,b,c,d,e,f,g,h,a)
+
+    ROUND(56,a,b,c,d,e,f,g,h)
+    ROUND(57,h,a,b,c,d,e,f,g)
+    ROUND(58,g,h,a,b,c,d,e,f)
+    ROUND(59,f,g,h,a,b,c,d,e)
+    ROUND(60,e,f,g,h,a,b,c,d)
+    ROUND(61,d,e,f,g,h,a,b,c)
+    ROUND(62,c,d,e,f,g,h,a,b)
+    ROUND(63,b,c,d,e,f,g,h,a)
+#endif
+
+    H[0] += a;
+    H[1] += b;
+    H[2] += c;
+    H[3] += d;
+    H[4] += e;
+    H[5] += f;
+    H[6] += g;
+    H[7] += h;
+  }
+#undef ROUND
+}
+
+#undef s0
+#undef s1
+#undef S0
+#undef S1
+
+void
+SHA256_Update(SHA256Context *ctx, const unsigned char *input,
+		    unsigned int inputLen)
+{
+    unsigned int inBuf = ctx->sizeLo & 0x3f;
+    if (!inputLen)
+    	return;
+
+    /* Add inputLen into the count of bytes processed, before processing */
+    if ((ctx->sizeLo += inputLen) < inputLen)
+    	ctx->sizeHi++;
+
+    /* if data already in buffer, attemp to fill rest of buffer */
+    if (inBuf) {
+    	unsigned int todo = SHA256_BLOCK_LENGTH - inBuf;
+	if (inputLen < todo)
+	    todo = inputLen;
+	memcpy(B + inBuf, input, todo);
+	input    += todo;
+	inputLen -= todo;
+	if (inBuf + todo == SHA256_BLOCK_LENGTH)
+	    SHA256_Compress(ctx);
+    }
+
+    /* if enough data to fill one or more whole buffers, process them. */
+    while (inputLen >= SHA256_BLOCK_LENGTH) {
+    	memcpy(B, input, SHA256_BLOCK_LENGTH);
+	input    += SHA256_BLOCK_LENGTH;
+	inputLen -= SHA256_BLOCK_LENGTH;
+	SHA256_Compress(ctx);
+    }
+    /* if data left over, fill it into buffer */
+    if (inputLen)
+    	memcpy(B, input, inputLen);
+}
+
+void
+SHA256_End(SHA256Context *ctx, unsigned char *digest,
+           unsigned int *digestLen, unsigned int maxDigestLen)
+{
+    unsigned int inBuf = ctx->sizeLo & 0x3f;
+    unsigned int padLen = (inBuf < 56) ? (56 - inBuf) : (56 + 64 - inBuf);
+    PRUint32 hi, lo;
+#ifdef SWAP4MASK
+    PRUint32 t1;
+#endif
+
+    hi = (ctx->sizeHi << 3) | (ctx->sizeLo >> 29);
+    lo = (ctx->sizeLo << 3);
+
+    SHA256_Update(ctx, pad, padLen);
+
+#if defined(IS_LITTLE_ENDIAN)
+    W[14] = SHA_HTONL(hi);
+    W[15] = SHA_HTONL(lo);
+#else
+    W[14] = hi;
+    W[15] = lo;
+#endif
+    SHA256_Compress(ctx);
+
+    /* now output the answer */
+#if defined(IS_LITTLE_ENDIAN)
+    BYTESWAP4(H[0]);
+    BYTESWAP4(H[1]);
+    BYTESWAP4(H[2]);
+    BYTESWAP4(H[3]);
+    BYTESWAP4(H[4]);
+    BYTESWAP4(H[5]);
+    BYTESWAP4(H[6]);
+    BYTESWAP4(H[7]);
+#endif
+    padLen = PR_MIN(SHA256_LENGTH, maxDigestLen);
+    memcpy(digest, H, padLen);
+    if (digestLen)
+	*digestLen = padLen;
+}
+
+void SHA256_Clone(SHA256Context* dest, SHA256Context* src)
+{
+  memcpy(dest, src, sizeof *dest);
+}
+
+/* Comment out unused code, mostly the SHA384 and SHA512 implementations. */
+#if 0
+SECStatus
+SHA256_HashBuf(unsigned char *dest, const unsigned char *src,
+               unsigned int src_length)
+{
+    SHA256Context ctx;
+    unsigned int outLen;
+
+    SHA256_Begin(&ctx);
+    SHA256_Update(&ctx, src, src_length);
+    SHA256_End(&ctx, dest, &outLen, SHA256_LENGTH);
+
+    return SECSuccess;
+}
+
+
+SECStatus
+SHA256_Hash(unsigned char *dest, const char *src)
+{
+    return SHA256_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
+}
+
+
+void SHA256_TraceState(SHA256Context *ctx) { }
+
+unsigned int
+SHA256_FlattenSize(SHA256Context *ctx)
+{
+    return sizeof *ctx;
+}
+
+SECStatus
+SHA256_Flatten(SHA256Context *ctx,unsigned char *space)
+{
+    PORT_Memcpy(space, ctx, sizeof *ctx);
+    return SECSuccess;
+}
+
+SHA256Context *
+SHA256_Resurrect(unsigned char *space, void *arg)
+{
+    SHA256Context *ctx = SHA256_NewContext();
+    if (ctx)
+	PORT_Memcpy(ctx, space, sizeof *ctx);
+    return ctx;
+}
+
+/* ======= SHA512 and SHA384 common constants and defines ================= */
+
+/* common #defines for SHA512 and SHA384 */
+#if defined(HAVE_LONG_LONG)
+#define ROTR64(x,n) ((x >> n) | (x << (64 - n)))
+#define ROTL64(x,n) ((x << n) | (x >> (64 - n)))
+
+#define S0(x) (ROTR64(x,28) ^ ROTR64(x,34) ^ ROTR64(x,39))
+#define S1(x) (ROTR64(x,14) ^ ROTR64(x,18) ^ ROTR64(x,41))
+#define s0(x) (t1 = x, ROTR64(t1, 1) ^ ROTR64(t1, 8) ^ SHR(t1,7))
+#define s1(x) (t2 = x, ROTR64(t2,19) ^ ROTR64(t2,61) ^ SHR(t2,6))
+
+#if PR_BYTES_PER_LONG == 8
+#define ULLC(hi,lo) 0x ## hi ## lo ## UL
+#elif defined(_MSC_VER)
+#define ULLC(hi,lo) 0x ## hi ## lo ## ui64
+#else
+#define ULLC(hi,lo) 0x ## hi ## lo ## ULL
+#endif
+
+#define SHA_MASK16 ULLC(0000FFFF,0000FFFF)
+#define SHA_MASK8  ULLC(00FF00FF,00FF00FF)
+#define SHA_HTONLL(x) (t1 = x, \
+  t1 = ((t1 & SHA_MASK8 ) <<  8) | ((t1 >>  8) & SHA_MASK8 ), \
+  t1 = ((t1 & SHA_MASK16) << 16) | ((t1 >> 16) & SHA_MASK16), \
+  (t1 >> 32) | (t1 << 32))
+#define BYTESWAP8(x)  x = SHA_HTONLL(x)
+
+#else /* no long long */
+
+#if defined(IS_LITTLE_ENDIAN)
+#define ULLC(hi,lo) { 0x ## lo ## U, 0x ## hi ## U }
+#else
+#define ULLC(hi,lo) { 0x ## hi ## U, 0x ## lo ## U }
+#endif
+
+#define SHA_HTONLL(x) ( BYTESWAP4(x.lo), BYTESWAP4(x.hi), \
+   x.hi ^= x.lo ^= x.hi ^= x.lo, x)
+#define BYTESWAP8(x)  do { PRUint32 tmp; BYTESWAP4(x.lo); BYTESWAP4(x.hi); \
+   tmp = x.lo; x.lo = x.hi; x.hi = tmp; } while (0)
+#endif
+
+/* SHA-384 and SHA-512 constants, K512. */
+static const PRUint64 K512[80] = {
+#if PR_BYTES_PER_LONG == 8
+     0x428a2f98d728ae22UL ,  0x7137449123ef65cdUL ,
+     0xb5c0fbcfec4d3b2fUL ,  0xe9b5dba58189dbbcUL ,
+     0x3956c25bf348b538UL ,  0x59f111f1b605d019UL ,
+     0x923f82a4af194f9bUL ,  0xab1c5ed5da6d8118UL ,
+     0xd807aa98a3030242UL ,  0x12835b0145706fbeUL ,
+     0x243185be4ee4b28cUL ,  0x550c7dc3d5ffb4e2UL ,
+     0x72be5d74f27b896fUL ,  0x80deb1fe3b1696b1UL ,
+     0x9bdc06a725c71235UL ,  0xc19bf174cf692694UL ,
+     0xe49b69c19ef14ad2UL ,  0xefbe4786384f25e3UL ,
+     0x0fc19dc68b8cd5b5UL ,  0x240ca1cc77ac9c65UL ,
+     0x2de92c6f592b0275UL ,  0x4a7484aa6ea6e483UL ,
+     0x5cb0a9dcbd41fbd4UL ,  0x76f988da831153b5UL ,
+     0x983e5152ee66dfabUL ,  0xa831c66d2db43210UL ,
+     0xb00327c898fb213fUL ,  0xbf597fc7beef0ee4UL ,
+     0xc6e00bf33da88fc2UL ,  0xd5a79147930aa725UL ,
+     0x06ca6351e003826fUL ,  0x142929670a0e6e70UL ,
+     0x27b70a8546d22ffcUL ,  0x2e1b21385c26c926UL ,
+     0x4d2c6dfc5ac42aedUL ,  0x53380d139d95b3dfUL ,
+     0x650a73548baf63deUL ,  0x766a0abb3c77b2a8UL ,
+     0x81c2c92e47edaee6UL ,  0x92722c851482353bUL ,
+     0xa2bfe8a14cf10364UL ,  0xa81a664bbc423001UL ,
+     0xc24b8b70d0f89791UL ,  0xc76c51a30654be30UL ,
+     0xd192e819d6ef5218UL ,  0xd69906245565a910UL ,
+     0xf40e35855771202aUL ,  0x106aa07032bbd1b8UL ,
+     0x19a4c116b8d2d0c8UL ,  0x1e376c085141ab53UL ,
+     0x2748774cdf8eeb99UL ,  0x34b0bcb5e19b48a8UL ,
+     0x391c0cb3c5c95a63UL ,  0x4ed8aa4ae3418acbUL ,
+     0x5b9cca4f7763e373UL ,  0x682e6ff3d6b2b8a3UL ,
+     0x748f82ee5defb2fcUL ,  0x78a5636f43172f60UL ,
+     0x84c87814a1f0ab72UL ,  0x8cc702081a6439ecUL ,
+     0x90befffa23631e28UL ,  0xa4506cebde82bde9UL ,
+     0xbef9a3f7b2c67915UL ,  0xc67178f2e372532bUL ,
+     0xca273eceea26619cUL ,  0xd186b8c721c0c207UL ,
+     0xeada7dd6cde0eb1eUL ,  0xf57d4f7fee6ed178UL ,
+     0x06f067aa72176fbaUL ,  0x0a637dc5a2c898a6UL ,
+     0x113f9804bef90daeUL ,  0x1b710b35131c471bUL ,
+     0x28db77f523047d84UL ,  0x32caab7b40c72493UL ,
+     0x3c9ebe0a15c9bebcUL ,  0x431d67c49c100d4cUL ,
+     0x4cc5d4becb3e42b6UL ,  0x597f299cfc657e2aUL ,
+     0x5fcb6fab3ad6faecUL ,  0x6c44198c4a475817UL
+#else
+    ULLC(428a2f98,d728ae22), ULLC(71374491,23ef65cd),
+    ULLC(b5c0fbcf,ec4d3b2f), ULLC(e9b5dba5,8189dbbc),
+    ULLC(3956c25b,f348b538), ULLC(59f111f1,b605d019),
+    ULLC(923f82a4,af194f9b), ULLC(ab1c5ed5,da6d8118),
+    ULLC(d807aa98,a3030242), ULLC(12835b01,45706fbe),
+    ULLC(243185be,4ee4b28c), ULLC(550c7dc3,d5ffb4e2),
+    ULLC(72be5d74,f27b896f), ULLC(80deb1fe,3b1696b1),
+    ULLC(9bdc06a7,25c71235), ULLC(c19bf174,cf692694),
+    ULLC(e49b69c1,9ef14ad2), ULLC(efbe4786,384f25e3),
+    ULLC(0fc19dc6,8b8cd5b5), ULLC(240ca1cc,77ac9c65),
+    ULLC(2de92c6f,592b0275), ULLC(4a7484aa,6ea6e483),
+    ULLC(5cb0a9dc,bd41fbd4), ULLC(76f988da,831153b5),
+    ULLC(983e5152,ee66dfab), ULLC(a831c66d,2db43210),
+    ULLC(b00327c8,98fb213f), ULLC(bf597fc7,beef0ee4),
+    ULLC(c6e00bf3,3da88fc2), ULLC(d5a79147,930aa725),
+    ULLC(06ca6351,e003826f), ULLC(14292967,0a0e6e70),
+    ULLC(27b70a85,46d22ffc), ULLC(2e1b2138,5c26c926),
+    ULLC(4d2c6dfc,5ac42aed), ULLC(53380d13,9d95b3df),
+    ULLC(650a7354,8baf63de), ULLC(766a0abb,3c77b2a8),
+    ULLC(81c2c92e,47edaee6), ULLC(92722c85,1482353b),
+    ULLC(a2bfe8a1,4cf10364), ULLC(a81a664b,bc423001),
+    ULLC(c24b8b70,d0f89791), ULLC(c76c51a3,0654be30),
+    ULLC(d192e819,d6ef5218), ULLC(d6990624,5565a910),
+    ULLC(f40e3585,5771202a), ULLC(106aa070,32bbd1b8),
+    ULLC(19a4c116,b8d2d0c8), ULLC(1e376c08,5141ab53),
+    ULLC(2748774c,df8eeb99), ULLC(34b0bcb5,e19b48a8),
+    ULLC(391c0cb3,c5c95a63), ULLC(4ed8aa4a,e3418acb),
+    ULLC(5b9cca4f,7763e373), ULLC(682e6ff3,d6b2b8a3),
+    ULLC(748f82ee,5defb2fc), ULLC(78a5636f,43172f60),
+    ULLC(84c87814,a1f0ab72), ULLC(8cc70208,1a6439ec),
+    ULLC(90befffa,23631e28), ULLC(a4506ceb,de82bde9),
+    ULLC(bef9a3f7,b2c67915), ULLC(c67178f2,e372532b),
+    ULLC(ca273ece,ea26619c), ULLC(d186b8c7,21c0c207),
+    ULLC(eada7dd6,cde0eb1e), ULLC(f57d4f7f,ee6ed178),
+    ULLC(06f067aa,72176fba), ULLC(0a637dc5,a2c898a6),
+    ULLC(113f9804,bef90dae), ULLC(1b710b35,131c471b),
+    ULLC(28db77f5,23047d84), ULLC(32caab7b,40c72493),
+    ULLC(3c9ebe0a,15c9bebc), ULLC(431d67c4,9c100d4c),
+    ULLC(4cc5d4be,cb3e42b6), ULLC(597f299c,fc657e2a),
+    ULLC(5fcb6fab,3ad6faec), ULLC(6c44198c,4a475817)
+#endif
+};
+
+struct SHA512ContextStr {
+    union {
+	PRUint64 w[80];	    /* message schedule, input buffer, plus 64 words */
+	PRUint32 l[160];
+	PRUint8  b[640];
+    } u;
+    PRUint64 h[8];	    /* 8 state variables */
+    PRUint64 sizeLo;	    /* 64-bit count of hashed bytes. */
+};
+
+/* =========== SHA512 implementation ===================================== */
+
+/* SHA-512 initial hash values */
+static const PRUint64 H512[8] = {
+#if PR_BYTES_PER_LONG == 8
+     0x6a09e667f3bcc908UL ,  0xbb67ae8584caa73bUL ,
+     0x3c6ef372fe94f82bUL ,  0xa54ff53a5f1d36f1UL ,
+     0x510e527fade682d1UL ,  0x9b05688c2b3e6c1fUL ,
+     0x1f83d9abfb41bd6bUL ,  0x5be0cd19137e2179UL
+#else
+    ULLC(6a09e667,f3bcc908), ULLC(bb67ae85,84caa73b),
+    ULLC(3c6ef372,fe94f82b), ULLC(a54ff53a,5f1d36f1),
+    ULLC(510e527f,ade682d1), ULLC(9b05688c,2b3e6c1f),
+    ULLC(1f83d9ab,fb41bd6b), ULLC(5be0cd19,137e2179)
+#endif
+};
+
+
+SHA512Context *
+SHA512_NewContext(void)
+{
+    SHA512Context *ctx = PORT_New(SHA512Context);
+    return ctx;
+}
+
+void
+SHA512_DestroyContext(SHA512Context *ctx, PRBool freeit)
+{
+    if (freeit) {
+        PORT_ZFree(ctx, sizeof *ctx);
+    }
+}
+
+void
+SHA512_Begin(SHA512Context *ctx)
+{
+    memset(ctx, 0, sizeof *ctx);
+    memcpy(H, H512, sizeof H512);
+}
+
+#if defined(SHA512_TRACE)
+#if defined(HAVE_LONG_LONG)
+#define DUMP(n,a,d,e,h) printf(" t = %2d, %s = %016lx, %s = %016lx\n", \
+			       n, #e, d, #a, h);
+#else
+#define DUMP(n,a,d,e,h) printf(" t = %2d, %s = %08x%08x, %s = %08x%08x\n", \
+			       n, #e, d.hi, d.lo, #a, h.hi, h.lo);
+#endif
+#else
+#define DUMP(n,a,d,e,h)
+#endif
+
+#if defined(HAVE_LONG_LONG)
+
+#define ADDTO(x,y) y += x
+
+#define INITW(t) W[t] = (s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16])
+
+#define ROUND(n,a,b,c,d,e,f,g,h) \
+    h += S1(e) + Ch(e,f,g) + K512[n] + W[n]; \
+    d += h; \
+    h += S0(a) + Maj(a,b,c); \
+    DUMP(n,a,d,e,h)
+
+#else /* use only 32-bit variables, and don't unroll loops */
+
+#undef  NOUNROLL512
+#define NOUNROLL512 1
+
+#define ADDTO(x,y) y.lo += x.lo; y.hi += x.hi + (x.lo > y.lo)
+
+#define ROTR64a(x,n,lo,hi) (x.lo >> n | x.hi << (32-n))
+#define ROTR64A(x,n,lo,hi) (x.lo << (64-n) | x.hi >> (n-32))
+#define  SHR64a(x,n,lo,hi) (x.lo >> n | x.hi << (32-n))
+
+/* Capital Sigma and lower case sigma functions */
+#define s0lo(x) (ROTR64a(x,1,lo,hi) ^ ROTR64a(x,8,lo,hi) ^ SHR64a(x,7,lo,hi))
+#define s0hi(x) (ROTR64a(x,1,hi,lo) ^ ROTR64a(x,8,hi,lo) ^ (x.hi >> 7))
+
+#define s1lo(x) (ROTR64a(x,19,lo,hi) ^ ROTR64A(x,61,lo,hi) ^ SHR64a(x,6,lo,hi))
+#define s1hi(x) (ROTR64a(x,19,hi,lo) ^ ROTR64A(x,61,hi,lo) ^ (x.hi >> 6))
+
+#define S0lo(x)(ROTR64a(x,28,lo,hi) ^ ROTR64A(x,34,lo,hi) ^ ROTR64A(x,39,lo,hi))
+#define S0hi(x)(ROTR64a(x,28,hi,lo) ^ ROTR64A(x,34,hi,lo) ^ ROTR64A(x,39,hi,lo))
+
+#define S1lo(x)(ROTR64a(x,14,lo,hi) ^ ROTR64a(x,18,lo,hi) ^ ROTR64A(x,41,lo,hi))
+#define S1hi(x)(ROTR64a(x,14,hi,lo) ^ ROTR64a(x,18,hi,lo) ^ ROTR64A(x,41,hi,lo))
+
+/* 32-bit versions of Ch and Maj */
+#define Chxx(x,y,z,lo) ((x.lo & y.lo) ^ (~x.lo & z.lo))
+#define Majx(x,y,z,lo) ((x.lo & y.lo) ^ (x.lo & z.lo) ^ (y.lo & z.lo))
+
+#define INITW(t) \
+    do { \
+	PRUint32 lo, tm; \
+	PRUint32 cy = 0; \
+	lo = s1lo(W[t-2]); \
+	lo += (tm = W[t-7].lo);     if (lo < tm) cy++; \
+	lo += (tm = s0lo(W[t-15])); if (lo < tm) cy++; \
+	lo += (tm = W[t-16].lo);    if (lo < tm) cy++; \
+	W[t].lo = lo; \
+	W[t].hi = cy + s1hi(W[t-2]) + W[t-7].hi + s0hi(W[t-15]) + W[t-16].hi; \
+    } while (0)
+
+#define ROUND(n,a,b,c,d,e,f,g,h) \
+    { \
+	PRUint32 lo, tm, cy; \
+	lo  = S1lo(e); \
+	lo += (tm = Chxx(e,f,g,lo));    cy = (lo < tm); \
+	lo += (tm = K512[n].lo);	if (lo < tm) cy++; \
+	lo += (tm =    W[n].lo);	if (lo < tm) cy++; \
+	h.lo += lo;			if (h.lo < lo) cy++; \
+	h.hi += cy + S1hi(e) + Chxx(e,f,g,hi) + K512[n].hi + W[n].hi; \
+	d.lo += h.lo; \
+	d.hi += h.hi + (d.lo < h.lo); \
+	lo  = S0lo(a);  \
+	lo += (tm = Majx(a,b,c,lo));	cy = (lo < tm); \
+	h.lo += lo;			if (h.lo < lo) cy++; \
+	h.hi += cy + S0hi(a) + Majx(a,b,c,hi); \
+	DUMP(n,a,d,e,h) \
+    }
+#endif
+
+static void
+SHA512_Compress(SHA512Context *ctx)
+{
+#if defined(IS_LITTLE_ENDIAN)
+  {
+#if defined(HAVE_LONG_LONG)
+    PRUint64 t1;
+#else
+    PRUint32 t1;
+#endif
+    BYTESWAP8(W[0]);
+    BYTESWAP8(W[1]);
+    BYTESWAP8(W[2]);
+    BYTESWAP8(W[3]);
+    BYTESWAP8(W[4]);
+    BYTESWAP8(W[5]);
+    BYTESWAP8(W[6]);
+    BYTESWAP8(W[7]);
+    BYTESWAP8(W[8]);
+    BYTESWAP8(W[9]);
+    BYTESWAP8(W[10]);
+    BYTESWAP8(W[11]);
+    BYTESWAP8(W[12]);
+    BYTESWAP8(W[13]);
+    BYTESWAP8(W[14]);
+    BYTESWAP8(W[15]);
+  }
+#endif
+
+  {
+    PRUint64 t1, t2;
+#ifdef NOUNROLL512
+    {
+	/* prepare the "message schedule"   */
+	int t;
+	for (t = 16; t < 80; ++t) {
+	    INITW(t);
+	}
+    }
+#else
+    INITW(16);
+    INITW(17);
+    INITW(18);
+    INITW(19);
+
+    INITW(20);
+    INITW(21);
+    INITW(22);
+    INITW(23);
+    INITW(24);
+    INITW(25);
+    INITW(26);
+    INITW(27);
+    INITW(28);
+    INITW(29);
+
+    INITW(30);
+    INITW(31);
+    INITW(32);
+    INITW(33);
+    INITW(34);
+    INITW(35);
+    INITW(36);
+    INITW(37);
+    INITW(38);
+    INITW(39);
+
+    INITW(40);
+    INITW(41);
+    INITW(42);
+    INITW(43);
+    INITW(44);
+    INITW(45);
+    INITW(46);
+    INITW(47);
+    INITW(48);
+    INITW(49);
+
+    INITW(50);
+    INITW(51);
+    INITW(52);
+    INITW(53);
+    INITW(54);
+    INITW(55);
+    INITW(56);
+    INITW(57);
+    INITW(58);
+    INITW(59);
+
+    INITW(60);
+    INITW(61);
+    INITW(62);
+    INITW(63);
+    INITW(64);
+    INITW(65);
+    INITW(66);
+    INITW(67);
+    INITW(68);
+    INITW(69);
+
+    INITW(70);
+    INITW(71);
+    INITW(72);
+    INITW(73);
+    INITW(74);
+    INITW(75);
+    INITW(76);
+    INITW(77);
+    INITW(78);
+    INITW(79);
+#endif
+  }
+#ifdef SHA512_TRACE
+  {
+    int i;
+    for (i = 0; i < 80; ++i) {
+#ifdef HAVE_LONG_LONG
+	printf("W[%2d] = %016lx\n", i, W[i]);
+#else
+	printf("W[%2d] = %08x%08x\n", i, W[i].hi, W[i].lo);
+#endif
+    }
+  }
+#endif
+  {
+    PRUint64 a, b, c, d, e, f, g, h;
+
+    a = H[0];
+    b = H[1];
+    c = H[2];
+    d = H[3];
+    e = H[4];
+    f = H[5];
+    g = H[6];
+    h = H[7];
+
+#ifdef NOUNROLL512
+    {
+	int t;
+	for (t = 0; t < 80; t+= 8) {
+	    ROUND(t+0,a,b,c,d,e,f,g,h)
+	    ROUND(t+1,h,a,b,c,d,e,f,g)
+	    ROUND(t+2,g,h,a,b,c,d,e,f)
+	    ROUND(t+3,f,g,h,a,b,c,d,e)
+	    ROUND(t+4,e,f,g,h,a,b,c,d)
+	    ROUND(t+5,d,e,f,g,h,a,b,c)
+	    ROUND(t+6,c,d,e,f,g,h,a,b)
+	    ROUND(t+7,b,c,d,e,f,g,h,a)
+	}
+    }
+#else
+    ROUND( 0,a,b,c,d,e,f,g,h)
+    ROUND( 1,h,a,b,c,d,e,f,g)
+    ROUND( 2,g,h,a,b,c,d,e,f)
+    ROUND( 3,f,g,h,a,b,c,d,e)
+    ROUND( 4,e,f,g,h,a,b,c,d)
+    ROUND( 5,d,e,f,g,h,a,b,c)
+    ROUND( 6,c,d,e,f,g,h,a,b)
+    ROUND( 7,b,c,d,e,f,g,h,a)
+
+    ROUND( 8,a,b,c,d,e,f,g,h)
+    ROUND( 9,h,a,b,c,d,e,f,g)
+    ROUND(10,g,h,a,b,c,d,e,f)
+    ROUND(11,f,g,h,a,b,c,d,e)
+    ROUND(12,e,f,g,h,a,b,c,d)
+    ROUND(13,d,e,f,g,h,a,b,c)
+    ROUND(14,c,d,e,f,g,h,a,b)
+    ROUND(15,b,c,d,e,f,g,h,a)
+
+    ROUND(16,a,b,c,d,e,f,g,h)
+    ROUND(17,h,a,b,c,d,e,f,g)
+    ROUND(18,g,h,a,b,c,d,e,f)
+    ROUND(19,f,g,h,a,b,c,d,e)
+    ROUND(20,e,f,g,h,a,b,c,d)
+    ROUND(21,d,e,f,g,h,a,b,c)
+    ROUND(22,c,d,e,f,g,h,a,b)
+    ROUND(23,b,c,d,e,f,g,h,a)
+
+    ROUND(24,a,b,c,d,e,f,g,h)
+    ROUND(25,h,a,b,c,d,e,f,g)
+    ROUND(26,g,h,a,b,c,d,e,f)
+    ROUND(27,f,g,h,a,b,c,d,e)
+    ROUND(28,e,f,g,h,a,b,c,d)
+    ROUND(29,d,e,f,g,h,a,b,c)
+    ROUND(30,c,d,e,f,g,h,a,b)
+    ROUND(31,b,c,d,e,f,g,h,a)
+
+    ROUND(32,a,b,c,d,e,f,g,h)
+    ROUND(33,h,a,b,c,d,e,f,g)
+    ROUND(34,g,h,a,b,c,d,e,f)
+    ROUND(35,f,g,h,a,b,c,d,e)
+    ROUND(36,e,f,g,h,a,b,c,d)
+    ROUND(37,d,e,f,g,h,a,b,c)
+    ROUND(38,c,d,e,f,g,h,a,b)
+    ROUND(39,b,c,d,e,f,g,h,a)
+
+    ROUND(40,a,b,c,d,e,f,g,h)
+    ROUND(41,h,a,b,c,d,e,f,g)
+    ROUND(42,g,h,a,b,c,d,e,f)
+    ROUND(43,f,g,h,a,b,c,d,e)
+    ROUND(44,e,f,g,h,a,b,c,d)
+    ROUND(45,d,e,f,g,h,a,b,c)
+    ROUND(46,c,d,e,f,g,h,a,b)
+    ROUND(47,b,c,d,e,f,g,h,a)
+
+    ROUND(48,a,b,c,d,e,f,g,h)
+    ROUND(49,h,a,b,c,d,e,f,g)
+    ROUND(50,g,h,a,b,c,d,e,f)
+    ROUND(51,f,g,h,a,b,c,d,e)
+    ROUND(52,e,f,g,h,a,b,c,d)
+    ROUND(53,d,e,f,g,h,a,b,c)
+    ROUND(54,c,d,e,f,g,h,a,b)
+    ROUND(55,b,c,d,e,f,g,h,a)
+
+    ROUND(56,a,b,c,d,e,f,g,h)
+    ROUND(57,h,a,b,c,d,e,f,g)
+    ROUND(58,g,h,a,b,c,d,e,f)
+    ROUND(59,f,g,h,a,b,c,d,e)
+    ROUND(60,e,f,g,h,a,b,c,d)
+    ROUND(61,d,e,f,g,h,a,b,c)
+    ROUND(62,c,d,e,f,g,h,a,b)
+    ROUND(63,b,c,d,e,f,g,h,a)
+
+    ROUND(64,a,b,c,d,e,f,g,h)
+    ROUND(65,h,a,b,c,d,e,f,g)
+    ROUND(66,g,h,a,b,c,d,e,f)
+    ROUND(67,f,g,h,a,b,c,d,e)
+    ROUND(68,e,f,g,h,a,b,c,d)
+    ROUND(69,d,e,f,g,h,a,b,c)
+    ROUND(70,c,d,e,f,g,h,a,b)
+    ROUND(71,b,c,d,e,f,g,h,a)
+
+    ROUND(72,a,b,c,d,e,f,g,h)
+    ROUND(73,h,a,b,c,d,e,f,g)
+    ROUND(74,g,h,a,b,c,d,e,f)
+    ROUND(75,f,g,h,a,b,c,d,e)
+    ROUND(76,e,f,g,h,a,b,c,d)
+    ROUND(77,d,e,f,g,h,a,b,c)
+    ROUND(78,c,d,e,f,g,h,a,b)
+    ROUND(79,b,c,d,e,f,g,h,a)
+#endif
+
+    ADDTO(a,H[0]);
+    ADDTO(b,H[1]);
+    ADDTO(c,H[2]);
+    ADDTO(d,H[3]);
+    ADDTO(e,H[4]);
+    ADDTO(f,H[5]);
+    ADDTO(g,H[6]);
+    ADDTO(h,H[7]);
+  }
+}
+
+void
+SHA512_Update(SHA512Context *ctx, const unsigned char *input,
+              unsigned int inputLen)
+{
+    unsigned int inBuf;
+    if (!inputLen)
+    	return;
+
+#if defined(HAVE_LONG_LONG)
+    inBuf = (unsigned int)ctx->sizeLo & 0x7f;
+    /* Add inputLen into the count of bytes processed, before processing */
+    ctx->sizeLo += inputLen;
+#else
+    inBuf = (unsigned int)ctx->sizeLo.lo & 0x7f;
+    ctx->sizeLo.lo += inputLen;
+    if (ctx->sizeLo.lo < inputLen) ctx->sizeLo.hi++;
+#endif
+
+    /* if data already in buffer, attempt to fill rest of buffer */
+    if (inBuf) {
+    	unsigned int todo = SHA512_BLOCK_LENGTH - inBuf;
+	if (inputLen < todo)
+	    todo = inputLen;
+	memcpy(B + inBuf, input, todo);
+	input    += todo;
+	inputLen -= todo;
+	if (inBuf + todo == SHA512_BLOCK_LENGTH)
+	    SHA512_Compress(ctx);
+    }
+
+    /* if enough data to fill one or more whole buffers, process them. */
+    while (inputLen >= SHA512_BLOCK_LENGTH) {
+    	memcpy(B, input, SHA512_BLOCK_LENGTH);
+	input    += SHA512_BLOCK_LENGTH;
+	inputLen -= SHA512_BLOCK_LENGTH;
+	SHA512_Compress(ctx);
+    }
+    /* if data left over, fill it into buffer */
+    if (inputLen)
+    	memcpy(B, input, inputLen);
+}
+
+void
+SHA512_End(SHA512Context *ctx, unsigned char *digest,
+           unsigned int *digestLen, unsigned int maxDigestLen)
+{
+#if defined(HAVE_LONG_LONG)
+    unsigned int inBuf  = (unsigned int)ctx->sizeLo & 0x7f;
+    unsigned int padLen = (inBuf < 112) ? (112 - inBuf) : (112 + 128 - inBuf);
+    PRUint64 lo, t1;
+    lo = (ctx->sizeLo << 3);
+#else
+    unsigned int inBuf  = (unsigned int)ctx->sizeLo.lo & 0x7f;
+    unsigned int padLen = (inBuf < 112) ? (112 - inBuf) : (112 + 128 - inBuf);
+    PRUint64 lo = ctx->sizeLo;
+    PRUint32 t1;
+    lo.lo <<= 3;
+#endif
+
+    SHA512_Update(ctx, pad, padLen);
+
+#if defined(HAVE_LONG_LONG)
+    W[14] = 0;
+#else
+    W[14].lo = 0;
+    W[14].hi = 0;
+#endif
+
+    W[15] = lo;
+#if defined(IS_LITTLE_ENDIAN)
+    BYTESWAP8(W[15]);
+#endif
+    SHA512_Compress(ctx);
+
+    /* now output the answer */
+#if defined(IS_LITTLE_ENDIAN)
+    BYTESWAP8(H[0]);
+    BYTESWAP8(H[1]);
+    BYTESWAP8(H[2]);
+    BYTESWAP8(H[3]);
+    BYTESWAP8(H[4]);
+    BYTESWAP8(H[5]);
+    BYTESWAP8(H[6]);
+    BYTESWAP8(H[7]);
+#endif
+    padLen = PR_MIN(SHA512_LENGTH, maxDigestLen);
+    memcpy(digest, H, padLen);
+    if (digestLen)
+	*digestLen = padLen;
+}
+
+SECStatus
+SHA512_HashBuf(unsigned char *dest, const unsigned char *src,
+               unsigned int src_length)
+{
+    SHA512Context ctx;
+    unsigned int outLen;
+
+    SHA512_Begin(&ctx);
+    SHA512_Update(&ctx, src, src_length);
+    SHA512_End(&ctx, dest, &outLen, SHA512_LENGTH);
+
+    return SECSuccess;
+}
+
+
+SECStatus
+SHA512_Hash(unsigned char *dest, const char *src)
+{
+    return SHA512_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
+}
+
+
+void SHA512_TraceState(SHA512Context *ctx) { }
+
+unsigned int
+SHA512_FlattenSize(SHA512Context *ctx)
+{
+    return sizeof *ctx;
+}
+
+SECStatus
+SHA512_Flatten(SHA512Context *ctx,unsigned char *space)
+{
+    PORT_Memcpy(space, ctx, sizeof *ctx);
+    return SECSuccess;
+}
+
+SHA512Context *
+SHA512_Resurrect(unsigned char *space, void *arg)
+{
+    SHA512Context *ctx = SHA512_NewContext();
+    if (ctx)
+	PORT_Memcpy(ctx, space, sizeof *ctx);
+    return ctx;
+}
+
+void SHA512_Clone(SHA512Context *dest, SHA512Context *src)
+{
+    memcpy(dest, src, sizeof *dest);
+}
+
+/* ======================================================================= */
+/* SHA384 uses a SHA512Context as the real context.
+** The only differences between SHA384 and SHA512 are:
+** a) the initialization values for the context, and
+** b) the number of bytes of data produced as output.
+*/
+
+/* SHA-384 initial hash values */
+static const PRUint64 H384[8] = {
+#if PR_BYTES_PER_LONG == 8
+     0xcbbb9d5dc1059ed8UL ,  0x629a292a367cd507UL ,
+     0x9159015a3070dd17UL ,  0x152fecd8f70e5939UL ,
+     0x67332667ffc00b31UL ,  0x8eb44a8768581511UL ,
+     0xdb0c2e0d64f98fa7UL ,  0x47b5481dbefa4fa4UL
+#else
+    ULLC(cbbb9d5d,c1059ed8), ULLC(629a292a,367cd507),
+    ULLC(9159015a,3070dd17), ULLC(152fecd8,f70e5939),
+    ULLC(67332667,ffc00b31), ULLC(8eb44a87,68581511),
+    ULLC(db0c2e0d,64f98fa7), ULLC(47b5481d,befa4fa4)
+#endif
+};
+
+SHA384Context *
+SHA384_NewContext(void)
+{
+    return SHA512_NewContext();
+}
+
+void
+SHA384_DestroyContext(SHA384Context *ctx, PRBool freeit)
+{
+    SHA512_DestroyContext(ctx, freeit);
+}
+
+void
+SHA384_Begin(SHA384Context *ctx)
+{
+    memset(ctx, 0, sizeof *ctx);
+    memcpy(H, H384, sizeof H384);
+}
+
+void
+SHA384_Update(SHA384Context *ctx, const unsigned char *input,
+		    unsigned int inputLen)
+{
+    SHA512_Update(ctx, input, inputLen);
+}
+
+void
+SHA384_End(SHA384Context *ctx, unsigned char *digest,
+		 unsigned int *digestLen, unsigned int maxDigestLen)
+{
+#define SHA_MIN(a,b) (a < b ? a : b)
+    unsigned int maxLen = SHA_MIN(maxDigestLen, SHA384_LENGTH);
+    SHA512_End(ctx, digest, digestLen, maxLen);
+}
+
+SECStatus
+SHA384_HashBuf(unsigned char *dest, const unsigned char *src,
+			  unsigned int src_length)
+{
+    SHA512Context ctx;
+    unsigned int outLen;
+
+    SHA384_Begin(&ctx);
+    SHA512_Update(&ctx, src, src_length);
+    SHA512_End(&ctx, dest, &outLen, SHA384_LENGTH);
+
+    return SECSuccess;
+}
+
+SECStatus
+SHA384_Hash(unsigned char *dest, const char *src)
+{
+    return SHA384_HashBuf(dest, (const unsigned char *)src, PORT_Strlen(src));
+}
+
+void SHA384_TraceState(SHA384Context *ctx) { }
+
+unsigned int
+SHA384_FlattenSize(SHA384Context *ctx)
+{
+    return sizeof(SHA384Context);
+}
+
+SECStatus
+SHA384_Flatten(SHA384Context *ctx,unsigned char *space)
+{
+    return SHA512_Flatten(ctx, space);
+}
+
+SHA384Context *
+SHA384_Resurrect(unsigned char *space, void *arg)
+{
+    return SHA512_Resurrect(space, arg);
+}
+
+void SHA384_Clone(SHA384Context *dest, SHA384Context *src)
+{
+    memcpy(dest, src, sizeof *dest);
+}
+#endif  /* Comment out unused code. */
+
+/* ======================================================================= */
+#ifdef SELFTEST
+#include <stdio.h>
+
+static const char abc[] = { "abc" };
+static const char abcdbc[] = {
+    "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
+};
+static const char abcdef[] = {
+    "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
+    "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"
+};
+
+void
+dumpHash32(const unsigned char *buf, unsigned int bufLen)
+{
+    unsigned int i;
+    for (i = 0; i < bufLen; i += 4) {
+	printf(" %02x%02x%02x%02x", buf[i], buf[i+1], buf[i+2], buf[i+3]);
+    }
+    printf("\n");
+}
+
+void test256(void)
+{
+    unsigned char outBuf[SHA256_LENGTH];
+
+    printf("SHA256, input = %s\n", abc);
+    SHA256_Hash(outBuf, abc);
+    dumpHash32(outBuf, sizeof outBuf);
+
+    printf("SHA256, input = %s\n", abcdbc);
+    SHA256_Hash(outBuf, abcdbc);
+    dumpHash32(outBuf, sizeof outBuf);
+}
+
+void
+dumpHash64(const unsigned char *buf, unsigned int bufLen)
+{
+    unsigned int i;
+    for (i = 0; i < bufLen; i += 8) {
+    	if (i % 32 == 0)
+	    printf("\n");
+	printf(" %02x%02x%02x%02x%02x%02x%02x%02x",
+	       buf[i  ], buf[i+1], buf[i+2], buf[i+3],
+	       buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
+    }
+    printf("\n");
+}
+
+void test512(void)
+{
+    unsigned char outBuf[SHA512_LENGTH];
+
+    printf("SHA512, input = %s\n", abc);
+    SHA512_Hash(outBuf, abc);
+    dumpHash64(outBuf, sizeof outBuf);
+
+    printf("SHA512, input = %s\n", abcdef);
+    SHA512_Hash(outBuf, abcdef);
+    dumpHash64(outBuf, sizeof outBuf);
+}
+
+void time512(void)
+{
+    unsigned char outBuf[SHA512_LENGTH];
+
+    SHA512_Hash(outBuf, abc);
+    SHA512_Hash(outBuf, abcdef);
+}
+
+void test384(void)
+{
+    unsigned char outBuf[SHA384_LENGTH];
+
+    printf("SHA384, input = %s\n", abc);
+    SHA384_Hash(outBuf, abc);
+    dumpHash64(outBuf, sizeof outBuf);
+
+    printf("SHA384, input = %s\n", abcdef);
+    SHA384_Hash(outBuf, abcdef);
+    dumpHash64(outBuf, sizeof outBuf);
+}
+
+int main (int argc, char *argv[], char *envp[])
+{
+    int i = 1;
+    if (argc > 1) {
+    	i = atoi(argv[1]);
+    }
+    if (i < 2) {
+	test256();
+	test512();
+	test384();
+    } else {
+    	while (i-- > 0) {
+	    time512();
+	}
+	printf("done\n");
+    }
+    return 0;
+}
+
+#endif
diff --git a/crypto/wincrypt_shim.h b/crypto/wincrypt_shim.h
index dcfd4ad..48d4b5c 100644
--- a/crypto/wincrypt_shim.h
+++ b/crypto/wincrypt_shim.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef CRYPTO_WINCRYPT_SHIM_H_
-#define CRYPTO_WINCRYPT_SHIM_H_
+#ifndef NET_CRYPTO_WINCRYPT_SHIM_H_
+#define NET_CRYPTO_WINCRYPT_SHIM_H_
 
 // wincrypt.h defines macros which conflict with OpenSSL's types. This header
 // includes wincrypt and undefines the OpenSSL macros which conflict. Any
@@ -22,4 +22,4 @@
 #define WINCRYPT_X509_EXTENSIONS ((LPCSTR) 5)
 #define WINCRYPT_X509_NAME ((LPCSTR) 7)
 
-#endif  // CRYPTO_WINCRYPT_SHIM_H_
+#endif  // NET_CRYPTO_WINCRYPT_SHIM_H_
diff --git a/dbus/BUILD.gn b/dbus/BUILD.gn
index c0bd77d..28efb93 100644
--- a/dbus/BUILD.gn
+++ b/dbus/BUILD.gn
@@ -17,6 +17,8 @@
     "dbus_statistics.h",
     "exported_object.cc",
     "exported_object.h",
+    "file_descriptor.cc",
+    "file_descriptor.h",
     "message.cc",
     "message.h",
     "object_manager.cc",
diff --git a/dbus/bus.cc b/dbus/bus.cc
index b6a13d6..57834d3 100644
--- a/dbus/bus.cc
+++ b/dbus/bus.cc
@@ -26,11 +26,6 @@
 
 namespace {
 
-const char kDisconnectedSignal[] = "Disconnected";
-const char kDisconnectedMatchRule[] =
-    "type='signal', path='/org/freedesktop/DBus/Local',"
-    "interface='org.freedesktop.DBus.Local', member='Disconnected'";
-
 // The NameOwnerChanged member in org.freedesktop.DBus
 const char kNameOwnerChangedSignal[] = "NameOwnerChanged";
 
@@ -45,7 +40,7 @@
 class Watch : public base::MessagePumpLibevent::Watcher {
  public:
   explicit Watch(DBusWatch* watch)
-      : raw_watch_(watch), file_descriptor_watcher_(FROM_HERE) {
+      : raw_watch_(watch) {
     dbus_watch_set_data(raw_watch_, this, NULL);
   }
 
@@ -400,6 +395,13 @@
   callback.Run();
 }
 
+void Bus::GetManagedObjects() {
+  for (ObjectManagerTable::iterator iter = object_manager_table_.begin();
+       iter != object_manager_table_.end(); ++iter) {
+    iter->second->GetManagedObjects();
+  }
+}
+
 bool Bus::Connect() {
   // dbus_bus_get_private() and dbus_bus_get() are blocking calls.
   AssertOnDBusThread();
@@ -441,12 +443,6 @@
       return false;
     }
   }
-  // We shouldn't exit on the disconnected signal.
-  dbus_connection_set_exit_on_disconnect(connection_, false);
-
-  // Watch Disconnected signal.
-  AddFilterFunction(Bus::OnConnectionDisconnectedFilter, this);
-  AddMatch(kDisconnectedMatchRule, error.get());
 
   return true;
 }
@@ -506,8 +502,6 @@
   if (connection_) {
     // Remove Disconnected watcher.
     ScopedDBusError error;
-    RemoveFilterFunction(Bus::OnConnectionDisconnectedFilter, this);
-    RemoveMatch(kDisconnectedMatchRule, error.get());
 
     if (connection_type_ == PRIVATE)
       ClosePrivateConnection();
@@ -1186,20 +1180,6 @@
 }
 
 // static
-DBusHandlerResult Bus::OnConnectionDisconnectedFilter(
-    DBusConnection* /*connection*/,
-    DBusMessage* message,
-    void* /*data*/) {
-  if (dbus_message_is_signal(message,
-                             DBUS_INTERFACE_LOCAL,
-                             kDisconnectedSignal)) {
-    // Abort when the connection is lost.
-    LOG(FATAL) << "D-Bus connection was disconnected. Aborting.";
-  }
-  return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
-}
-
-// static
 DBusHandlerResult Bus::OnServiceOwnerChangedFilter(
     DBusConnection* /*connection*/,
     DBusMessage* message,
diff --git a/dbus/bus.h b/dbus/bus.h
index 59a1972..7d39159 100644
--- a/dbus/bus.h
+++ b/dbus/bus.h
@@ -28,6 +28,10 @@
 class TaskRunner;
 }
 
+namespace tracked_objects {
+class Location;
+}
+
 namespace dbus {
 
 class ExportedObject;
@@ -356,6 +360,12 @@
                                    const ObjectPath& object_path,
                                    const base::Closure& callback);
 
+  // Instructs all registered object managers to retrieve their set of managed
+  // objects from their respective remote objects. There is no need to call this
+  // manually, this is called automatically by the D-Bus thread manager once
+  // implementation classes are registered.
+  virtual void GetManagedObjects();
+
   // Shuts down the bus and blocks until it's done. More specifically, this
   // function does the following:
   //
diff --git a/dbus/dbus.gyp b/dbus/dbus.gyp
new file mode 100644
index 0000000..264383e
--- /dev/null
+++ b/dbus/dbus.gyp
@@ -0,0 +1,141 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'chromium_code': 1,
+  },
+  'targets': [
+    {
+      'target_name': 'dbus',
+      'type': '<(component)',
+      'dependencies': [
+        '../base/base.gyp:base',
+        '../build/linux/system.gyp:dbus',
+        '../third_party/protobuf/protobuf.gyp:protobuf_lite',
+      ],
+      'export_dependent_settings': [
+        '../base/base.gyp:base',
+      ],
+      'defines': [
+        'DBUS_IMPLEMENTATION',
+      ],
+      'sources': [
+        'bus.cc',
+        'bus.h',
+        'dbus_export.h',
+        'dbus_statistics.cc',
+        'dbus_statistics.h',
+        'exported_object.cc',
+        'exported_object.h',
+        'file_descriptor.cc',
+        'file_descriptor.h',
+        'message.cc',
+        'message.h',
+        'object_manager.cc',
+        'object_manager.h',
+        'object_path.cc',
+        'object_path.h',
+        'object_proxy.cc',
+        'object_proxy.h',
+        'property.cc',
+        'property.h',
+        'scoped_dbus_error.cc',
+        'scoped_dbus_error.h',
+        'string_util.cc',
+        'string_util.h',
+        'util.cc',
+        'util.h',
+        'values_util.cc',
+        'values_util.h',
+      ],
+    },
+    {
+      # Protobuf compiler / generator test protocol buffer
+      'target_name': 'dbus_test_proto',
+      'type': 'static_library',
+      'sources': [ 'test_proto.proto' ],
+      'variables': {
+        'proto_out_dir': 'dbus',
+      },
+      'includes': [ '../build/protoc.gypi' ],
+    },
+    {
+      # This target contains mocks that can be used to write unit tests
+      # without issuing actual D-Bus calls.
+      'target_name': 'dbus_test_support',
+      'type': 'static_library',
+      'dependencies': [
+        '../build/linux/system.gyp:dbus',
+        '../testing/gmock.gyp:gmock',
+        'dbus',
+      ],
+      'sources': [
+        'mock_bus.cc',
+        'mock_bus.h',
+        'mock_exported_object.cc',
+        'mock_exported_object.h',
+        'mock_object_manager.cc',
+        'mock_object_manager.h',
+        'mock_object_proxy.cc',
+        'mock_object_proxy.h',
+      ],
+      'include_dirs': [
+        '..',
+      ],
+    },
+    {
+      'target_name': 'dbus_unittests',
+      'type': 'executable',
+      'dependencies': [
+        '../base/base.gyp:run_all_unittests',
+        '../base/base.gyp:test_support_base',
+        '../build/linux/system.gyp:dbus',
+        '../testing/gmock.gyp:gmock',
+        '../testing/gtest.gyp:gtest',
+        'dbus',
+        'dbus_test_proto',
+        'dbus_test_support',
+      ],
+      'sources': [
+        'bus_unittest.cc',
+        'dbus_statistics_unittest.cc',
+        'end_to_end_async_unittest.cc',
+        'end_to_end_sync_unittest.cc',
+        'message_unittest.cc',
+        'mock_unittest.cc',
+        'object_manager_unittest.cc',
+        'object_proxy_unittest.cc',
+        'property_unittest.cc',
+        'signal_sender_verification_unittest.cc',
+        'string_util_unittest.cc',
+        'test_service.cc',
+        'test_service.h',
+        'util_unittest.cc',
+        'values_util_unittest.cc',
+      ],
+      'include_dirs': [
+        '..',
+      ],
+    },
+    {
+      'target_name': 'dbus_test_server',
+      'type': 'executable',
+      'dependencies': [
+        '../base/base.gyp:test_support_base',
+        '../base/base.gyp:base',
+        '../build/linux/system.gyp:dbus',
+        'dbus',
+      ],
+      'sources': [
+        'test_server.cc',
+        'test_service.cc',
+        'test_service.h',
+      ],
+      'include_dirs': [
+        '..',
+      ],
+    },
+  ],
+}
diff --git a/dbus/dbus_statistics.cc b/dbus/dbus_statistics.cc
index 2949c50..e1e0973 100644
--- a/dbus/dbus_statistics.cc
+++ b/dbus/dbus_statistics.cc
@@ -4,11 +4,12 @@
 
 #include "dbus/dbus_statistics.h"
 
-#include <map>
-#include <tuple>
+#include <memory>
+#include <set>
 
 #include "base/logging.h"
 #include "base/macros.h"
+#include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/threading/platform_thread.h"
 #include "base/time/time.h"
@@ -17,24 +18,43 @@
 
 namespace {
 
-struct StatKey {
+// Used to store dbus statistics sorted alphabetically by service, interface,
+// then method (using std::string <).
+struct Stat {
+  Stat(const std::string& service,
+       const std::string& interface,
+       const std::string& method)
+      : service(service),
+        interface(interface),
+        method(method),
+        sent_method_calls(0),
+        received_signals(0),
+        sent_blocking_method_calls(0) {
+  }
   std::string service;
   std::string interface;
   std::string method;
+  int sent_method_calls;
+  int received_signals;
+  int sent_blocking_method_calls;
+
+  bool Compare(const Stat& other) const {
+    if (service != other.service)
+      return service < other.service;
+    if (interface != other.interface)
+      return interface < other.interface;
+    return method < other.method;
+  }
+
+  struct PtrCompare {
+    bool operator()(Stat* lhs, Stat* rhs) const {
+      DCHECK(lhs && rhs);
+      return lhs->Compare(*rhs);
+    }
+  };
 };
 
-bool operator<(const StatKey& lhs, const StatKey& rhs) {
-  return std::tie(lhs.service, lhs.interface, lhs.method) <
-         std::tie(rhs.service, rhs.interface, rhs.method);
-}
-
-struct StatValue {
-  int sent_method_calls = 0;
-  int received_signals = 0;
-  int sent_blocking_method_calls = 0;
-};
-
-using StatMap = std::map<StatKey, StatValue>;
+typedef std::set<Stat*, Stat::PtrCompare> StatSet;
 
 //------------------------------------------------------------------------------
 // DBusStatistics
@@ -49,9 +69,10 @@
 
   ~DBusStatistics() {
     DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
+    STLDeleteContainerPointers(stats_.begin(), stats_.end());
   }
 
-  // Enum to specify which field in Stat to increment in AddStat.
+  // Enum to specify which field in Stat to increment in AddStat
   enum StatType {
     TYPE_SENT_METHOD_CALLS,
     TYPE_RECEIVED_SIGNALS,
@@ -68,7 +89,7 @@
                << base::PlatformThread::CurrentId();
       return;
     }
-    StatValue* stat = GetStats(service, interface, method, true);
+    Stat* stat = GetStat(service, interface, method, true);
     DCHECK(stat);
     if (type == TYPE_SENT_METHOD_CALLS)
       ++stat->sent_method_calls;
@@ -82,35 +103,33 @@
 
   // Look up the Stat entry in |stats_|. If |add_stat| is true, add a new entry
   // if one does not already exist.
-  StatValue* GetStats(const std::string& service,
-                      const std::string& interface,
-                      const std::string& method,
-                      bool add_stat) {
+  Stat* GetStat(const std::string& service,
+                const std::string& interface,
+                const std::string& method,
+                bool add_stat) {
     DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
-
-    StatKey key = {service, interface, method};
-    auto it = stats_.find(key);
-    if (it != stats_.end())
-      return &(it->second);
-
+    std::unique_ptr<Stat> stat(new Stat(service, interface, method));
+    StatSet::iterator found = stats_.find(stat.get());
+    if (found != stats_.end())
+      return *found;
     if (!add_stat)
-      return nullptr;
-
-    return &(stats_[key]);
+      return NULL;
+    found = stats_.insert(stat.release()).first;
+    return *found;
   }
 
-  StatMap& stats() { return stats_; }
+  StatSet& stats() { return stats_; }
   base::Time start_time() { return start_time_; }
 
  private:
-  StatMap stats_;
+  StatSet stats_;
   base::Time start_time_;
   base::PlatformThreadId origin_thread_id_;
 
   DISALLOW_COPY_AND_ASSIGN(DBusStatistics);
 };
 
-DBusStatistics* g_dbus_statistics = nullptr;
+DBusStatistics* g_dbus_statistics = NULL;
 
 }  // namespace
 
@@ -126,7 +145,7 @@
 
 void Shutdown() {
   delete g_dbus_statistics;
-  g_dbus_statistics = nullptr;
+  g_dbus_statistics = NULL;
 }
 
 void AddSentMethodCall(const std::string& service,
@@ -163,7 +182,7 @@
   if (!g_dbus_statistics)
     return "DBusStatistics not initialized.";
 
-  const StatMap& stats = g_dbus_statistics->stats();
+  const StatSet& stats = g_dbus_statistics->stats();
   if (stats.empty())
     return "No DBus calls.";
 
@@ -174,21 +193,19 @@
   std::string result;
   int sent = 0, received = 0, sent_blocking = 0;
   // Stats are stored in order by service, then interface, then method.
-  for (auto iter = stats.begin(); iter != stats.end();) {
-    auto cur_iter = iter;
-    auto next_iter = ++iter;
-    const StatKey& stat_key = cur_iter->first;
-    const StatValue& stat = cur_iter->second;
-    sent += stat.sent_method_calls;
-    received += stat.received_signals;
-    sent_blocking += stat.sent_blocking_method_calls;
+  for (StatSet::const_iterator iter = stats.begin(); iter != stats.end(); ) {
+    StatSet::const_iterator cur_iter = iter;
+    StatSet::const_iterator next_iter = ++iter;
+    const Stat* stat = *cur_iter;
+    sent += stat->sent_method_calls;
+    received += stat->received_signals;
+    sent_blocking += stat->sent_blocking_method_calls;
     // If this is not the last stat, and if the next stat matches the current
     // stat, continue.
     if (next_iter != stats.end() &&
-        next_iter->first.service == stat_key.service &&
-        (show < SHOW_INTERFACE ||
-         next_iter->first.interface == stat_key.interface) &&
-        (show < SHOW_METHOD || next_iter->first.method == stat_key.method))
+        (*next_iter)->service == stat->service &&
+        (show < SHOW_INTERFACE || (*next_iter)->interface == stat->interface) &&
+        (show < SHOW_METHOD || (*next_iter)->method == stat->method))
       continue;
 
     if (!sent && !received && !sent_blocking)
@@ -197,12 +214,12 @@
     // Add a line to the result and clear the counts.
     std::string line;
     if (show == SHOW_SERVICE) {
-      line += stat_key.service;
+      line += stat->service;
     } else {
       // The interface usually includes the service so don't show both.
-      line += stat_key.interface;
+      line += stat->interface;
       if (show >= SHOW_METHOD)
-        line += "." + stat_key.method;
+        line += "." + stat->method;
     }
     line += base::StringPrintf(":");
     if (sent_blocking) {
@@ -252,8 +269,7 @@
               int* blocking) {
   if (!g_dbus_statistics)
     return false;
-  StatValue* stat =
-      g_dbus_statistics->GetStats(service, interface, method, false);
+  Stat* stat = g_dbus_statistics->GetStat(service, interface, method, false);
   if (!stat)
     return false;
   *sent = stat->sent_method_calls;
diff --git a/dbus/exported_object.cc b/dbus/exported_object.cc
index ffc5eb3..b156308 100644
--- a/dbus/exported_object.cc
+++ b/dbus/exported_object.cc
@@ -11,7 +11,7 @@
 #include "base/logging.h"
 #include "base/memory/ref_counted.h"
 #include "base/message_loop/message_loop.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram.h"
 #include "base/threading/thread_restrictions.h"
 #include "base/time/time.h"
 #include "dbus/bus.h"
@@ -186,9 +186,8 @@
   return true;
 }
 
-DBusHandlerResult ExportedObject::HandleMessage(
-    DBusConnection* /*connection*/,
-    DBusMessage* raw_message) {
+DBusHandlerResult ExportedObject::HandleMessage(DBusConnection*,
+                                                DBusMessage* raw_message) {
   bus_->AssertOnDBusThread();
   DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_CALL, dbus_message_get_type(raw_message));
 
@@ -301,8 +300,7 @@
                       base::TimeTicks::Now() - start_time);
 }
 
-void ExportedObject::OnUnregistered(DBusConnection* /*connection*/) {
-}
+void ExportedObject::OnUnregistered(DBusConnection*) {}
 
 DBusHandlerResult ExportedObject::HandleMessageThunk(
     DBusConnection* connection,
diff --git a/dbus/file_descriptor.cc b/dbus/file_descriptor.cc
new file mode 100644
index 0000000..b690881
--- /dev/null
+++ b/dbus/file_descriptor.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/files/file.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/threading/worker_pool.h"
+#include "dbus/file_descriptor.h"
+
+using std::swap;
+
+namespace dbus {
+
+void CHROME_DBUS_EXPORT FileDescriptor::Deleter::operator()(
+    FileDescriptor* fd) {
+  base::WorkerPool::PostTask(
+      FROM_HERE, base::Bind(&base::DeletePointer<FileDescriptor>, fd), false);
+}
+
+FileDescriptor::FileDescriptor(FileDescriptor&& other) : FileDescriptor() {
+  Swap(&other);
+}
+
+FileDescriptor::~FileDescriptor() {
+  if (owner_)
+    base::File auto_closer(value_);
+}
+
+FileDescriptor& FileDescriptor::operator=(FileDescriptor&& other) {
+  Swap(&other);
+  return *this;
+}
+
+int FileDescriptor::value() const {
+  CHECK(valid_);
+  return value_;
+}
+
+int FileDescriptor::TakeValue() {
+  CHECK(valid_);  // NB: check first so owner_ is unchanged if this triggers
+  owner_ = false;
+  return value_;
+}
+
+void FileDescriptor::CheckValidity() {
+  base::File file(value_);
+  if (!file.IsValid()) {
+    valid_ = false;
+    return;
+  }
+
+  base::File::Info info;
+  bool ok = file.GetInfo(&info);
+  file.TakePlatformFile();  // Prevent |value_| from being closed by |file|.
+  valid_ = (ok && !info.is_directory);
+}
+
+void FileDescriptor::Swap(FileDescriptor* other) {
+  swap(value_, other->value_);
+  swap(owner_, other->owner_);
+  swap(valid_, other->valid_);
+}
+
+}  // namespace dbus
diff --git a/dbus/file_descriptor.h b/dbus/file_descriptor.h
new file mode 100644
index 0000000..f8e8677
--- /dev/null
+++ b/dbus/file_descriptor.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DBUS_FILE_DESCRIPTOR_H_
+#define DBUS_FILE_DESCRIPTOR_H_
+
+#include <memory>
+
+#include "base/macros.h"
+#include "dbus/dbus_export.h"
+
+namespace dbus {
+
+// FileDescriptor is a type used to encapsulate D-Bus file descriptors
+// and to follow the RAII idiom appropiate for use with message operations
+// where the descriptor might be easily leaked.  To guard against this the
+// descriptor is closed when an instance is destroyed if it is owned.
+// Ownership is asserted only when PutValue is used and TakeValue can be
+// used to take ownership.
+//
+// For example, in the following
+//  FileDescriptor fd;
+//  if (!reader->PopString(&name) ||
+//      !reader->PopFileDescriptor(&fd) ||
+//      !reader->PopUint32(&flags)) {
+// the descriptor in fd will be closed if the PopUint32 fails.  But
+//   writer.AppendFileDescriptor(dbus::FileDescriptor(1));
+// will not automatically close "1" because it is not owned.
+//
+// Descriptors must be validated before marshalling in a D-Bus message
+// or using them after unmarshalling.  We disallow descriptors to a
+// directory to reduce the security risks.  Splitting out validation
+// also allows the caller to do this work on the File thread to conform
+// with i/o restrictions.
+class CHROME_DBUS_EXPORT FileDescriptor {
+ public:
+  // This provides a simple way to pass around file descriptors since they must
+  // be closed on a thread that is allowed to perform I/O.
+  struct Deleter {
+    void CHROME_DBUS_EXPORT operator()(FileDescriptor* fd);
+  };
+
+  // Permits initialization without a value for passing to
+  // dbus::MessageReader::PopFileDescriptor to fill in and from int values.
+  FileDescriptor() : value_(-1), owner_(false), valid_(false) {}
+  explicit FileDescriptor(int value) : value_(value), owner_(false),
+      valid_(false) {}
+
+  FileDescriptor(FileDescriptor&& other);
+
+  virtual ~FileDescriptor();
+
+  FileDescriptor& operator=(FileDescriptor&& other);
+
+  // Retrieves value as an int without affecting ownership.
+  int value() const;
+
+  // Retrieves whether or not the descriptor is ok to send/receive.
+  int is_valid() const { return valid_; }
+
+  // Sets the value and assign ownership.
+  void PutValue(int value) {
+    value_ = value;
+    owner_ = true;
+    valid_ = false;
+  }
+
+  // Takes the value and ownership.
+  int TakeValue();
+
+  // Checks (and records) validity of the file descriptor.
+  // We disallow directories to avoid potential sandbox escapes.
+  // Note this call must be made on a thread where file i/o is allowed.
+  void CheckValidity();
+
+ private:
+  void Swap(FileDescriptor* other);
+
+  int value_;
+  bool owner_;
+  bool valid_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileDescriptor);
+};
+
+using ScopedFileDescriptor =
+    std::unique_ptr<FileDescriptor, FileDescriptor::Deleter>;
+
+}  // namespace dbus
+
+#endif  // DBUS_FILE_DESCRIPTOR_H_
diff --git a/dbus/message.cc b/dbus/message.cc
index c8663f7..4a84756 100644
--- a/dbus/message.cc
+++ b/dbus/message.cc
@@ -222,11 +222,11 @@
       case UNIX_FD: {
         CHECK(IsDBusTypeUnixFdSupported());
 
-        base::ScopedFD file_descriptor;
+        FileDescriptor file_descriptor;
         if (!reader->PopFileDescriptor(&file_descriptor))
           return kBrokenMessage;
         output += indent + "fd#" +
-                  base::IntToString(file_descriptor.get()) + "\n";
+                  base::IntToString(file_descriptor.value()) + "\n";
         break;
       }
       default:
@@ -714,9 +714,15 @@
   CloseContainer(&variant_writer);
 }
 
-void MessageWriter::AppendFileDescriptor(int value) {
+void MessageWriter::AppendFileDescriptor(const FileDescriptor& value) {
   CHECK(IsDBusTypeUnixFdSupported());
-  AppendBasic(DBUS_TYPE_UNIX_FD, &value);  // This duplicates the FD.
+
+  if (!value.is_valid()) {
+    // NB: sending a directory potentially enables sandbox escape
+    LOG(FATAL) << "Attempt to pass invalid file descriptor";
+  }
+  int fd = value.value();
+  AppendBasic(DBUS_TYPE_UNIX_FD, &fd);
 }
 
 //
@@ -1010,7 +1016,7 @@
   return variant_reader.PopBasic(dbus_type, value);
 }
 
-bool MessageReader::PopFileDescriptor(base::ScopedFD* value) {
+bool MessageReader::PopFileDescriptor(FileDescriptor* value) {
   CHECK(IsDBusTypeUnixFdSupported());
 
   int fd = -1;
@@ -1018,7 +1024,8 @@
   if (!success)
     return false;
 
-  *value = base::ScopedFD(fd);
+  value->PutValue(fd);
+  // NB: the caller must check validity before using the value
   return true;
 }
 
diff --git a/dbus/message.h b/dbus/message.h
index 256a842..0aa010c 100644
--- a/dbus/message.h
+++ b/dbus/message.h
@@ -13,9 +13,9 @@
 #include <string>
 #include <vector>
 
-#include "base/files/scoped_file.h"
 #include "base/macros.h"
 #include "dbus/dbus_export.h"
+#include "dbus/file_descriptor.h"
 #include "dbus/object_path.h"
 
 namespace google {
@@ -285,10 +285,7 @@
   void AppendDouble(double value);
   void AppendString(const std::string& value);
   void AppendObjectPath(const ObjectPath& value);
-
-  // Appends a file descriptor to the message.
-  // The FD will be duplicated so you still have to close the original FD.
-  void AppendFileDescriptor(int value);
+  void AppendFileDescriptor(const FileDescriptor& value);
 
   // Opens an array. The array contents can be added to the array with
   // |sub_writer|. The client code must close the array with
@@ -401,7 +398,7 @@
   bool PopDouble(double* value);
   bool PopString(std::string* value);
   bool PopObjectPath(ObjectPath* value);
-  bool PopFileDescriptor(base::ScopedFD* value);
+  bool PopFileDescriptor(FileDescriptor* value);
 
   // Sets up the given message reader to read an array at the current
   // iterator position.
diff --git a/dbus/mock_bus.h b/dbus/mock_bus.h
index 40b090b..b50f230 100644
--- a/dbus/mock_bus.h
+++ b/dbus/mock_bus.h
@@ -26,6 +26,15 @@
                ObjectProxy*(const std::string& service_name,
                             const ObjectPath& object_path,
                             int options));
+  MOCK_METHOD3(RemoveObjectProxy, bool(
+      const std::string& service_name,
+      const ObjectPath& object_path,
+      const base::Closure& callback));
+  MOCK_METHOD4(RemoveObjectProxyWithOptions, bool(
+      const std::string& service_name,
+      const ObjectPath& object_path,
+      int options,
+      const base::Closure& callback));
   MOCK_METHOD1(GetExportedObject, ExportedObject*(
       const ObjectPath& object_path));
   MOCK_METHOD2(GetObjectManager, ObjectManager*(const std::string&,
diff --git a/dbus/mock_object_manager.h b/dbus/mock_object_manager.h
index 2318e49..e4c76ba 100644
--- a/dbus/mock_object_manager.h
+++ b/dbus/mock_object_manager.h
@@ -31,6 +31,7 @@
   MOCK_METHOD1(GetObjectProxy, ObjectProxy*(const ObjectPath&));
   MOCK_METHOD2(GetProperties, PropertySet*(const ObjectPath&,
                                            const std::string&));
+  MOCK_METHOD0(GetManagedObjects, void());
 
  protected:
   virtual ~MockObjectManager();
diff --git a/dbus/mock_object_proxy.h b/dbus/mock_object_proxy.h
index 17d2a9f..f27f6f6 100644
--- a/dbus/mock_object_proxy.h
+++ b/dbus/mock_object_proxy.h
@@ -56,10 +56,6 @@
                     const std::string& signal_name,
                     SignalCallback signal_callback,
                     OnConnectedCallback on_connected_callback));
-  MOCK_METHOD1(SetNameOwnerChangedCallback,
-               void(NameOwnerChangedCallback callback));
-  MOCK_METHOD1(WaitForServiceToBeAvailable,
-               void(WaitForServiceToBeAvailableCallback callback));
   MOCK_METHOD0(Detach, void());
 
  protected:
diff --git a/dbus/object_manager.cc b/dbus/object_manager.cc
index 08e6e04..178bb5f 100644
--- a/dbus/object_manager.cc
+++ b/dbus/object_manager.cc
@@ -167,6 +167,35 @@
   match_rule_.clear();
 }
 
+void ObjectManager::InitializeObjects() {
+  DCHECK(bus_);
+  DCHECK(object_proxy_);
+  DCHECK(setup_success_);
+
+  // |object_proxy_| is no longer valid if the Bus was shut down before this
+  // call. Don't initiate any other action from the origin thread.
+  if (cleanup_called_)
+    return;
+
+  object_proxy_->ConnectToSignal(
+      kObjectManagerInterface,
+      kObjectManagerInterfacesAdded,
+      base::Bind(&ObjectManager::InterfacesAddedReceived,
+                 weak_ptr_factory_.GetWeakPtr()),
+      base::Bind(&ObjectManager::InterfacesAddedConnected,
+                 weak_ptr_factory_.GetWeakPtr()));
+
+  object_proxy_->ConnectToSignal(
+      kObjectManagerInterface,
+      kObjectManagerInterfacesRemoved,
+      base::Bind(&ObjectManager::InterfacesRemovedReceived,
+                 weak_ptr_factory_.GetWeakPtr()),
+      base::Bind(&ObjectManager::InterfacesRemovedConnected,
+                 weak_ptr_factory_.GetWeakPtr()));
+
+  GetManagedObjects();
+}
+
 bool ObjectManager::SetupMatchRuleAndFilter() {
   DCHECK(bus_);
   DCHECK(!setup_success_);
@@ -206,39 +235,10 @@
 }
 
 void ObjectManager::OnSetupMatchRuleAndFilterComplete(bool success) {
-  if (!success) {
-    LOG(WARNING) << service_name_ << " " << object_path_.value()
-                 << ": Failed to set up match rule.";
-    return;
-  }
-
-  DCHECK(bus_);
-  DCHECK(object_proxy_);
-  DCHECK(setup_success_);
-
-  // |object_proxy_| is no longer valid if the Bus was shut down before this
-  // call. Don't initiate any other action from the origin thread.
-  if (cleanup_called_)
-    return;
-
-  object_proxy_->ConnectToSignal(
-      kObjectManagerInterface,
-      kObjectManagerInterfacesAdded,
-      base::Bind(&ObjectManager::InterfacesAddedReceived,
-                 weak_ptr_factory_.GetWeakPtr()),
-      base::Bind(&ObjectManager::InterfacesAddedConnected,
-                 weak_ptr_factory_.GetWeakPtr()));
-
-  object_proxy_->ConnectToSignal(
-      kObjectManagerInterface,
-      kObjectManagerInterfacesRemoved,
-      base::Bind(&ObjectManager::InterfacesRemovedReceived,
-                 weak_ptr_factory_.GetWeakPtr()),
-      base::Bind(&ObjectManager::InterfacesRemovedConnected,
-                 weak_ptr_factory_.GetWeakPtr()));
-
-  if (!service_name_owner_.empty())
-    GetManagedObjects();
+  LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
+                            << ": Failed to set up match rule.";
+  if (success)
+    InitializeObjects();
 }
 
 // static
@@ -249,7 +249,7 @@
   return self->HandleMessage(connection, raw_message);
 }
 
-DBusHandlerResult ObjectManager::HandleMessage(DBusConnection* /*connection*/,
+DBusHandlerResult ObjectManager::HandleMessage(DBusConnection*,
                                                DBusMessage* raw_message) {
   DCHECK(bus_);
   bus_->AssertOnDBusThread();
@@ -385,9 +385,10 @@
   UpdateObject(object_path, &reader);
 }
 
-void ObjectManager::InterfacesAddedConnected(const std::string& /*interface_name*/,
-                                             const std::string& /*signal_name*/,
-                                             bool success) {
+void ObjectManager::InterfacesAddedConnected(
+    const std::string& /*interface_name*/,
+    const std::string& /*signal_name*/,
+    bool success) {
   LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
                             << ": Failed to connect to InterfacesAdded signal.";
 }
diff --git a/dbus/object_manager.h b/dbus/object_manager.h
index 90cf919..a97495e 100644
--- a/dbus/object_manager.h
+++ b/dbus/object_manager.h
@@ -71,9 +71,8 @@
 //     object_manager_->UnregisterInterface(kInterface);
 //   }
 //
-// This class calls GetManagedObjects() asynchronously after the remote service
-// becomes available and additionally refreshes managed objects after the
-// service stops or restarts.
+// The D-Bus thread manager takes care of issuing the necessary call to
+// GetManagedObjects() after the implementation classes have been set up.
 //
 // The object manager interface class has one abstract method that must be
 // implemented by the class to create Properties structures on demand. As well
@@ -168,7 +167,7 @@
     // |interface_name| as appropriate. An implementation class will only
     // receive multiple calls if it has registered for multiple interfaces.
     virtual void ObjectAdded(const ObjectPath& /*object_path*/,
-                             const std::string& /*interface_name*/) { }
+                             const std::string& /*interface_name*/) {}
 
     // Called by ObjectManager to inform the implementation class than an
     // object with the path |object_path| has been removed. Ths D-Bus interface
@@ -180,7 +179,7 @@
     // ObjectProxy object for the given interface are cleaned up, it is safe
     // to retrieve them during removal to vary processing.
     virtual void ObjectRemoved(const ObjectPath& /*object_path*/,
-                               const std::string& /*interface_name*/) { }
+                               const std::string& /*interface_name*/) {}
   };
 
   // Client code should use Bus::GetObjectManager() instead of this constructor.
@@ -239,14 +238,17 @@
  private:
   friend class base::RefCountedThreadSafe<ObjectManager>;
 
+  // Connects the InterfacesAdded and InterfacesRemoved signals and calls
+  // GetManagedObjects. Called from OnSetupMatchRuleAndFilterComplete.
+  void InitializeObjects();
+
   // Called from the constructor to add a match rule for PropertiesChanged
-  // signals on the D-Bus thread and set up a corresponding filter function.
+  // signals on the DBus thread and set up a corresponding filter function.
   bool SetupMatchRuleAndFilter();
 
   // Called on the origin thread once the match rule and filter have been set
-  // up. Connects the InterfacesAdded and InterfacesRemoved signals and
-  // refreshes objects if the service is available. |success| is false if an
-  // error occurred during setup and true otherwise.
+  // up. |success| is false, if an error occurred during set up; it's true
+  // otherwise.
   void OnSetupMatchRuleAndFilterComplete(bool success);
 
   // Called by dbus:: when a message is received. This is used to filter
diff --git a/dbus/object_proxy.cc b/dbus/object_proxy.cc
index 50e62a3..ce02551 100644
--- a/dbus/object_proxy.cc
+++ b/dbus/object_proxy.cc
@@ -10,7 +10,7 @@
 #include "base/bind.h"
 #include "base/logging.h"
 #include "base/message_loop/message_loop.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/stringprintf.h"
 #include "base/task_runner_util.h"
@@ -459,9 +459,8 @@
   }
 }
 
-DBusHandlerResult ObjectProxy::HandleMessage(
-    DBusConnection* /*connection*/,
-    DBusMessage* raw_message) {
+DBusHandlerResult ObjectProxy::HandleMessage(DBusConnection*,
+                                             DBusMessage* raw_message) {
   bus_->AssertOnDBusThread();
 
   if (dbus_message_get_type(raw_message) != DBUS_MESSAGE_TYPE_SIGNAL)
diff --git a/dbus/object_proxy.h b/dbus/object_proxy.h
index 5de3904..033e886 100644
--- a/dbus/object_proxy.h
+++ b/dbus/object_proxy.h
@@ -137,10 +137,10 @@
   // from the method (i.e. calling a method that does not return a value),
   // EmptyResponseCallback() can be passed to the |callback| parameter.
   //
-  // If the method call is successful, |callback| will be invoked with a
-  // Response object. If unsuccessful, |error_callback| will be invoked with an
-  // ErrorResponse object (if the remote object returned an error) or nullptr
-  // (if a response was not received at all).
+  // If the method call is successful, a pointer to Response object will
+  // be passed to the callback. If unsuccessful, the error callback will be
+  // called and a pointer to ErrorResponse object will be passed to the error
+  // callback if available, otherwise NULL will be passed.
   //
   // Must be called in the origin thread.
   virtual void CallMethodWithErrorCallback(MethodCall* method_call,
@@ -174,11 +174,7 @@
   // represented by |service_name_|.
   virtual void SetNameOwnerChangedCallback(NameOwnerChangedCallback callback);
 
-  // Registers |callback| to run when the service becomes available. If the
-  // service is already available, or if connecting to the name-owner-changed
-  // signal fails, |callback| will be run once asynchronously. Otherwise,
-  // |callback| will be run once in the future after the service becomes
-  // available.
+  // Runs the callback as soon as the service becomes available.
   virtual void WaitForServiceToBeAvailable(
       WaitForServiceToBeAvailableCallback callback);
 
diff --git a/dbus/property.cc b/dbus/property.cc
index 93f9ed6..aa58436 100644
--- a/dbus/property.cc
+++ b/dbus/property.cc
@@ -6,8 +6,6 @@
 
 #include <stddef.h>
 
-#include <memory>
-
 #include "base/bind.h"
 #include "base/logging.h"
 
@@ -661,134 +659,6 @@
   writer->CloseContainer(&variant_writer);
 }
 
-//
-// Property<std::unordered_map<std::string, std::vector<uint8_t>>>
-// specialization.
-//
-
-template <>
-bool Property<std::unordered_map<std::string, std::vector<uint8_t>>>::
-    PopValueFromReader(MessageReader* reader) {
-  MessageReader variant_reader(nullptr);
-  MessageReader dict_reader(nullptr);
-  if (!reader->PopVariant(&variant_reader) ||
-      !variant_reader.PopArray(&dict_reader))
-    return false;
-
-  value_.clear();
-  while (dict_reader.HasMoreData()) {
-    MessageReader entry_reader(nullptr);
-    if (!dict_reader.PopDictEntry(&entry_reader))
-      return false;
-
-    std::string key;
-    MessageReader value_varient_reader(nullptr);
-    if (!entry_reader.PopString(&key) ||
-        !entry_reader.PopVariant(&value_varient_reader))
-      return false;
-
-    const uint8_t* bytes = nullptr;
-    size_t length = 0;
-    if (!value_varient_reader.PopArrayOfBytes(&bytes, &length))
-      return false;
-
-    value_[key].assign(bytes, bytes + length);
-  }
-  return true;
-}
-
-template <>
-void Property<std::unordered_map<std::string, std::vector<uint8_t>>>::
-    AppendSetValueToWriter(MessageWriter* writer) {
-  MessageWriter variant_writer(nullptr);
-  MessageWriter dict_writer(nullptr);
-
-  writer->OpenVariant("a{sv}", &variant_writer);
-  variant_writer.OpenArray("{sv}", &dict_writer);
-
-  for (const auto& pair : set_value_) {
-    MessageWriter entry_writer(nullptr);
-    dict_writer.OpenDictEntry(&entry_writer);
-
-    entry_writer.AppendString(pair.first);
-
-    MessageWriter value_varient_writer(nullptr);
-    entry_writer.OpenVariant("ay", &value_varient_writer);
-    value_varient_writer.AppendArrayOfBytes(pair.second.data(),
-                                            pair.second.size());
-    entry_writer.CloseContainer(&value_varient_writer);
-
-    dict_writer.CloseContainer(&entry_writer);
-  }
-
-  variant_writer.CloseContainer(&dict_writer);
-  writer->CloseContainer(&variant_writer);
-}
-
-//
-// Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>
-// specialization.
-//
-
-template <>
-bool Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>::
-    PopValueFromReader(MessageReader* reader) {
-  MessageReader variant_reader(nullptr);
-  MessageReader dict_reader(nullptr);
-  if (!reader->PopVariant(&variant_reader) ||
-      !variant_reader.PopArray(&dict_reader))
-    return false;
-
-  value_.clear();
-  while (dict_reader.HasMoreData()) {
-    MessageReader entry_reader(nullptr);
-    if (!dict_reader.PopDictEntry(&entry_reader))
-      return false;
-
-    uint16_t key;
-    MessageReader value_varient_reader(nullptr);
-    if (!entry_reader.PopUint16(&key) ||
-        !entry_reader.PopVariant(&value_varient_reader))
-      return false;
-
-    const uint8_t* bytes = nullptr;
-    size_t length = 0;
-    if (!value_varient_reader.PopArrayOfBytes(&bytes, &length))
-      return false;
-
-    value_[key].assign(bytes, bytes + length);
-  }
-  return true;
-}
-
-template <>
-void Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>::
-    AppendSetValueToWriter(MessageWriter* writer) {
-  MessageWriter variant_writer(nullptr);
-  MessageWriter dict_writer(nullptr);
-
-  writer->OpenVariant("a{qv}", &variant_writer);
-  variant_writer.OpenArray("{qv}", &dict_writer);
-
-  for (const auto& pair : set_value_) {
-    MessageWriter entry_writer(nullptr);
-    dict_writer.OpenDictEntry(&entry_writer);
-
-    entry_writer.AppendUint16(pair.first);
-
-    MessageWriter value_varient_writer(nullptr);
-    entry_writer.OpenVariant("ay", &value_varient_writer);
-    value_varient_writer.AppendArrayOfBytes(pair.second.data(),
-                                            pair.second.size());
-    entry_writer.CloseContainer(&value_varient_writer);
-
-    dict_writer.CloseContainer(&entry_writer);
-  }
-
-  variant_writer.CloseContainer(&dict_writer);
-  writer->CloseContainer(&variant_writer);
-}
-
 template class Property<uint8_t>;
 template class Property<bool>;
 template class Property<int16_t>;
@@ -805,7 +675,5 @@
 template class Property<std::vector<uint8_t>>;
 template class Property<std::map<std::string, std::string>>;
 template class Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>;
-template class Property<std::unordered_map<std::string, std::vector<uint8_t>>>;
-template class Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>;
 
 }  // namespace dbus
diff --git a/dbus/property.h b/dbus/property.h
index 0559ea0..efbad22 100644
--- a/dbus/property.h
+++ b/dbus/property.h
@@ -9,7 +9,6 @@
 
 #include <map>
 #include <string>
-#include <unordered_map>
 #include <utility>
 #include <vector>
 
@@ -611,28 +610,6 @@
 extern template class CHROME_DBUS_EXPORT
     Property<std::vector<std::pair<std::vector<uint8_t>, uint16_t>>>;
 
-template <>
-CHROME_DBUS_EXPORT bool
-Property<std::unordered_map<std::string, std::vector<uint8_t>>>::
-    PopValueFromReader(MessageReader* reader);
-template <>
-CHROME_DBUS_EXPORT void
-Property<std::unordered_map<std::string, std::vector<uint8_t>>>::
-    AppendSetValueToWriter(MessageWriter* writer);
-extern template class CHROME_DBUS_EXPORT
-    Property<std::unordered_map<std::string, std::vector<uint8_t>>>;
-
-template <>
-CHROME_DBUS_EXPORT bool
-Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>::
-    PopValueFromReader(MessageReader* reader);
-template <>
-CHROME_DBUS_EXPORT void
-Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>::
-    AppendSetValueToWriter(MessageWriter* writer);
-extern template class CHROME_DBUS_EXPORT
-    Property<std::unordered_map<uint16_t, std::vector<uint8_t>>>;
-
 #pragma GCC diagnostic pop
 
 }  // namespace dbus
diff --git a/dbus/values_util.cc b/dbus/values_util.cc
index 1e035c9..bea7bea 100644
--- a/dbus/values_util.cc
+++ b/dbus/values_util.cc
@@ -67,19 +67,19 @@
 // Gets the D-Bus type signature for the value.
 std::string GetTypeSignature(const base::Value& value) {
   switch (value.GetType()) {
-    case base::Value::Type::BOOLEAN:
+    case base::Value::TYPE_BOOLEAN:
       return "b";
-    case base::Value::Type::INTEGER:
+    case base::Value::TYPE_INTEGER:
       return "i";
-    case base::Value::Type::DOUBLE:
+    case base::Value::TYPE_DOUBLE:
       return "d";
-    case base::Value::Type::STRING:
+    case base::Value::TYPE_STRING:
       return "s";
-    case base::Value::Type::BINARY:
+    case base::Value::TYPE_BINARY:
       return "ay";
-    case base::Value::Type::DICTIONARY:
+    case base::Value::TYPE_DICTIONARY:
       return "a{sv}";
-    case base::Value::Type::LIST:
+    case base::Value::TYPE_LIST:
       return "av";
     default:
       DLOG(ERROR) << "Unexpected type " << value.GetType();
@@ -98,37 +98,38 @@
     case Message::BYTE: {
       uint8_t value = 0;
       if (reader->PopByte(&value))
-        result = base::MakeUnique<base::Value>(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::BOOL: {
       bool value = false;
       if (reader->PopBool(&value))
-        result = base::MakeUnique<base::Value>(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::INT16: {
       int16_t value = 0;
       if (reader->PopInt16(&value))
-        result = base::MakeUnique<base::Value>(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::UINT16: {
       uint16_t value = 0;
       if (reader->PopUint16(&value))
-        result = base::MakeUnique<base::Value>(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::INT32: {
       int32_t value = 0;
       if (reader->PopInt32(&value))
-        result = base::MakeUnique<base::Value>(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::UINT32: {
       uint32_t value = 0;
       if (reader->PopUint32(&value)) {
-        result = base::MakeUnique<base::Value>(static_cast<double>(value));
+        result = base::MakeUnique<base::FundamentalValue>(
+            static_cast<double>(value));
       }
       break;
     }
@@ -137,7 +138,8 @@
       if (reader->PopInt64(&value)) {
         DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
             value << " is not exactly representable by double";
-        result = base::MakeUnique<base::Value>(static_cast<double>(value));
+        result = base::MakeUnique<base::FundamentalValue>(
+            static_cast<double>(value));
       }
       break;
     }
@@ -146,26 +148,27 @@
       if (reader->PopUint64(&value)) {
         DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
             value << " is not exactly representable by double";
-        result = base::MakeUnique<base::Value>(static_cast<double>(value));
+        result = base::MakeUnique<base::FundamentalValue>(
+            static_cast<double>(value));
       }
       break;
     }
     case Message::DOUBLE: {
       double value = 0;
       if (reader->PopDouble(&value))
-        result = base::MakeUnique<base::Value>(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::STRING: {
       std::string value;
       if (reader->PopString(&value))
-        result = base::MakeUnique<base::Value>(value);
+        result = base::MakeUnique<base::StringValue>(value);
       break;
     }
     case Message::OBJECT_PATH: {
       ObjectPath value;
       if (reader->PopObjectPath(&value))
-        result = base::MakeUnique<base::Value>(value.value());
+        result = base::MakeUnique<base::StringValue>(value.value());
       break;
     }
     case Message::UNIX_FD: {
@@ -216,28 +219,28 @@
 
 void AppendBasicTypeValueData(MessageWriter* writer, const base::Value& value) {
   switch (value.GetType()) {
-    case base::Value::Type::BOOLEAN: {
+    case base::Value::TYPE_BOOLEAN: {
       bool bool_value = false;
       bool success = value.GetAsBoolean(&bool_value);
       DCHECK(success);
       writer->AppendBool(bool_value);
       break;
     }
-    case base::Value::Type::INTEGER: {
+    case base::Value::TYPE_INTEGER: {
       int int_value = 0;
       bool success = value.GetAsInteger(&int_value);
       DCHECK(success);
       writer->AppendInt32(int_value);
       break;
     }
-    case base::Value::Type::DOUBLE: {
+    case base::Value::TYPE_DOUBLE: {
       double double_value = 0;
       bool success = value.GetAsDouble(&double_value);
       DCHECK(success);
       writer->AppendDouble(double_value);
       break;
     }
-    case base::Value::Type::STRING: {
+    case base::Value::TYPE_STRING: {
       std::string string_value;
       bool success = value.GetAsString(&string_value);
       DCHECK(success);
@@ -260,7 +263,7 @@
 
 void AppendValueData(MessageWriter* writer, const base::Value& value) {
   switch (value.GetType()) {
-    case base::Value::Type::DICTIONARY: {
+    case base::Value::TYPE_DICTIONARY: {
       const base::DictionaryValue* dictionary = NULL;
       value.GetAsDictionary(&dictionary);
       dbus::MessageWriter array_writer(NULL);
@@ -276,7 +279,7 @@
       writer->CloseContainer(&array_writer);
       break;
     }
-    case base::Value::Type::LIST: {
+    case base::Value::TYPE_LIST: {
       const base::ListValue* list = NULL;
       value.GetAsList(&list);
       dbus::MessageWriter array_writer(NULL);
@@ -287,10 +290,10 @@
       writer->CloseContainer(&array_writer);
       break;
     }
-    case base::Value::Type::BOOLEAN:
-    case base::Value::Type::INTEGER:
-    case base::Value::Type::DOUBLE:
-    case base::Value::Type::STRING:
+    case base::Value::TYPE_BOOLEAN:
+    case base::Value::TYPE_INTEGER:
+    case base::Value::TYPE_DOUBLE:
+    case base::Value::TYPE_STRING:
       AppendBasicTypeValueData(writer, value);
       break;
     default:
diff --git a/sandbox/BUILD.gn b/sandbox/BUILD.gn
index 8c0405e..8ca3574 100644
--- a/sandbox/BUILD.gn
+++ b/sandbox/BUILD.gn
@@ -2,9 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import("//build/buildflag_header.gni")
-import("//sandbox/features.gni")
-
 # Meta-target that forwards to the proper platform one.
 group("sandbox") {
   if (is_win) {
@@ -22,8 +19,3 @@
     ]
   }
 }
-
-buildflag_header("sandbox_features") {
-  header = "sandbox_features.h"
-  flags = [ "USE_SECCOMP_BPF=$use_seccomp_bpf" ]
-}
diff --git a/sandbox/linux/BUILD.gn b/sandbox/linux/BUILD.gn
index 3e98def..a5c041f 100644
--- a/sandbox/linux/BUILD.gn
+++ b/sandbox/linux/BUILD.gn
@@ -4,7 +4,6 @@
 
 import("//build/config/features.gni")
 import("//build/config/nacl/config.gni")
-import("//sandbox/features.gni")
 import("//testing/test.gni")
 
 if (is_android) {
@@ -42,7 +41,10 @@
     public_deps += [ ":suid_sandbox_client" ]
   }
   if (use_seccomp_bpf || is_nacl_nonsfi) {
-    public_deps += [ ":seccomp_bpf" ]
+    public_deps += [
+      ":seccomp_bpf",
+      ":seccomp_bpf_helpers",
+    ]
   }
 }
 
@@ -190,6 +192,7 @@
       rebase_path(outputs, root_build_dir) + rebase_path(inputs, root_build_dir)
 }
 
+
 test("sandbox_linux_unittests") {
   deps = [
     ":sandbox_linux_unittests_sources",
@@ -219,14 +222,6 @@
     "bpf_dsl/syscall_set.cc",
     "bpf_dsl/syscall_set.h",
     "bpf_dsl/trap_registry.h",
-    "seccomp-bpf-helpers/baseline_policy.cc",
-    "seccomp-bpf-helpers/baseline_policy.h",
-    "seccomp-bpf-helpers/sigsys_handlers.cc",
-    "seccomp-bpf-helpers/sigsys_handlers.h",
-    "seccomp-bpf-helpers/syscall_parameters_restrictions.cc",
-    "seccomp-bpf-helpers/syscall_parameters_restrictions.h",
-    "seccomp-bpf-helpers/syscall_sets.cc",
-    "seccomp-bpf-helpers/syscall_sets.h",
     "seccomp-bpf/die.cc",
     "seccomp-bpf/die.h",
     "seccomp-bpf/sandbox_bpf.cc",
@@ -256,6 +251,31 @@
       "bpf_dsl/linux_syscall_ranges.h",
       "bpf_dsl/seccomp_macros.h",
       "bpf_dsl/trap_registry.h",
+    ]
+  }
+}
+
+component("seccomp_bpf_helpers") {
+  sources = [
+    "seccomp-bpf-helpers/baseline_policy.cc",
+    "seccomp-bpf-helpers/baseline_policy.h",
+    "seccomp-bpf-helpers/sigsys_handlers.cc",
+    "seccomp-bpf-helpers/sigsys_handlers.h",
+    "seccomp-bpf-helpers/syscall_parameters_restrictions.cc",
+    "seccomp-bpf-helpers/syscall_parameters_restrictions.h",
+    "seccomp-bpf-helpers/syscall_sets.cc",
+    "seccomp-bpf-helpers/syscall_sets.h",
+  ]
+  defines = [ "SANDBOX_IMPLEMENTATION" ]
+
+  deps = [
+    ":sandbox_services",
+    ":seccomp_bpf",
+    "//base",
+  ]
+
+  if (is_nacl_nonsfi) {
+    sources -= [
       "seccomp-bpf-helpers/baseline_policy.cc",
       "seccomp-bpf-helpers/baseline_policy.h",
       "seccomp-bpf-helpers/syscall_sets.cc",
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl.h b/sandbox/linux/bpf_dsl/bpf_dsl.h
index 6f0dd4e..7f81344 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl.h
@@ -76,11 +76,6 @@
 namespace sandbox {
 namespace bpf_dsl {
 
-template <typename T>
-class Caser;
-
-class Elser;
-
 // ResultExpr is an opaque reference to an immutable result expression tree.
 using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
 
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl_forward.h b/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
index af1b48b..10477c9 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
@@ -24,6 +24,14 @@
 using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
 using BoolExpr = std::shared_ptr<const internal::BoolExprImpl>;
 
+template <typename T>
+class Arg;
+
+class Elser;
+
+template <typename T>
+class Caser;
+
 }  // namespace bpf_dsl
 }  // namespace sandbox
 
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl_impl.h b/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
index f397321..35ff64f 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
@@ -13,6 +13,7 @@
 
 namespace sandbox {
 namespace bpf_dsl {
+class ErrorCode;
 class PolicyCompiler;
 
 namespace internal {
diff --git a/sandbox/linux/sandbox_linux.gypi b/sandbox/linux/sandbox_linux.gypi
new file mode 100644
index 0000000..e96ae9e
--- /dev/null
+++ b/sandbox/linux/sandbox_linux.gypi
@@ -0,0 +1,434 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'conditions': [
+      ['OS=="linux"', {
+        'compile_suid_client': 1,
+        'compile_credentials': 1,
+        'use_base_test_suite': 1,
+      }, {
+        'compile_suid_client': 0,
+        'compile_credentials': 0,
+        'use_base_test_suite': 0,
+      }],
+      ['OS=="linux" and (target_arch=="ia32" or target_arch=="x64" or '
+         'target_arch=="mipsel")', {
+        'compile_seccomp_bpf_demo': 1,
+      }, {
+        'compile_seccomp_bpf_demo': 0,
+      }],
+    ],
+  },
+  'target_defaults': {
+    'target_conditions': [
+      # All linux/ files will automatically be excluded on Android
+      # so make sure we re-include them explicitly.
+      ['OS == "android"', {
+        'sources/': [
+          ['include', '^linux/'],
+        ],
+      }],
+    ],
+  },
+  'targets': [
+    # We have two principal targets: sandbox and sandbox_linux_unittests
+    # All other targets are listed as dependencies.
+    # There is one notable exception: for historical reasons, chrome_sandbox is
+    # the setuid sandbox and is its own target.
+    {
+      'target_name': 'sandbox',
+      'type': 'none',
+      'dependencies': [
+        'sandbox_services',
+      ],
+      'conditions': [
+        [ 'compile_suid_client==1', {
+          'dependencies': [
+            'suid_sandbox_client',
+          ],
+        }],
+        # Compile seccomp BPF when we support it.
+        [ 'use_seccomp_bpf==1', {
+          'dependencies': [
+            'seccomp_bpf',
+            'seccomp_bpf_helpers',
+          ],
+        }],
+      ],
+    },
+    {
+      'target_name': 'sandbox_linux_test_utils',
+      'type': 'static_library',
+      'dependencies': [
+        '../testing/gtest.gyp:gtest',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [
+        'tests/sandbox_test_runner.cc',
+        'tests/sandbox_test_runner.h',
+        'tests/sandbox_test_runner_function_pointer.cc',
+        'tests/sandbox_test_runner_function_pointer.h',
+        'tests/test_utils.cc',
+        'tests/test_utils.h',
+        'tests/unit_tests.cc',
+        'tests/unit_tests.h',
+      ],
+      'conditions': [
+        [ 'use_seccomp_bpf==1', {
+          'sources': [
+            'seccomp-bpf/bpf_tester_compatibility_delegate.h',
+            'seccomp-bpf/bpf_tests.h',
+            'seccomp-bpf/sandbox_bpf_test_runner.cc',
+            'seccomp-bpf/sandbox_bpf_test_runner.h',
+          ],
+          'dependencies': [
+            'seccomp_bpf',
+          ]
+        }],
+        [ 'use_base_test_suite==1', {
+          'dependencies': [
+            '../base/base.gyp:test_support_base',
+          ],
+          'defines': [
+            'SANDBOX_USES_BASE_TEST_SUITE',
+          ],
+        }],
+      ],
+    },
+    {
+      # The main sandboxing test target.
+      'target_name': 'sandbox_linux_unittests',
+      'includes': [
+        'sandbox_linux_test_sources.gypi',
+      ],
+      'type': 'executable',
+      'conditions': [
+        [ 'OS == "android"', {
+          'variables': {
+            'test_type': 'gtest',
+            'test_suite_name': '<(_target_name)',
+          },
+          'includes': [
+            '../../build/android/test_runner.gypi',
+          ],
+        }]
+      ]
+    },
+    {
+      'target_name': 'seccomp_bpf',
+      'type': '<(component)',
+      'sources': [
+        'bpf_dsl/bpf_dsl.cc',
+        'bpf_dsl/bpf_dsl.h',
+        'bpf_dsl/bpf_dsl_forward.h',
+        'bpf_dsl/bpf_dsl_impl.h',
+        'bpf_dsl/codegen.cc',
+        'bpf_dsl/codegen.h',
+        'bpf_dsl/cons.h',
+        'bpf_dsl/errorcode.h',
+        'bpf_dsl/linux_syscall_ranges.h',
+        'bpf_dsl/policy.cc',
+        'bpf_dsl/policy.h',
+        'bpf_dsl/policy_compiler.cc',
+        'bpf_dsl/policy_compiler.h',
+        'bpf_dsl/seccomp_macros.h',
+        'bpf_dsl/seccomp_macros.h',
+        'bpf_dsl/syscall_set.cc',
+        'bpf_dsl/syscall_set.h',
+        'bpf_dsl/trap_registry.h',
+        'seccomp-bpf/die.cc',
+        'seccomp-bpf/die.h',
+        'seccomp-bpf/sandbox_bpf.cc',
+        'seccomp-bpf/sandbox_bpf.h',
+        'seccomp-bpf/syscall.cc',
+        'seccomp-bpf/syscall.h',
+        'seccomp-bpf/trap.cc',
+        'seccomp-bpf/trap.h',
+      ],
+      'dependencies': [
+        '../base/base.gyp:base',
+        'sandbox_services',
+        'sandbox_services_headers',
+      ],
+      'defines': [
+        'SANDBOX_IMPLEMENTATION',
+      ],
+      'includes': [
+        # Disable LTO due to compiler bug
+        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57703
+        '../../build/android/disable_gcc_lto.gypi',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+    },
+    {
+      'target_name': 'seccomp_bpf_helpers',
+      'type': '<(component)',
+      'sources': [
+        'seccomp-bpf-helpers/baseline_policy.cc',
+        'seccomp-bpf-helpers/baseline_policy.h',
+        'seccomp-bpf-helpers/sigsys_handlers.cc',
+        'seccomp-bpf-helpers/sigsys_handlers.h',
+        'seccomp-bpf-helpers/syscall_parameters_restrictions.cc',
+        'seccomp-bpf-helpers/syscall_parameters_restrictions.h',
+        'seccomp-bpf-helpers/syscall_sets.cc',
+        'seccomp-bpf-helpers/syscall_sets.h',
+      ],
+      'dependencies': [
+        '../base/base.gyp:base',
+        'sandbox_services',
+        'seccomp_bpf',
+      ],
+      'defines': [
+        'SANDBOX_IMPLEMENTATION',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+    },
+    {
+      # The setuid sandbox, for Linux
+      'target_name': 'chrome_sandbox',
+      'type': 'executable',
+      'sources': [
+        'suid/common/sandbox.h',
+        'suid/common/suid_unsafe_environment_variables.h',
+        'suid/process_util.h',
+        'suid/process_util_linux.c',
+        'suid/sandbox.c',
+      ],
+      'cflags': [
+        # For ULLONG_MAX
+        '-std=gnu99',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      # Do not use any sanitizer tools with this binary. http://crbug.com/382766
+      'cflags/': [
+        ['exclude', '-fsanitize'],
+      ],
+      'ldflags/': [
+        ['exclude', '-fsanitize'],
+      ],
+    },
+    { 'target_name': 'sandbox_services',
+      'type': '<(component)',
+      'sources': [
+        'services/init_process_reaper.cc',
+        'services/init_process_reaper.h',
+        'services/proc_util.cc',
+        'services/proc_util.h',
+        'services/resource_limits.cc',
+        'services/resource_limits.h',
+        'services/scoped_process.cc',
+        'services/scoped_process.h',
+        'services/syscall_wrappers.cc',
+        'services/syscall_wrappers.h',
+        'services/thread_helpers.cc',
+        'services/thread_helpers.h',
+        'services/yama.cc',
+        'services/yama.h',
+        'syscall_broker/broker_channel.cc',
+        'syscall_broker/broker_channel.h',
+        'syscall_broker/broker_client.cc',
+        'syscall_broker/broker_client.h',
+        'syscall_broker/broker_common.h',
+        'syscall_broker/broker_file_permission.cc',
+        'syscall_broker/broker_file_permission.h',
+        'syscall_broker/broker_host.cc',
+        'syscall_broker/broker_host.h',
+        'syscall_broker/broker_policy.cc',
+        'syscall_broker/broker_policy.h',
+        'syscall_broker/broker_process.cc',
+        'syscall_broker/broker_process.h',
+      ],
+      'dependencies': [
+        '../base/base.gyp:base',
+      ],
+      'defines': [
+        'SANDBOX_IMPLEMENTATION',
+      ],
+      'conditions': [
+        ['compile_credentials==1', {
+          'sources': [
+            'services/credentials.cc',
+            'services/credentials.h',
+            'services/namespace_sandbox.cc',
+            'services/namespace_sandbox.h',
+            'services/namespace_utils.cc',
+            'services/namespace_utils.h',
+          ],
+          'dependencies': [
+            # for capability.h.
+            'sandbox_services_headers',
+          ],
+        }],
+      ],
+      'include_dirs': [
+        '..',
+      ],
+    },
+    { 'target_name': 'sandbox_services_headers',
+      'type': 'none',
+      'sources': [
+        'system_headers/arm64_linux_syscalls.h',
+        'system_headers/arm64_linux_ucontext.h',
+        'system_headers/arm_linux_syscalls.h',
+        'system_headers/arm_linux_ucontext.h',
+        'system_headers/capability.h',
+        'system_headers/i386_linux_ucontext.h',
+        'system_headers/linux_futex.h',
+        'system_headers/linux_seccomp.h',
+        'system_headers/linux_syscalls.h',
+        'system_headers/linux_time.h',
+        'system_headers/linux_ucontext.h',
+        'system_headers/mips_linux_syscalls.h',
+        'system_headers/mips_linux_ucontext.h',
+        'system_headers/x86_32_linux_syscalls.h',
+        'system_headers/x86_64_linux_syscalls.h',
+      ],
+      'include_dirs': [
+        '..',
+      ],
+    },
+    {
+      'target_name': 'suid_sandbox_client',
+      'type': '<(component)',
+      'sources': [
+        'suid/common/sandbox.h',
+        'suid/common/suid_unsafe_environment_variables.h',
+        'suid/client/setuid_sandbox_client.cc',
+        'suid/client/setuid_sandbox_client.h',
+        'suid/client/setuid_sandbox_host.cc',
+        'suid/client/setuid_sandbox_host.h',
+      ],
+      'defines': [
+        'SANDBOX_IMPLEMENTATION',
+      ],
+      'dependencies': [
+        '../base/base.gyp:base',
+        'sandbox_services',
+      ],
+      'include_dirs': [
+        '..',
+      ],
+    },
+    {
+      'target_name': 'bpf_dsl_golden',
+      'type': 'none',
+      'actions': [
+        {
+          'action_name': 'generate',
+          'inputs': [
+            'bpf_dsl/golden/generate.py',
+            'bpf_dsl/golden/i386/ArgSizePolicy.txt',
+            'bpf_dsl/golden/i386/BasicPolicy.txt',
+            'bpf_dsl/golden/i386/ElseIfPolicy.txt',
+            'bpf_dsl/golden/i386/MaskingPolicy.txt',
+            'bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt',
+            'bpf_dsl/golden/i386/NegativeConstantsPolicy.txt',
+            'bpf_dsl/golden/i386/SwitchPolicy.txt',
+            'bpf_dsl/golden/x86-64/ArgSizePolicy.txt',
+            'bpf_dsl/golden/x86-64/BasicPolicy.txt',
+            'bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt',
+            'bpf_dsl/golden/x86-64/ElseIfPolicy.txt',
+            'bpf_dsl/golden/x86-64/MaskingPolicy.txt',
+            'bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt',
+            'bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt',
+            'bpf_dsl/golden/x86-64/SwitchPolicy.txt',
+          ],
+          'outputs': [
+            '<(SHARED_INTERMEDIATE_DIR)/sandbox/linux/bpf_dsl/golden/golden_files.h',
+          ],
+          'action': [
+            'python',
+            'linux/bpf_dsl/golden/generate.py',
+            '<(SHARED_INTERMEDIATE_DIR)/sandbox/linux/bpf_dsl/golden/golden_files.h',
+            'linux/bpf_dsl/golden/i386/ArgSizePolicy.txt',
+            'linux/bpf_dsl/golden/i386/BasicPolicy.txt',
+            'linux/bpf_dsl/golden/i386/ElseIfPolicy.txt',
+            'linux/bpf_dsl/golden/i386/MaskingPolicy.txt',
+            'linux/bpf_dsl/golden/i386/MoreBooleanLogicPolicy.txt',
+            'linux/bpf_dsl/golden/i386/NegativeConstantsPolicy.txt',
+            'linux/bpf_dsl/golden/i386/SwitchPolicy.txt',
+            'linux/bpf_dsl/golden/x86-64/ArgSizePolicy.txt',
+            'linux/bpf_dsl/golden/x86-64/BasicPolicy.txt',
+            'linux/bpf_dsl/golden/x86-64/BooleanLogicPolicy.txt',
+            'linux/bpf_dsl/golden/x86-64/ElseIfPolicy.txt',
+            'linux/bpf_dsl/golden/x86-64/MaskingPolicy.txt',
+            'linux/bpf_dsl/golden/x86-64/MoreBooleanLogicPolicy.txt',
+            'linux/bpf_dsl/golden/x86-64/NegativeConstantsPolicy.txt',
+            'linux/bpf_dsl/golden/x86-64/SwitchPolicy.txt',
+          ],
+          'message': 'Generating header from golden files ...',
+        },
+      ],
+    },
+  ],
+  'conditions': [
+    [ 'OS=="android"', {
+      'targets': [
+      {
+        'target_name': 'sandbox_linux_unittests_deps',
+        'type': 'none',
+        'dependencies': [
+          'sandbox_linux_unittests',
+        ],
+        'variables': {
+           'output_dir': '<(PRODUCT_DIR)/sandbox_linux_unittests__dist/',
+           'native_binary': '<(PRODUCT_DIR)/sandbox_linux_unittests',
+           'include_main_binary': 1,
+        },
+        'includes': [
+          '../../build/android/native_app_dependencies.gypi'
+        ],
+      }],
+    }],
+    [ 'OS=="android"', {
+      'conditions': [
+        ['test_isolation_mode != "noop"', {
+          'targets': [
+            {
+              'target_name': 'sandbox_linux_unittests_android_run',
+              'type': 'none',
+              'dependencies': [
+                'sandbox_linux_unittests',
+              ],
+              'includes': [
+                '../../build/isolate.gypi',
+              ],
+              'sources': [
+                '../sandbox_linux_unittests_android.isolate',
+              ],
+            },
+          ],
+        },
+      ],
+    ],
+    }],
+    ['test_isolation_mode != "noop"', {
+      'targets': [
+        {
+          'target_name': 'sandbox_linux_unittests_run',
+          'type': 'none',
+          'dependencies': [
+            'sandbox_linux_unittests',
+          ],
+          'includes': [
+            '../../build/isolate.gypi',
+          ],
+          'sources': [
+            '../sandbox_linux_unittests.isolate',
+          ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp b/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp
new file mode 100644
index 0000000..50e637c
--- /dev/null
+++ b/sandbox/linux/sandbox_linux_nacl_nonsfi.gyp
@@ -0,0 +1,87 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'chromium_code': 1,
+  },
+  'includes': [
+    '../../build/common_untrusted.gypi',
+  ],
+  'conditions': [
+    ['disable_nacl==0 and disable_nacl_untrusted==0', {
+      'targets': [
+        {
+          'target_name': 'sandbox_linux_nacl_nonsfi',
+          'type': 'none',
+          'variables': {
+            'nacl_untrusted_build': 1,
+            'nlib_target': 'libsandbox_linux_nacl_nonsfi.a',
+            'build_glibc': 0,
+            'build_newlib': 0,
+            'build_irt': 0,
+            'build_pnacl_newlib': 0,
+            'build_nonsfi_helper': 1,
+            'compile_flags': [
+              '-fgnu-inline-asm',
+            ],
+            'sources': [
+              # This is the subset of linux build target, needed for
+              # nacl_helper_nonsfi's sandbox implementation.
+              'bpf_dsl/bpf_dsl.cc',
+              'bpf_dsl/codegen.cc',
+              'bpf_dsl/policy.cc',
+              'bpf_dsl/policy_compiler.cc',
+              'bpf_dsl/syscall_set.cc',
+              'seccomp-bpf-helpers/sigsys_handlers.cc',
+              'seccomp-bpf-helpers/syscall_parameters_restrictions.cc',
+              'seccomp-bpf/die.cc',
+              'seccomp-bpf/sandbox_bpf.cc',
+              'seccomp-bpf/syscall.cc',
+              'seccomp-bpf/trap.cc',
+              'services/credentials.cc',
+              'services/namespace_sandbox.cc',
+              'services/namespace_utils.cc',
+              'services/proc_util.cc',
+              'services/resource_limits.cc',
+              'services/syscall_wrappers.cc',
+              'services/thread_helpers.cc',
+              'suid/client/setuid_sandbox_client.cc',
+            ],
+          },
+          'dependencies': [
+            '../../base/base_nacl.gyp:base_nacl_nonsfi',
+          ],
+        },
+      ],
+    }],
+
+    ['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
+      'targets': [
+        {
+          'target_name': 'sandbox_linux_test_utils_nacl_nonsfi',
+          'type': 'none',
+          'variables': {
+            'nacl_untrusted_build': 1,
+            'nlib_target': 'libsandbox_linux_test_utils_nacl_nonsfi.a',
+            'build_glibc': 0,
+            'build_newlib': 0,
+            'build_irt': 0,
+            'build_pnacl_newlib': 0,
+            'build_nonsfi_helper': 1,
+
+            'sources': [
+              'seccomp-bpf/sandbox_bpf_test_runner.cc',
+              'tests/sandbox_test_runner.cc',
+              'tests/unit_tests.cc',
+            ],
+          },
+          'dependencies': [
+            '../../testing/gtest_nacl.gyp:gtest_nacl',
+          ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/sandbox/linux/sandbox_linux_test_sources.gypi b/sandbox/linux/sandbox_linux_test_sources.gypi
new file mode 100644
index 0000000..612814e
--- /dev/null
+++ b/sandbox/linux/sandbox_linux_test_sources.gypi
@@ -0,0 +1,93 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Tests need to be compiled in the same link unit, so we have to list them
+# in a separate .gypi file.
+{
+  'dependencies': [
+    'sandbox',
+    'sandbox_linux_test_utils',
+    'sandbox_services',
+    '../base/base.gyp:base',
+    '../testing/gtest.gyp:gtest',
+  ],
+  'include_dirs': [
+    '../..',
+  ],
+  'sources': [
+    'services/proc_util_unittest.cc',
+    'services/scoped_process_unittest.cc',
+    'services/resource_limits_unittests.cc',
+    'services/syscall_wrappers_unittest.cc',
+    'services/thread_helpers_unittests.cc',
+    'services/yama_unittests.cc',
+    'syscall_broker/broker_file_permission_unittest.cc',
+    'syscall_broker/broker_process_unittest.cc',
+    'tests/main.cc',
+    'tests/scoped_temporary_file.cc',
+    'tests/scoped_temporary_file.h',
+    'tests/scoped_temporary_file_unittest.cc',
+    'tests/test_utils_unittest.cc',
+    'tests/unit_tests_unittest.cc',
+  ],
+  'conditions': [
+    [ 'compile_suid_client==1', {
+      'sources': [
+        'suid/client/setuid_sandbox_client_unittest.cc',
+        'suid/client/setuid_sandbox_host_unittest.cc',
+      ],
+    }],
+    [ 'use_seccomp_bpf==1', {
+      'sources': [
+        'bpf_dsl/bpf_dsl_unittest.cc',
+        'bpf_dsl/codegen_unittest.cc',
+        'bpf_dsl/cons_unittest.cc',
+        'bpf_dsl/dump_bpf.cc',
+        'bpf_dsl/dump_bpf.h',
+        'bpf_dsl/syscall_set_unittest.cc',
+        'bpf_dsl/test_trap_registry.cc',
+        'bpf_dsl/test_trap_registry.h',
+        'bpf_dsl/test_trap_registry_unittest.cc',
+        'bpf_dsl/verifier.cc',
+        'bpf_dsl/verifier.h',
+        'integration_tests/bpf_dsl_seccomp_unittest.cc',
+        'integration_tests/seccomp_broker_process_unittest.cc',
+        'seccomp-bpf-helpers/baseline_policy_unittest.cc',
+        'seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc',
+        'seccomp-bpf/bpf_tests_unittest.cc',
+        'seccomp-bpf/sandbox_bpf_unittest.cc',
+        'seccomp-bpf/syscall_unittest.cc',
+        'seccomp-bpf/trap_unittest.cc',
+      ],
+      'dependencies': [
+        'bpf_dsl_golden',
+      ],
+    }],
+    [ 'compile_credentials==1', {
+      'sources': [
+        'integration_tests/namespace_unix_domain_socket_unittest.cc',
+        'services/credentials_unittest.cc',
+        'services/namespace_utils_unittest.cc',
+      ],
+      'dependencies': [
+        '../build/linux/system.gyp:libcap'
+      ],
+      'conditions': [
+        [ 'use_base_test_suite==1', {
+          'sources': [
+            'services/namespace_sandbox_unittest.cc',
+          ]
+        }]
+      ],
+    }],
+    [ 'use_base_test_suite==1', {
+      'dependencies': [
+        '../base/base.gyp:test_support_base',
+      ],
+      'defines': [
+        'SANDBOX_USES_BASE_TEST_SUITE',
+      ],
+    }],
+  ],
+}
diff --git a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
index 88a9326..2bf572c 100644
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
@@ -169,20 +169,10 @@
   if (sysno == __NR_getpriority || sysno ==__NR_setpriority)
     return RestrictGetSetpriority(current_pid);
 
-  if (sysno == __NR_getrandom) {
-    return RestrictGetRandom();
-  }
-
   if (sysno == __NR_madvise) {
-    // Only allow MADV_DONTNEED and MADV_FREE.
+    // Only allow MADV_DONTNEED (aka MADV_FREE).
     const Arg<int> advice(2);
-    return If(AnyOf(advice == MADV_DONTNEED
-#if defined(MADV_FREE)
-                    // MADV_FREE was introduced in Linux 4.5 and started being
-                    // defined in glibc 2.24.
-                    , advice == MADV_FREE
-#endif
-                    ), Allow()).Else(Error(EPERM));
+    return If(advice == MADV_DONTNEED, Allow()).Else(Error(EPERM));
   }
 
 #if defined(__i386__) || defined(__x86_64__) || defined(__mips__) || \
diff --git a/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc b/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
index ca812d8..f0392b1 100644
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
@@ -168,21 +168,6 @@
   TestPipeOrSocketPair(base::ScopedFD(sv[0]), base::ScopedFD(sv[1]));
 }
 
-#if !defined(GRND_NONBLOCK)
-#define GRND_NONBLOCK 1
-#endif
-
-BPF_TEST_C(BaselinePolicy, GetRandom, BaselinePolicy) {
-  char buf[1];
-
-  // Many systems do not yet support getrandom(2) so ENOSYS is a valid result
-  // here.
-  int ret = HANDLE_EINTR(syscall(__NR_getrandom, buf, sizeof(buf), 0));
-  BPF_ASSERT((ret == -1 && errno == ENOSYS) || ret == 1);
-  ret = HANDLE_EINTR(syscall(__NR_getrandom, buf, sizeof(buf), GRND_NONBLOCK));
-  BPF_ASSERT((ret == -1 && (errno == ENOSYS || errno == EAGAIN)) || ret == 1);
-}
-
 // Not all architectures can restrict the domain for socketpair().
 #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 BPF_DEATH_TEST_C(BaselinePolicy,
@@ -364,17 +349,6 @@
   clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
 }
 
-#if !defined(GRND_RANDOM)
-#define GRND_RANDOM 2
-#endif
-
-BPF_DEATH_TEST_C(BaselinePolicy,
-                 GetRandomOfDevRandomCrashes,
-                 DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
-                 BaselinePolicy) {
-  syscall(__NR_getrandom, NULL, 0, GRND_RANDOM);
-}
-
 #if !defined(__i386__)
 BPF_DEATH_TEST_C(BaselinePolicy,
                  GetSockOptWrongLevelSigsys,
diff --git a/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc b/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc
index e6c64de..077bc61 100644
--- a/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/sigsys_handlers.cc
@@ -11,7 +11,6 @@
 #include <sys/syscall.h>
 #include <unistd.h>
 
-#include "base/debug/crash_logging.h"
 #include "base/logging.h"
 #include "base/posix/eintr_wrapper.h"
 #include "build/build_config.h"
@@ -50,8 +49,7 @@
   while (size > 0) {
     // TODO(jln): query the current policy to check if send() is available and
     // use it to perform a non-blocking write.
-    const int ret = HANDLE_EINTR(
-        sandbox::sys_write(STDERR_FILENO, error_message, size));
+    const int ret = HANDLE_EINTR(write(STDERR_FILENO, error_message, size));
     // We can't handle any type of error here.
     if (ret <= 0 || static_cast<size_t>(ret) > size) break;
     size -= ret;
@@ -94,7 +92,6 @@
     rem /= 10;
     sysno_base10[i] = '0' + mod;
   }
-
 #if defined(__mips__) && (_MIPS_SIM == _MIPS_SIM_ABI32)
   static const char kSeccompErrorPrefix[] = __FILE__
       ":**CRASHING**:" SECCOMP_MESSAGE_COMMON_CONTENT " in syscall 4000 + ";
@@ -108,88 +105,7 @@
   WriteToStdErr(kSeccompErrorPostfix, sizeof(kSeccompErrorPostfix) - 1);
 }
 
-// Helper to convert a number of type T to a hexadecimal string using
-// stack-allocated storage.
-template <typename T>
-class NumberToHex {
- public:
-  explicit NumberToHex(T value) {
-    static const char kHexChars[] = "0123456789abcdef";
-
-    memset(str_, '0', sizeof(str_));
-    str_[1] = 'x';
-    str_[sizeof(str_) - 1] = '\0';
-
-    T rem = value;
-    T mod = 0;
-    for (size_t i = sizeof(str_) - 2; i >= 2; --i) {
-      mod = rem % 16;
-      rem /= 16;
-      str_[i] = kHexChars[mod];
-    }
-  }
-
-  const char* str() const { return str_; }
-
-  static size_t length() { return sizeof(str_) - 1; }
-
- private:
-  // HEX uses two characters per byte, with a leading '0x', and a trailing NUL.
-  char str_[sizeof(T) * 2 + 3];
-};
-
-// Records the syscall number and first four arguments in a crash key, to help
-// debug the failure.
-void SetSeccompCrashKey(const struct sandbox::arch_seccomp_data& args) {
-#if !defined(OS_NACL_NONSFI)
-  NumberToHex<int> nr(args.nr);
-  NumberToHex<uint64_t> arg1(args.args[0]);
-  NumberToHex<uint64_t> arg2(args.args[1]);
-  NumberToHex<uint64_t> arg3(args.args[2]);
-  NumberToHex<uint64_t> arg4(args.args[3]);
-
-  // In order to avoid calling into libc sprintf functions from an unsafe signal
-  // context, manually construct the crash key string.
-  const char* const prefixes[] = {
-    "nr=",
-    " arg1=",
-    " arg2=",
-    " arg3=",
-    " arg4=",
-  };
-  const char* const values[] = {
-    nr.str(),
-    arg1.str(),
-    arg2.str(),
-    arg3.str(),
-    arg4.str(),
-  };
-
-  size_t crash_key_length = nr.length() + arg1.length() + arg2.length() +
-                            arg3.length() + arg4.length();
-  for (auto* prefix : prefixes) {
-    crash_key_length += strlen(prefix);
-  }
-  ++crash_key_length;  // For the trailing NUL byte.
-
-  char crash_key[crash_key_length];
-  memset(crash_key, '\0', crash_key_length);
-
-  size_t offset = 0;
-  for (size_t i = 0; i < arraysize(values); ++i) {
-    const char* strings[2] = { prefixes[i], values[i] };
-    for (auto* string : strings) {
-      size_t string_len = strlen(string);
-      memmove(&crash_key[offset], string, string_len);
-      offset += string_len;
-    }
-  }
-
-  base::debug::SetCrashKeyValue("seccomp-sigsys", crash_key);
-#endif
-}
-
-}  // namespace
+}  // namespace.
 
 namespace sandbox {
 
@@ -197,7 +113,6 @@
   uint32_t syscall = SyscallNumberToOffsetFromBase(args.nr);
 
   PrintSyscallError(syscall);
-  SetSeccompCrashKey(args);
 
   // Encode 8-bits of the 1st two arguments too, so we can discern which socket
   // type, which fcntl, ... etc., without being likely to hit a mapped
@@ -225,7 +140,6 @@
   static const char kSeccompCloneError[] =
       __FILE__":**CRASHING**:" SECCOMP_MESSAGE_CLONE_CONTENT "\n";
   WriteToStdErr(kSeccompCloneError, sizeof(kSeccompCloneError) - 1);
-  SetSeccompCrashKey(args);
   // "flags" is the first argument in the kernel's clone().
   // Mark as volatile to be able to find the value on the stack in a minidump.
   volatile uint64_t clone_flags = args.args[0];
@@ -246,7 +160,6 @@
   static const char kSeccompPrctlError[] =
       __FILE__":**CRASHING**:" SECCOMP_MESSAGE_PRCTL_CONTENT "\n";
   WriteToStdErr(kSeccompPrctlError, sizeof(kSeccompPrctlError) - 1);
-  SetSeccompCrashKey(args);
   // Mark as volatile to be able to find the value on the stack in a minidump.
   volatile uint64_t option = args.args[0];
   volatile char* addr =
@@ -261,7 +174,6 @@
   static const char kSeccompIoctlError[] =
       __FILE__":**CRASHING**:" SECCOMP_MESSAGE_IOCTL_CONTENT "\n";
   WriteToStdErr(kSeccompIoctlError, sizeof(kSeccompIoctlError) - 1);
-  SetSeccompCrashKey(args);
   // Make "request" volatile so that we can see it on the stack in a minidump.
   volatile uint64_t request = args.args[1];
   volatile char* addr = reinterpret_cast<volatile char*>(request & 0xFFFF);
@@ -278,7 +190,6 @@
    static const char kSeccompKillError[] =
       __FILE__":**CRASHING**:" SECCOMP_MESSAGE_KILL_CONTENT "\n";
   WriteToStdErr(kSeccompKillError, sizeof(kSeccompKillError) - 1);
-  SetSeccompCrashKey(args);
   // Make "pid" volatile so that we can see it on the stack in a minidump.
   volatile uint64_t my_pid = sys_getpid();
   volatile char* addr = reinterpret_cast<volatile char*>(my_pid & 0xFFF);
@@ -292,7 +203,6 @@
   static const char kSeccompFutexError[] =
       __FILE__ ":**CRASHING**:" SECCOMP_MESSAGE_FUTEX_CONTENT "\n";
   WriteToStdErr(kSeccompFutexError, sizeof(kSeccompFutexError) - 1);
-  SetSeccompCrashKey(args);
   volatile int futex_op = args.args[1];
   volatile char* addr = reinterpret_cast<volatile char*>(futex_op & 0xFFF);
   *addr = '\0';
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
index 061bfb4..56c4cb3 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
@@ -351,22 +351,4 @@
       .Default(CrashSIGSYS());
 }
 
-#if !defined(GRND_NONBLOCK)
-#define GRND_NONBLOCK 1
-#endif
-
-ResultExpr RestrictGetRandom() {
-  const Arg<unsigned int> flags(2);
-  const unsigned int kGoodFlags = GRND_NONBLOCK;
-  return If((flags & ~kGoodFlags) == 0, Allow()).Else(CrashSIGSYS());
-}
-
-ResultExpr RestrictPrlimitToGetrlimit(pid_t target_pid) {
-  const Arg<pid_t> pid(0);
-  const Arg<uintptr_t> new_limit(2);
-  // Only allow 'get' operations, and only for the current process.
-  return If(AllOf(new_limit == 0, AnyOf(pid == 0, pid == target_pid)), Allow())
-      .Else(Error(EPERM));
-}
-
 }  // namespace sandbox.
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
index c4577dc..b96fe20 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
@@ -94,15 +94,6 @@
 // about the state of the host OS.
 SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictClockID();
 
-// Restrict the flags argument to getrandom() to allow only no flags, or
-// GRND_NONBLOCK.
-SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictGetRandom();
-
-// Restrict |new_limit| to NULL, and |pid| to the calling process (or 0) for
-// prlimit64().  This allows only getting rlimits on the current process.
-// Otherwise, fail gracefully; see crbug.com/160157.
-SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictPrlimitToGetrlimit(pid_t target_pid);
-
 }  // namespace sandbox.
 
 #endif  // SANDBOX_LINUX_SECCOMP_BPF_HELPERS_SYSCALL_PARAMETERS_RESTRICTIONS_H_
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
index c068cd2..804a8fe 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
@@ -13,7 +13,6 @@
 #include <unistd.h>
 
 #include "base/bind.h"
-#include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/sys_info.h"
 #include "base/threading/thread.h"
@@ -165,7 +164,7 @@
   // different.
   base::Thread getparam_thread("sched_getparam_thread");
   BPF_ASSERT(getparam_thread.Start());
-  getparam_thread.task_runner()->PostTask(
+  getparam_thread.message_loop()->PostTask(
       FROM_HERE, base::Bind(&SchedGetParamThread, &thread_run));
   BPF_ASSERT(thread_run.TimedWait(base::TimeDelta::FromMilliseconds(5000)));
   getparam_thread.Stop();
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
index 1d9f95c..c217d47 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
@@ -120,7 +120,9 @@
 #if defined(__i386__) || defined(__arm__) || defined(__mips__)
     case __NR_lstat64:
 #endif
+#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
     case __NR_memfd_create:
+#endif
     case __NR_mkdirat:
     case __NR_mknodat:
 #if defined(__i386__)
@@ -412,7 +414,6 @@
     case __NR_epoll_create:
     case __NR_epoll_wait:
 #endif
-    case __NR_epoll_pwait:
     case __NR_epoll_create1:
     case __NR_epoll_ctl:
       return true;
@@ -420,6 +421,7 @@
 #if defined(__x86_64__)
     case __NR_epoll_ctl_old:
 #endif
+    case __NR_epoll_pwait:
 #if defined(__x86_64__)
     case __NR_epoll_wait_old:
 #endif
@@ -548,7 +550,7 @@
 #if defined(__i386__) || defined(__arm__) || defined(__mips__)
     case __NR__newselect:
 #endif
-#if defined(__arm__) || defined(__mips__)
+#if defined(__arm__)
     case __NR_send:
 #endif
 #if defined(__x86_64__) || defined(__arm__) || defined(__mips__) || \
diff --git a/sandbox/linux/services/credentials.cc b/sandbox/linux/services/credentials.cc
index 50a109e..0c617d4 100644
--- a/sandbox/linux/services/credentials.cc
+++ b/sandbox/linux/services/credentials.cc
@@ -150,18 +150,6 @@
   return 0;
 }
 
-void SetGidAndUidMaps(gid_t gid, uid_t uid) {
-  if (NamespaceUtils::KernelSupportsDenySetgroups()) {
-    PCHECK(NamespaceUtils::DenySetgroups());
-  }
-  DCHECK(GetRESIds(NULL, NULL));
-  const char kGidMapFile[] = "/proc/self/gid_map";
-  const char kUidMapFile[] = "/proc/self/uid_map";
-  PCHECK(NamespaceUtils::WriteToIdMapFile(kGidMapFile, gid));
-  PCHECK(NamespaceUtils::WriteToIdMapFile(kUidMapFile, uid));
-  DCHECK(GetRESIds(NULL, NULL));
-}
-
 }  // namespace.
 
 // static
@@ -265,14 +253,8 @@
   return false;
 #endif
 
-  uid_t uid;
-  gid_t gid;
-  if (!GetRESIds(&uid, &gid)) {
-    return false;
-  }
-
-  const pid_t pid =
-      base::ForkWithFlags(CLONE_NEWUSER | SIGCHLD, nullptr, nullptr);
+  // This is roughly a fork().
+  const pid_t pid = sys_clone(CLONE_NEWUSER | SIGCHLD, 0, 0, 0, 0);
 
   if (pid == -1) {
     CheckCloneNewUserErrno(errno);
@@ -280,28 +262,20 @@
   }
 
   // The parent process could have had threads. In the child, these threads
-  // have disappeared.
+  // have disappeared. Make sure to not do anything in the child, as this is a
+  // fragile execution environment.
   if (pid == 0) {
-    // unshare() requires the effective uid and gid to have a mapping in the
-    // parent namespace.
-    SetGidAndUidMaps(gid, uid);
-
-    // Make sure we drop CAP_SYS_ADMIN.
-    CHECK(sandbox::Credentials::DropAllCapabilities());
-
-    // Ensure we have unprivileged use of CLONE_NEWUSER.  Debian
-    // Jessie explicitly forbids this case.  See:
-    // add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by-default.patch
-    _exit(!!sys_unshare(CLONE_NEWUSER));
+    _exit(kExitSuccess);
   }
 
   // Always reap the child.
   int status = -1;
   PCHECK(HANDLE_EINTR(waitpid(pid, &status, 0)) == pid);
+  CHECK(WIFEXITED(status));
+  CHECK_EQ(kExitSuccess, WEXITSTATUS(status));
 
-  // clone(2) succeeded.  Now return true only if the system grants
-  // unprivileged use of CLONE_NEWUSER as well.
-  return WIFEXITED(status) && WEXITSTATUS(status) == kExitSuccess;
+  // clone(2) succeeded, we can use CLONE_NEWUSER.
+  return true;
 }
 
 bool Credentials::MoveToNewUserNS() {
@@ -322,9 +296,18 @@
     return false;
   }
 
+  if (NamespaceUtils::KernelSupportsDenySetgroups()) {
+    PCHECK(NamespaceUtils::DenySetgroups());
+  }
+
   // The current {r,e,s}{u,g}id is now an overflow id (c.f.
   // /proc/sys/kernel/overflowuid). Setup the uid and gid maps.
-  SetGidAndUidMaps(gid, uid);
+  DCHECK(GetRESIds(NULL, NULL));
+  const char kGidMapFile[] = "/proc/self/gid_map";
+  const char kUidMapFile[] = "/proc/self/uid_map";
+  PCHECK(NamespaceUtils::WriteToIdMapFile(kGidMapFile, gid));
+  PCHECK(NamespaceUtils::WriteToIdMapFile(kUidMapFile, uid));
+  DCHECK(GetRESIds(NULL, NULL));
   return true;
 }
 
@@ -332,16 +315,12 @@
   CHECK_LE(0, proc_fd);
 
   CHECK(ChrootToSafeEmptyDir());
-  CHECK(!HasFileSystemAccess());
+  CHECK(!base::DirectoryExists(base::FilePath("/proc")));
   CHECK(!ProcUtil::HasOpenDirectory(proc_fd));
   // We never let this function fail.
   return true;
 }
 
-bool Credentials::HasFileSystemAccess() {
-  return base::DirectoryExists(base::FilePath("/proc"));
-}
-
 pid_t Credentials::ForkAndDropCapabilitiesInChild() {
   pid_t pid = fork();
   if (pid != 0) {
diff --git a/sandbox/linux/services/credentials.h b/sandbox/linux/services/credentials.h
index 157c8e7..b89a6aa 100644
--- a/sandbox/linux/services/credentials.h
+++ b/sandbox/linux/services/credentials.h
@@ -94,9 +94,6 @@
   //   - DropAllCapabilities() must be called to prevent escapes.
   static bool DropFileSystemAccess(int proc_fd) WARN_UNUSED_RESULT;
 
-  // This function returns true if the process can still access the filesystem.
-  static bool HasFileSystemAccess();
-
   // Forks and drops capabilities in the child.
   static pid_t ForkAndDropCapabilitiesInChild();
 
diff --git a/sandbox/linux/services/credentials_unittest.cc b/sandbox/linux/services/credentials_unittest.cc
index 41c04bb..b95ba0b 100644
--- a/sandbox/linux/services/credentials_unittest.cc
+++ b/sandbox/linux/services/credentials_unittest.cc
@@ -145,12 +145,11 @@
 
 // Disabled on ASAN because of crbug.com/451603.
 SANDBOX_TEST(Credentials, DISABLE_ON_ASAN(DropFileSystemAccessIsSafe)) {
-  CHECK(Credentials::HasFileSystemAccess());
   CHECK(Credentials::DropAllCapabilities());
   // Probably missing kernel support.
   if (!Credentials::MoveToNewUserNS()) return;
   CHECK(Credentials::DropFileSystemAccess(ProcUtil::OpenProc().get()));
-  CHECK(!Credentials::HasFileSystemAccess());
+  CHECK(!base::DirectoryExists(base::FilePath("/proc")));
   CHECK(WorkingDirectoryIsRoot());
   CHECK(base::IsDirectoryEmpty(base::FilePath("/")));
   // We want the chroot to never have a subdirectory. A subdirectory
@@ -246,19 +245,18 @@
   signal_handler_called = 1;
 }
 
-// glibc (and some other libcs) caches the PID and TID in TLS. This test
-// verifies that these values are correct after DropFilesystemAccess.
 // Disabled on ASAN because of crbug.com/451603.
 SANDBOX_TEST(Credentials, DISABLE_ON_ASAN(DropFileSystemAccessPreservesTLS)) {
   // Probably missing kernel support.
   if (!Credentials::MoveToNewUserNS()) return;
   CHECK(Credentials::DropFileSystemAccess(ProcUtil::OpenProc().get()));
 
-  // The libc getpid implementation may return a cached PID. Ensure that
-  // it matches the PID returned from the getpid system call.
-  CHECK_EQ(sys_getpid(), getpid());
+  // In glibc, pthread_getattr_np makes an assertion about the cached PID/TID in
+  // TLS.
+  pthread_attr_t attr;
+  EXPECT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
 
-  // raise uses the cached TID in glibc.
+  // raise also uses the cached TID in glibc.
   struct sigaction action = {};
   action.sa_handler = &SignalHandler;
   PCHECK(sigaction(SIGUSR1, &action, nullptr) == 0);
diff --git a/sandbox/linux/services/syscall_wrappers.cc b/sandbox/linux/services/syscall_wrappers.cc
index 9c7727c..7132d2a 100644
--- a/sandbox/linux/services/syscall_wrappers.cc
+++ b/sandbox/linux/services/syscall_wrappers.cc
@@ -32,10 +32,6 @@
   return syscall(__NR_gettid);
 }
 
-ssize_t sys_write(int fd, const char* buffer, size_t buffer_size) {
-  return syscall(__NR_write, fd, buffer, buffer_size);
-}
-
 long sys_clone(unsigned long flags,
                std::nullptr_t child_stack,
                pid_t* ptid,
diff --git a/sandbox/linux/services/syscall_wrappers.h b/sandbox/linux/services/syscall_wrappers.h
index 1975bfb..057e4c8 100644
--- a/sandbox/linux/services/syscall_wrappers.h
+++ b/sandbox/linux/services/syscall_wrappers.h
@@ -28,10 +28,6 @@
 
 SANDBOX_EXPORT pid_t sys_gettid(void);
 
-SANDBOX_EXPORT ssize_t sys_write(int fd,
-                                 const char* buffer,
-                                 size_t buffer_size);
-
 SANDBOX_EXPORT long sys_clone(unsigned long flags);
 
 // |regs| is not supported and must be passed as nullptr. |child_stack| must be
diff --git a/sandbox/linux/suid/process_util_linux.c b/sandbox/linux/suid/process_util_linux.c
index d28d576..40949bd 100644
--- a/sandbox/linux/suid/process_util_linux.c
+++ b/sandbox/linux/suid/process_util_linux.c
@@ -59,7 +59,6 @@
     fd = openat(dirfd, "oom_adj", O_WRONLY);
     if (fd < 0) {
       // Nope, that doesn't work either.
-      close(dirfd);
       return false;
     } else {
       // If we're using the old oom_adj file, the allowed range is now
diff --git a/sandbox/linux/syscall_broker/broker_file_permission.h b/sandbox/linux/syscall_broker/broker_file_permission.h
index ddc62d5..03300d1 100644
--- a/sandbox/linux/syscall_broker/broker_file_permission.h
+++ b/sandbox/linux/syscall_broker/broker_file_permission.h
@@ -61,7 +61,7 @@
   // or a pointer the matched path in the whitelist if an absolute
   // match.
   // If not NULL |unlink_after_open| is set to point to true if the
-  // caller should unlink the path after opening.
+  // caller should unlink the path after openning.
   // Async signal safe if |file_to_open| is NULL.
   bool CheckOpen(const char* requested_filename,
                  int flags,
diff --git a/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc b/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc
index 8384077..b58a901 100644
--- a/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc
+++ b/sandbox/linux/syscall_broker/broker_file_permission_unittest.cc
@@ -46,19 +46,10 @@
   BrokerFilePermission perm = BrokerFilePermission::ReadOnlyRecursive(kPath);
 }
 
-// In official builds, CHECK(x) causes a SIGTRAP on the architectures where the
-// sanbox is enabled (that are x86, x86_64, arm64 and 32-bit arm processes
-// running on a arm64 kernel).
-#if defined(OFFICIAL_BUILD)
-#define DEATH_BY_CHECK(msg) DEATH_BY_SIGNAL(SIGTRAP)
-#else
-#define DEATH_BY_CHECK(msg) DEATH_MESSAGE(msg)
-#endif
-
 SANDBOX_DEATH_TEST(
     BrokerFilePermission,
     CreateBad,
-    DEATH_BY_CHECK(BrokerFilePermissionTester::GetErrorMessage())) {
+    DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
   const char kPath[] = "/tmp/bad/";
   BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
 }
@@ -66,7 +57,7 @@
 SANDBOX_DEATH_TEST(
     BrokerFilePermission,
     CreateBadRecursive,
-    DEATH_BY_CHECK(BrokerFilePermissionTester::GetErrorMessage())) {
+    DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
   const char kPath[] = "/tmp/bad";
   BrokerFilePermission perm = BrokerFilePermission::ReadOnlyRecursive(kPath);
 }
@@ -74,7 +65,7 @@
 SANDBOX_DEATH_TEST(
     BrokerFilePermission,
     CreateBadNotAbs,
-    DEATH_BY_CHECK(BrokerFilePermissionTester::GetErrorMessage())) {
+    DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
   const char kPath[] = "tmp/bad";
   BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
 }
@@ -82,7 +73,7 @@
 SANDBOX_DEATH_TEST(
     BrokerFilePermission,
     CreateBadEmpty,
-    DEATH_BY_CHECK(BrokerFilePermissionTester::GetErrorMessage())) {
+    DEATH_MESSAGE(BrokerFilePermissionTester::GetErrorMessage())) {
   const char kPath[] = "";
   BrokerFilePermission perm = BrokerFilePermission::ReadOnly(kPath);
 }
diff --git a/sandbox/linux/system_headers/arm64_linux_syscalls.h b/sandbox/linux/system_headers/arm64_linux_syscalls.h
index 59d0eab..8acb2d1 100644
--- a/sandbox/linux/system_headers/arm64_linux_syscalls.h
+++ b/sandbox/linux/system_headers/arm64_linux_syscalls.h
@@ -1059,8 +1059,4 @@
 #define __NR_getrandom 278
 #endif
 
-#if !defined(__NR_memfd_create)
-#define __NR_memfd_create 279
-#endif
-
 #endif  // SANDBOX_LINUX_SYSTEM_HEADERS_ARM64_LINUX_SYSCALLS_H_
diff --git a/sandbox/linux/system_headers/mips64_linux_syscalls.h b/sandbox/linux/system_headers/mips64_linux_syscalls.h
index 90f3d1b..d003124 100644
--- a/sandbox/linux/system_headers/mips64_linux_syscalls.h
+++ b/sandbox/linux/system_headers/mips64_linux_syscalls.h
@@ -1263,12 +1263,4 @@
 #define __NR_seccomp (__NR_Linux + 312)
 #endif
 
-#if !defined(__NR_getrandom)
-#define __NR_getrandom (__NR_Linux + 313)
-#endif
-
-#if !defined(__NR_memfd_create)
-#define __NR_memfd_create (__NR_Linux + 314)
-#endif
-
 #endif  // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS64_LINUX_SYSCALLS_H_
diff --git a/sandbox/linux/system_headers/mips_linux_syscalls.h b/sandbox/linux/system_headers/mips_linux_syscalls.h
index 784d6b8..eb1717a 100644
--- a/sandbox/linux/system_headers/mips_linux_syscalls.h
+++ b/sandbox/linux/system_headers/mips_linux_syscalls.h
@@ -1425,12 +1425,4 @@
 #define __NR_seccomp (__NR_Linux + 352)
 #endif
 
-#if !defined(__NR_getrandom)
-#define __NR_getrandom (__NR_Linux + 353)
-#endif
-
-#if !defined(__NR_memfd_create)
-#define __NR_memfd_create (__NR_Linux + 354)
-#endif
-
 #endif  // SANDBOX_LINUX_SYSTEM_HEADERS_MIPS_LINUX_SYSCALLS_H_
diff --git a/sandbox/mac/BUILD.gn b/sandbox/mac/BUILD.gn
index 5174b54..fd53131 100644
--- a/sandbox/mac/BUILD.gn
+++ b/sandbox/mac/BUILD.gn
@@ -35,8 +35,6 @@
 
 component("seatbelt") {
   sources = [
-    "sandbox_compiler.cc",
-    "sandbox_compiler.h",
     "seatbelt.cc",
     "seatbelt.h",
     "seatbelt_export.h",
@@ -49,8 +47,6 @@
   sources = [
     "bootstrap_sandbox_unittest.mm",
     "policy_unittest.cc",
-    "sandbox_mac_compiler_unittest.mm",
-    "sandbox_mac_compiler_v2_unittest.mm",
     "xpc_message_server_unittest.cc",
   ]
 
@@ -61,7 +57,6 @@
 
   deps = [
     ":sandbox",
-    ":seatbelt",
     "//base",
     "//base/test:run_all_unittests",
     "//testing/gtest",
diff --git a/sandbox/mac/sandbox_mac.gypi b/sandbox/mac/sandbox_mac.gypi
new file mode 100644
index 0000000..79740e5
--- /dev/null
+++ b/sandbox/mac/sandbox_mac.gypi
@@ -0,0 +1,104 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'targets': [
+    {
+      'target_name': 'seatbelt',
+      'type' : '<(component)',
+      'sources': [
+        'seatbelt.cc',
+        'seatbelt.h',
+        'seatbelt_export.h',
+      ],
+      'defines': [
+        'SEATBELT_IMPLEMENTATION',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'link_settings': {
+        'libraries': [
+          '$(SDKROOT)/usr/lib/libsandbox.dylib',
+        ],
+      }
+    },
+    {
+      'target_name': 'sandbox',
+      'type': '<(component)',
+      'sources': [
+        'bootstrap_sandbox.cc',
+        'bootstrap_sandbox.h',
+        'launchd_interception_server.cc',
+        'launchd_interception_server.h',
+        'mach_message_server.cc',
+        'mach_message_server.h',
+        'message_server.h',
+        'os_compatibility.cc',
+        'os_compatibility.h',
+        'policy.cc',
+        'policy.h',
+        'pre_exec_delegate.cc',
+        'pre_exec_delegate.h',
+        'xpc.h',
+        'xpc_message_server.cc',
+        'xpc_message_server.h',
+      ],
+      'dependencies': [
+        '../base/base.gyp:base',
+      ],
+      'include_dirs': [
+        '..',
+        '<(SHARED_INTERMEDIATE_DIR)',
+      ],
+      'defines': [
+        'SANDBOX_IMPLEMENTATION',
+      ],
+      'link_settings': {
+        'libraries': [
+          '$(SDKROOT)/usr/lib/libbsm.dylib',
+        ],
+      },
+    },
+    {
+      'target_name': 'sandbox_mac_unittests',
+      'type': 'executable',
+      'sources': [
+        'bootstrap_sandbox_unittest.mm',
+        'policy_unittest.cc',
+        'xpc_message_server_unittest.cc',
+      ],
+      'dependencies': [
+        'sandbox',
+        '../base/base.gyp:base',
+        '../base/base.gyp:run_all_unittests',
+        '../testing/gtest.gyp:gtest',
+      ],
+      'include_dirs': [
+        '..',
+      ],
+      'link_settings': {
+        'libraries': [
+          '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
+          '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+        ],
+      },
+    },
+  ],
+  'conditions': [
+    ['test_isolation_mode != "noop"', {
+      'targets': [
+        {
+          'target_name': 'sandbox_mac_unittests_run',
+          'type': 'none',
+          'dependencies': [
+            'sandbox_mac_unittests',
+          ],
+          'includes': [ '../../build/isolate.gypi' ],
+          'sources': [ '../sandbox_mac_unittests.isolate' ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/sandbox/sandbox.gyp b/sandbox/sandbox.gyp
new file mode 100644
index 0000000..f93fa18
--- /dev/null
+++ b/sandbox/sandbox.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'chromium_code': 1,
+  },
+  'conditions': [
+    [ 'OS=="win"', {
+      'includes': [
+        'win/sandbox_win.gypi',
+      ],
+    }],
+    [ 'OS=="linux" or OS=="android"', {
+      'includes': [
+        'linux/sandbox_linux.gypi',
+      ],
+    }],
+    [ 'OS=="mac" and OS!="ios"', {
+      'includes': [
+        'mac/sandbox_mac.gypi',
+      ],
+    }],
+    [ 'OS!="win" and OS!="mac" and OS!="linux" and OS!="android"', {
+      # A 'default' to accomodate the "sandbox" target.
+      'targets': [
+        {
+          'target_name': 'sandbox',
+          'type': 'none',
+        }
+       ]
+    }],
+  ],
+}
diff --git a/sandbox/sandbox_linux_unittests.isolate b/sandbox/sandbox_linux_unittests.isolate
new file mode 100644
index 0000000..2b7c2a7
--- /dev/null
+++ b/sandbox/sandbox_linux_unittests.isolate
@@ -0,0 +1,23 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Because of a limitation in isolate_driver.py, this file needs to be in
+# the same directory as the main .gyp file.
+
+{
+  'conditions': [
+    ['OS=="android" or OS=="linux"', {
+      'variables': {
+        'command': [
+          '<(PRODUCT_DIR)/sandbox_linux_unittests',
+        ],
+      },
+    }],
+  ],
+  'includes': [
+    # This is needed because of base/ dependencies on
+    # icudtl.dat.
+    '../base/base.isolate',
+  ],
+}
diff --git a/sandbox/win/BUILD.gn b/sandbox/win/BUILD.gn
index 1d51220..60bb499 100644
--- a/sandbox/win/BUILD.gn
+++ b/sandbox/win/BUILD.gn
@@ -154,18 +154,31 @@
     ]
   }
 
-  # Disable sanitizer coverage in the sandbox code. The sandbox code runs before
-  # sanitizer coverage can initialize. http://crbug.com/484711
-  configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
-  configs +=
-      [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
-
   configs += [ "//build/config:precompiled_headers" ]
 
   deps = [
     "//base",
     "//base:base_static",
   ]
+  if (current_cpu == "x86") {
+    deps += [ ":copy_wow_helper" ]
+  }
+}
+
+if (current_cpu == "x86") {
+  # Make a target that copies the wow_helper files to the out dir.
+  #
+  # TODO(brettw) we can probably just build this now that we have proper
+  # toolchain support.
+  copy("copy_wow_helper") {
+    sources = [
+      "wow_helper/wow_helper.exe",
+      "wow_helper/wow_helper.pdb",
+    ]
+    outputs = [
+      "$root_out_dir/{{source_file_part}}",
+    ]
+  }
 }
 
 test("sbox_integration_tests") {
@@ -191,7 +204,6 @@
     "tests/common/controller.h",
     "tests/common/test_utils.cc",
     "tests/common/test_utils.h",
-    "tests/integration_tests/cfi_unittest.cc",
     "tests/integration_tests/integration_tests.cc",
     "tests/integration_tests/integration_tests_common.h",
     "tests/integration_tests/integration_tests_test.cc",
@@ -199,30 +211,15 @@
 
   deps = [
     ":sandbox",
+    ":sbox_integration_test_hook_dll",
+    ":sbox_integration_test_win_proc",
     "//base/test:test_support",
     "//testing/gtest",
   ]
 
-  data_deps = [
-    ":cfi_unittest_exe",
-    ":sbox_integration_test_hook_dll",
-    ":sbox_integration_test_win_proc",
-  ]
-
   libs = [ "dxva2.lib" ]
 }
 
-executable("cfi_unittest_exe") {
-  sources = [
-    "tests/integration_tests/cfi_unittest_exe.cc",
-  ]
-  deps = [
-    "//base",
-    "//build/config/sanitizers:deps",
-    "//build/win:default_exe_manifest",
-  ]
-}
-
 loadable_module("sbox_integration_test_hook_dll") {
   sources = [
     "tests/integration_tests/hooking_dll.cc",
diff --git a/sandbox/win/sandbox_win.gypi b/sandbox/win/sandbox_win.gypi
new file mode 100644
index 0000000..e9673aa
--- /dev/null
+++ b/sandbox/win/sandbox_win.gypi
@@ -0,0 +1,432 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'target_defaults': {
+    'variables': {
+      'sandbox_windows_target': 0,
+      'target_arch%': 'ia32',
+    },
+    'target_conditions': [
+      ['sandbox_windows_target==1', {
+        # Files that are shared between the 32-bit and the 64-bit versions
+        # of the Windows sandbox library.
+        'sources': [
+            'src/acl.cc',
+            'src/acl.h',
+            'src/broker_services.cc',
+            'src/broker_services.h',
+            'src/crosscall_client.h',
+            'src/crosscall_params.h',
+            'src/crosscall_server.cc',
+            'src/crosscall_server.h',
+            'src/eat_resolver.cc',
+            'src/eat_resolver.h',
+            'src/filesystem_dispatcher.cc',
+            'src/filesystem_dispatcher.h',
+            'src/filesystem_interception.cc',
+            'src/filesystem_interception.h',
+            'src/filesystem_policy.cc',
+            'src/filesystem_policy.h',
+            'src/handle_closer.cc',
+            'src/handle_closer.h',
+            'src/handle_closer_agent.cc',
+            'src/handle_closer_agent.h',
+            'src/interception.cc',
+            'src/interception.h',
+            'src/interception_agent.cc',
+            'src/interception_agent.h',
+            'src/interception_internal.h',
+            'src/interceptors.h',
+            'src/internal_types.h',
+            'src/ipc_tags.h',
+            'src/job.cc',
+            'src/job.h',
+            'src/named_pipe_dispatcher.cc',
+            'src/named_pipe_dispatcher.h',
+            'src/named_pipe_interception.cc',
+            'src/named_pipe_interception.h',
+            'src/named_pipe_policy.cc',
+            'src/named_pipe_policy.h',
+            'src/nt_internals.h',
+            'src/policy_broker.cc',
+            'src/policy_broker.h',
+            'src/policy_engine_opcodes.cc',
+            'src/policy_engine_opcodes.h',
+            'src/policy_engine_params.h',
+            'src/policy_engine_processor.cc',
+            'src/policy_engine_processor.h',
+            'src/policy_low_level.cc',
+            'src/policy_low_level.h',
+            'src/policy_params.h',
+            'src/policy_target.cc',
+            'src/policy_target.h',
+            'src/process_mitigations.cc',
+            'src/process_mitigations.h',
+            'src/process_mitigations_win32k_dispatcher.cc',
+            'src/process_mitigations_win32k_dispatcher.h',
+            'src/process_mitigations_win32k_interception.cc',
+            'src/process_mitigations_win32k_interception.h',
+            'src/process_mitigations_win32k_policy.cc',
+            'src/process_mitigations_win32k_policy.h',
+            'src/process_thread_dispatcher.cc',
+            'src/process_thread_dispatcher.h',
+            'src/process_thread_interception.cc',
+            'src/process_thread_interception.h',
+            'src/process_thread_policy.cc',
+            'src/process_thread_policy.h',
+            'src/registry_dispatcher.cc',
+            'src/registry_dispatcher.h',
+            'src/registry_interception.cc',
+            'src/registry_interception.h',
+            'src/registry_policy.cc',
+            'src/registry_policy.h',
+            'src/resolver.cc',
+            'src/resolver.h',
+            'src/restricted_token_utils.cc',
+            'src/restricted_token_utils.h',
+            'src/restricted_token.cc',
+            'src/restricted_token.h',
+            'src/sandbox_factory.h',
+            'src/sandbox_globals.cc',
+            'src/sandbox_nt_types.h',
+            'src/sandbox_nt_util.cc',
+            'src/sandbox_nt_util.h',
+            'src/sandbox_policy_base.cc',
+            'src/sandbox_policy_base.h',
+            'src/sandbox_policy.h',
+            'src/sandbox_rand.cc',
+            'src/sandbox_rand.h',
+            'src/sandbox_types.h',
+            'src/sandbox_utils.cc',
+            'src/sandbox_utils.h',
+            'src/sandbox.cc',
+            'src/sandbox.h',
+            'src/security_level.h',
+            'src/service_resolver.cc',
+            'src/service_resolver.h',
+            'src/sharedmem_ipc_client.cc',
+            'src/sharedmem_ipc_client.h',
+            'src/sharedmem_ipc_server.cc',
+            'src/sharedmem_ipc_server.h',
+            'src/sid.cc',
+            'src/sid.h',
+            'src/sync_dispatcher.cc',
+            'src/sync_dispatcher.h',
+            'src/sync_interception.cc',
+            'src/sync_interception.h',
+            'src/sync_policy.cc',
+            'src/sync_policy.h',
+            'src/target_interceptions.cc',
+            'src/target_interceptions.h',
+            'src/target_process.cc',
+            'src/target_process.h',
+            'src/target_services.cc',
+            'src/target_services.h',
+            'src/top_level_dispatcher.cc',
+            'src/top_level_dispatcher.h',
+            'src/win_utils.cc',
+            'src/win_utils.h',
+            'src/win2k_threadpool.cc',
+            'src/win2k_threadpool.h',
+            'src/window.cc',
+            'src/window.h',
+        ],
+        'target_conditions': [
+          ['target_arch=="x64"', {
+            'sources': [
+              'src/interceptors_64.cc',
+              'src/interceptors_64.h',
+              'src/resolver_64.cc',
+              'src/service_resolver_64.cc',
+            ],
+          }],
+          ['target_arch=="ia32"', {
+            'sources': [
+              'src/resolver_32.cc',
+              'src/service_resolver_32.cc',
+              'src/sidestep_resolver.cc',
+              'src/sidestep_resolver.h',
+              'src/sidestep\ia32_modrm_map.cpp',
+              'src/sidestep\ia32_opcode_map.cpp',
+              'src/sidestep\mini_disassembler_types.h',
+              'src/sidestep\mini_disassembler.cpp',
+              'src/sidestep\mini_disassembler.h',
+              'src/sidestep\preamble_patcher_with_stub.cpp',
+              'src/sidestep\preamble_patcher.h',
+            ],
+          }],
+        ],
+      }],
+    ],
+  },
+  'targets': [
+    {
+      'target_name': 'sandbox',
+      'type': 'static_library',
+      'variables': {
+        'sandbox_windows_target': 1,
+      },
+      'dependencies': [
+        '../base/base.gyp:base',
+        '../base/base.gyp:base_static',
+      ],
+      'export_dependent_settings': [
+        '../base/base.gyp:base',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'target_conditions': [
+        ['target_arch=="ia32"', {
+          'copies': [
+            {
+              'destination': '<(PRODUCT_DIR)',
+              'files': [
+                'wow_helper/wow_helper.exe',
+                'wow_helper/wow_helper.pdb',
+              ],
+            },
+          ],
+        }],
+      ],
+    },
+    {
+      'target_name': 'sbox_integration_tests',
+      'type': 'executable',
+      'dependencies': [
+        'sandbox',
+        'sbox_integration_test_hook_dll',
+        'sbox_integration_test_win_proc',
+        '../base/base.gyp:test_support_base',
+        '../testing/gtest.gyp:gtest',
+      ],
+      'sources': [
+        'src/address_sanitizer_test.cc',
+        'src/app_container_test.cc',
+        'src/file_policy_test.cc',
+        'src/handle_inheritance_test.cc',
+        'tests/integration_tests/integration_tests_test.cc',
+        'src/handle_closer_test.cc',
+        'src/integrity_level_test.cc',
+        'src/ipc_ping_test.cc',
+        'src/lpc_policy_test.cc',
+        'src/named_pipe_policy_test.cc',
+        'src/policy_target_test.cc',
+        'src/process_mitigations_test.cc',
+        'src/process_policy_test.cc',
+        'src/registry_policy_test.cc',
+        'src/restricted_token_test.cc',
+        'src/sync_policy_test.cc',
+        'src/sync_policy_test.h',
+        'src/unload_dll_test.cc',
+        'tests/common/controller.cc',
+        'tests/common/controller.h',
+        'tests/common/test_utils.cc',
+        'tests/common/test_utils.h',
+        'tests/integration_tests/integration_tests.cc',
+        'tests/integration_tests/integration_tests_common.h',
+      ],
+      'link_settings': {
+        'libraries': [
+          '-ldxva2.lib',
+        ],
+      },
+    },
+    {
+      'target_name': 'sbox_integration_test_hook_dll',
+      'type': 'shared_library',
+      'dependencies': [
+      ],
+      'sources': [
+        'tests/integration_tests/hooking_dll.cc',
+        'tests/integration_tests/integration_tests_common.h',
+      ],
+    },
+    {
+      'target_name': 'sbox_integration_test_win_proc',
+      'type': 'executable',
+      'dependencies': [
+      ],
+      'sources': [
+        'tests/integration_tests/hooking_win_proc.cc',
+        'tests/integration_tests/integration_tests_common.h',
+      ],
+      'msvs_settings': {
+        'VCLinkerTool': {
+          'SubSystem': '2',  # Set /SUBSYSTEM:WINDOWS
+        },
+      },
+    },
+    {
+      'target_name': 'sbox_validation_tests',
+      'type': 'executable',
+      'dependencies': [
+        'sandbox',
+        '../base/base.gyp:test_support_base',
+        '../testing/gtest.gyp:gtest',
+      ],
+      'sources': [
+        'tests/common/controller.cc',
+        'tests/common/controller.h',
+        'tests/validation_tests/unit_tests.cc',
+        'tests/validation_tests/commands.cc',
+        'tests/validation_tests/commands.h',
+        'tests/validation_tests/suite.cc',
+      ],
+      'link_settings': {
+        'libraries': [
+          '-lshlwapi.lib',
+        ],
+      },
+    },
+    {
+      'target_name': 'sbox_unittests',
+      'type': 'executable',
+      'dependencies': [
+        'sandbox',
+        '../base/base.gyp:test_support_base',
+        '../testing/gtest.gyp:gtest',
+      ],
+      'sources': [
+        'src/interception_unittest.cc',
+        'src/service_resolver_unittest.cc',
+        'src/restricted_token_unittest.cc',
+        'src/job_unittest.cc',
+        'src/sid_unittest.cc',
+        'src/policy_engine_unittest.cc',
+        'src/policy_low_level_unittest.cc',
+        'src/policy_opcodes_unittest.cc',
+        'src/ipc_unittest.cc',
+        'src/sandbox_nt_util_unittest.cc',
+        'src/threadpool_unittest.cc',
+        'src/win_utils_unittest.cc',
+        'tests/common/test_utils.cc',
+        'tests/common/test_utils.h',
+        'tests/unit_tests/unit_tests.cc',
+      ],
+    },
+    {
+      'target_name': 'sandbox_poc',
+      'type': 'executable',
+      'dependencies': [
+        'sandbox',
+        'pocdll',
+      ],
+      'sources': [
+        'sandbox_poc/main_ui_window.cc',
+        'sandbox_poc/main_ui_window.h',
+        'sandbox_poc/resource.h',
+        'sandbox_poc/sandbox.cc',
+        'sandbox_poc/sandbox.h',
+        'sandbox_poc/sandbox.ico',
+        'sandbox_poc/sandbox.rc',
+      ],
+      'link_settings': {
+        'libraries': [
+          '-lcomctl32.lib',
+        ],
+      },
+      'msvs_settings': {
+        'VCLinkerTool': {
+          'SubSystem': '2',         # Set /SUBSYSTEM:WINDOWS
+        },
+      },
+    },
+    {
+      'target_name': 'pocdll',
+      'type': 'shared_library',
+      'sources': [
+        'sandbox_poc/pocdll/exports.h',
+        'sandbox_poc/pocdll/fs.cc',
+        'sandbox_poc/pocdll/handles.cc',
+        'sandbox_poc/pocdll/invasive.cc',
+        'sandbox_poc/pocdll/network.cc',
+        'sandbox_poc/pocdll/pocdll.cc',
+        'sandbox_poc/pocdll/processes_and_threads.cc',
+        'sandbox_poc/pocdll/registry.cc',
+        'sandbox_poc/pocdll/spyware.cc',
+        'sandbox_poc/pocdll/utils.h',
+      ],
+      'defines': [
+        'POCDLL_EXPORTS',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+    },
+  ],
+  'conditions': [
+    ['OS=="win" and target_arch=="ia32"', {
+      'targets': [
+        {
+          'target_name': 'sandbox_win64',
+          'type': 'static_library',
+          'variables': {
+            'sandbox_windows_target': 1,
+            'target_arch': 'x64',
+          },
+          'dependencies': [
+            '../base/base.gyp:base_win64',
+            '../base/base.gyp:base_static_win64',
+          ],
+          'configurations': {
+            'Common_Base': {
+              'msvs_target_platform': 'x64',
+            },
+          },
+          'include_dirs': [
+            '../..',
+          ],
+          'defines': [
+            '<@(nacl_win64_defines)',
+          ]
+        },
+      ],
+    }],
+    ['test_isolation_mode != "noop"', {
+      'targets': [
+        {
+          'target_name': 'sbox_integration_tests_run',
+          'type': 'none',
+          'dependencies': [
+            'sbox_integration_tests',
+          ],
+          'includes': [
+            '../../build/isolate.gypi',
+          ],
+          'sources': [
+            '../sbox_integration_tests.isolate',
+          ],
+        },
+        {
+          'target_name': 'sbox_unittests_run',
+          'type': 'none',
+          'dependencies': [
+            'sbox_unittests',
+          ],
+          'includes': [
+            '../../build/isolate.gypi',
+          ],
+          'sources': [
+            '../sbox_unittests.isolate',
+          ],
+        },
+        {
+          'target_name': 'sbox_validation_tests_run',
+          'type': 'none',
+          'dependencies': [
+            'sbox_validation_tests',
+          ],
+          'includes': [
+            '../../build/isolate.gypi',
+          ],
+          'sources': [
+            '../sbox_validation_tests.isolate',
+          ],
+        },
+      ],
+    }],
+  ],
+}
diff --git a/sandbox/win/src/internal_types.h b/sandbox/win/src/internal_types.h
index 7ea4b7d..e102818 100644
--- a/sandbox/win/src/internal_types.h
+++ b/sandbox/win/src/internal_types.h
@@ -13,7 +13,7 @@
 const wchar_t kKerneldllName[] = L"kernel32.dll";
 const wchar_t kKernelBasedllName[] = L"kernelbase.dll";
 
-// Defines the supported C++ types encoding to numeric id. Like a simplified
+// Defines the supported C++ types encoding to numeric id. Like a poor's man
 // RTTI. Note that true C++ RTTI will not work because the types are not
 // polymorphic anyway.
 enum ArgType {
diff --git a/sandbox/win/src/sandbox.vcproj b/sandbox/win/src/sandbox.vcproj
index 229441c..f206e01 100644
--- a/sandbox/win/src/sandbox.vcproj
+++ b/sandbox/win/src/sandbox.vcproj
@@ -64,6 +64,11 @@
 			<Tool
 				Name="VCFxCopTool"
 			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+				Description="Copy wow_helper to output directory"
+				CommandLine="copy $(ProjectDir)\..\wow_helper\wow_helper.exe $(OutDir) &amp;&amp; copy $(ProjectDir)\..\wow_helper\wow_helper.pdb $(OutDir)"
+			/>
 		</Configuration>
 		<Configuration
 			Name="Release|Win32"
@@ -113,6 +118,11 @@
 			<Tool
 				Name="VCFxCopTool"
 			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+				Description="Copy wow_helper to output directory"
+				CommandLine="copy $(ProjectDir)\..\wow_helper\wow_helper.exe $(OutDir) &amp;&amp; copy $(ProjectDir)\..\wow_helper\wow_helper.pdb $(OutDir)"
+			/>
 		</Configuration>
 	</Configurations>
 	<References>
diff --git a/sandbox/win/src/sandbox_types.h b/sandbox/win/src/sandbox_types.h
index ae36ef5..919086a 100644
--- a/sandbox/win/src/sandbox_types.h
+++ b/sandbox/win/src/sandbox_types.h
@@ -5,7 +5,6 @@
 #ifndef SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
 #define SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
 
-#include "base/process/kill.h"
 #include "base/process/launch.h"
 
 namespace sandbox {
@@ -104,8 +103,6 @@
   SBOX_ERROR_CANNOT_RESOLVE_INTERCEPTION_THUNK = 41,
   // Cannot write interception thunk to child process.
   SBOX_ERROR_CANNOT_WRITE_INTERCEPTION_THUNK = 42,
-  // Cannot find the base address of the new process.
-  SBOX_ERROR_CANNOT_FIND_BASE_ADDRESS = 43,
   // Placeholder for last item of the enum.
   SBOX_ERROR_LAST
 };
@@ -124,10 +121,6 @@
   SBOX_FATAL_LAST
 };
 
-static_assert(SBOX_FATAL_MEMORY_EXCEEDED ==
-                  base::win::kSandboxFatalMemoryExceeded,
-              "Value for SBOX_FATAL_MEMORY_EXCEEDED must match base.");
-
 class BrokerServices;
 class TargetServices;
 
diff --git a/sandbox/win/src/security_level.h b/sandbox/win/src/security_level.h
index ecca64d..d8524c1 100644
--- a/sandbox/win/src/security_level.h
+++ b/sandbox/win/src/security_level.h
@@ -154,13 +154,11 @@
 // PROCESS_CREATION_MITIGATION_POLICY_SEHOP_ENABLE.
 const MitigationFlags MITIGATION_SEHOP                            = 0x00000004;
 
-// Forces ASLR on all images in the child process. In debug builds, must be
-// enabled after startup. Corresponds to
+// Forces ASLR on all images in the child process. Corresponds to
 // PROCESS_CREATION_MITIGATION_POLICY_FORCE_RELOCATE_IMAGES_ALWAYS_ON .
 const MitigationFlags MITIGATION_RELOCATE_IMAGE                   = 0x00000008;
 
-// Refuses to load DLLs that cannot support ASLR. In debug builds, must be
-// enabled after startup. Corresponds to
+// Refuses to load DLLs that cannot support ASLR. Corresponds to
 // PROCESS_CREATION_MITIGATION_POLICY_FORCE_RELOCATE_IMAGES_ALWAYS_ON_REQ_RELOCS.
 const MitigationFlags MITIGATION_RELOCATE_IMAGE_REQUIRED          = 0x00000010;
 
@@ -187,11 +185,6 @@
 
 // Prevents the process from making Win32k calls. Corresponds to
 // PROCESS_CREATION_MITIGATION_POLICY_WIN32K_SYSTEM_CALL_DISABLE_ALWAYS_ON.
-//
-// Applications linked to user32.dll or gdi32.dll make Win32k calls during
-// setup, even if Win32k is not otherwise used. So they also need to add a rule
-// with SUBSYS_WIN32K_LOCKDOWN and semantics FAKE_USER_GDI_INIT to allow the
-// initialization to succeed.
 const MitigationFlags MITIGATION_WIN32K_DISABLE                   = 0x00000200;
 
 // Prevents certain built-in third party extension points from being used.
diff --git a/sandbox/win/wow_helper.sln b/sandbox/win/wow_helper.sln
new file mode 100644
index 0000000..26d0da2
--- /dev/null
+++ b/sandbox/win/wow_helper.sln
@@ -0,0 +1,19 @@
+Microsoft Visual Studio Solution File, Format Version 9.00
+# Visual Studio 2005
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "wow_helper", "wow_helper\wow_helper.vcproj", "{BCF3A457-39F1-4DAA-9A65-93CFCD559036}"
+EndProject
+Global
+	GlobalSection(SolutionConfigurationPlatforms) = preSolution
+		Debug|x64 = Debug|x64
+		Release|x64 = Release|x64
+	EndGlobalSection
+	GlobalSection(ProjectConfigurationPlatforms) = postSolution
+		{BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Debug|x64.ActiveCfg = Debug|x64
+		{BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Debug|x64.Build.0 = Debug|x64
+		{BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Release|x64.ActiveCfg = Release|x64
+		{BCF3A457-39F1-4DAA-9A65-93CFCD559036}.Release|x64.Build.0 = Release|x64
+	EndGlobalSection
+	GlobalSection(SolutionProperties) = preSolution
+		HideSolutionNode = FALSE
+	EndGlobalSection
+EndGlobal
diff --git a/sandbox/win/wow_helper/wow_helper.exe b/sandbox/win/wow_helper/wow_helper.exe
new file mode 100755
index 0000000..f9bfb4b
--- /dev/null
+++ b/sandbox/win/wow_helper/wow_helper.exe
Binary files differ
diff --git a/sandbox/win/wow_helper/wow_helper.pdb b/sandbox/win/wow_helper/wow_helper.pdb
new file mode 100644
index 0000000..9cb67d0
--- /dev/null
+++ b/sandbox/win/wow_helper/wow_helper.pdb
Binary files differ
diff --git a/sandbox/win/wow_helper/wow_helper.vcproj b/sandbox/win/wow_helper/wow_helper.vcproj
new file mode 100644
index 0000000..c8e7c9e
--- /dev/null
+++ b/sandbox/win/wow_helper/wow_helper.vcproj
@@ -0,0 +1,215 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+	ProjectType="Visual C++"
+	Version="8.00"
+	Name="wow_helper"
+	ProjectGUID="{BCF3A457-39F1-4DAA-9A65-93CFCD559036}"
+	RootNamespace="wow_helper"
+	Keyword="Win32Proj"
+	>
+	<Platforms>
+		<Platform
+			Name="x64"
+		/>
+	</Platforms>
+	<ToolFiles>
+	</ToolFiles>
+	<Configurations>
+		<Configuration
+			Name="Debug|x64"
+			OutputDirectory="$(ProjectDir)"
+			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
+			ConfigurationType="1"
+			CharacterSet="1"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+				TargetEnvironment="3"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				Optimization="0"
+				AdditionalIncludeDirectories="$(SolutionDir)..;$(SolutionDir)..\third_party\platformsdk_win2008_6_1\files\Include;$(VSInstallDir)\VC\atlmfc\include"
+				PreprocessorDefinitions="_WIN32_WINNT=0x0501;WINVER=0x0501;WIN32;_DEBUG"
+				MinimalRebuild="true"
+				BasicRuntimeChecks="0"
+				RuntimeLibrary="1"
+				BufferSecurityCheck="false"
+				RuntimeTypeInfo="false"
+				UsePrecompiledHeader="0"
+				WarningLevel="3"
+				Detect64BitPortabilityProblems="true"
+				DebugInformationFormat="3"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLinkerTool"
+				LinkIncremental="1"
+				GenerateDebugInformation="true"
+				SubSystem="2"
+				TargetMachine="17"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCManifestTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCAppVerifierTool"
+			/>
+			<Tool
+				Name="VCWebDeploymentTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+		<Configuration
+			Name="Release|x64"
+			OutputDirectory="$(ProjectDir)"
+			IntermediateDirectory="$(PlatformName)\$(ConfigurationName)"
+			ConfigurationType="1"
+			CharacterSet="1"
+			WholeProgramOptimization="1"
+			>
+			<Tool
+				Name="VCPreBuildEventTool"
+			/>
+			<Tool
+				Name="VCCustomBuildTool"
+			/>
+			<Tool
+				Name="VCXMLDataGeneratorTool"
+			/>
+			<Tool
+				Name="VCWebServiceProxyGeneratorTool"
+			/>
+			<Tool
+				Name="VCMIDLTool"
+				TargetEnvironment="3"
+			/>
+			<Tool
+				Name="VCCLCompilerTool"
+				AdditionalIncludeDirectories="$(SolutionDir)..;$(SolutionDir)..\third_party\platformsdk_win2008_6_1\files\Include;$(VSInstallDir)\VC\atlmfc\include"
+				PreprocessorDefinitions="_WIN32_WINNT=0x0501;WINVER=0x0501;WIN32;NDEBUG"
+				RuntimeLibrary="0"
+				BufferSecurityCheck="false"
+				RuntimeTypeInfo="false"
+				UsePrecompiledHeader="0"
+				WarningLevel="3"
+				Detect64BitPortabilityProblems="true"
+				DebugInformationFormat="3"
+			/>
+			<Tool
+				Name="VCManagedResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCResourceCompilerTool"
+			/>
+			<Tool
+				Name="VCPreLinkEventTool"
+			/>
+			<Tool
+				Name="VCLinkerTool"
+				LinkIncremental="1"
+				GenerateDebugInformation="true"
+				SubSystem="2"
+				OptimizeReferences="2"
+				EnableCOMDATFolding="2"
+				TargetMachine="17"
+			/>
+			<Tool
+				Name="VCALinkTool"
+			/>
+			<Tool
+				Name="VCManifestTool"
+			/>
+			<Tool
+				Name="VCXDCMakeTool"
+			/>
+			<Tool
+				Name="VCBscMakeTool"
+			/>
+			<Tool
+				Name="VCFxCopTool"
+			/>
+			<Tool
+				Name="VCAppVerifierTool"
+			/>
+			<Tool
+				Name="VCWebDeploymentTool"
+			/>
+			<Tool
+				Name="VCPostBuildEventTool"
+			/>
+		</Configuration>
+	</Configurations>
+	<References>
+	</References>
+	<Files>
+		<Filter
+			Name="sandbox"
+			>
+			<File
+				RelativePath="..\src\nt_internals.h"
+				>
+			</File>
+			<File
+				RelativePath="..\src\resolver.h"
+				>
+			</File>
+		</Filter>
+		<File
+			RelativePath=".\service64_resolver.cc"
+			>
+		</File>
+		<File
+			RelativePath=".\service64_resolver.h"
+			>
+		</File>
+		<File
+			RelativePath=".\target_code.cc"
+			>
+		</File>
+		<File
+			RelativePath=".\target_code.h"
+			>
+		</File>
+		<File
+			RelativePath=".\wow_helper.cc"
+			>
+		</File>
+	</Files>
+	<Globals>
+	</Globals>
+</VisualStudioProject>